pax_global_header00006660000000000000000000000064130352315710014512gustar00rootroot0000000000000052 comment=6d40dace6358ecaff3e70da9b9b2b2d7397b8da7 .gitignore000066400000000000000000000003241303523157100130450ustar00rootroot00000000000000*.py[co] # vim *.swp *.swo # Packages build/ dist/ src/Bcfg2 *.egg *.egg-info # ctags tags TAGS # test artifacts testsuite/test.sqlite # rope project metadata .ropeproject # sphinx build data man/.doctrees .travis.yml000066400000000000000000000012261303523157100131700ustar00rootroot00000000000000language: python matrix: include: - python: "2.6" env: WITH_OPTIONAL_DEPS=no TEST_SPHINX=no - python: "2.6" env: WITH_OPTIONAL_DEPS=yes TEST_SPHINX=no - python: "2.7_with_system_site_packages" env: WITH_OPTIONAL_DEPS=no TEST_SPHINX=no - python: "2.7_with_system_site_packages" env: WITH_OPTIONAL_DEPS=yes TEST_SPHINX=yes install: - testsuite/install.sh - pip install -e . script: - nosetests testsuite after_failure: - pip freeze branches: except: - maint-1.2 - 1.1.0-stable notifications: email: chris.a.st.pierre@gmail.com irc: channels: - "irc.freenode.org#bcfg2" use_notice: true COPYRIGHT000066400000000000000000000130201303523157100123500ustar00rootroot00000000000000This file contains a list of copyright holders. Anyone who contributes more than trivial fixes (typos, etc.) to Bcfg2 should also add themselves to this file. See LICENSE for the full license. - Narayan Desai has written most of Bcfg2, including all parts not explicitly mentioned in this file. - Sol Jerome squashes bugs, helps manage the project roadmap, and implements various interesting features. - Tim Laszlo worked on the reporting system and made plugins. - Fabian Affolter made some patches, added some new features and plugins, and restructured the manual for Bcfg2. - Andrew Brestick fixed bugs and completed plugins. - James Yang worked on bcfg2-admin and bcfg2-reports. - Robert Gogolok fixed bugs and made the code more robust. - Jack Neely worked on the YUM driver. - Joey Hagedorn has written the reporting subsystem, including StatReports, GenerateHostinfo, and the xslt, css and javascript associated with it. - Ed Smith has done substantial hardening of the Bcfg2 client and server and implemented a common logging infrastructure. - Rick Bradshaw has written several of the tools included in the tools/ subdirectory. - Ken Raffenetti , Rick Bradshaw, Rene Martin, and David Dahl have written the Hostbase plugin. - Scott Behrens and Rick Bradshaw have written the VHost plugin. - Cory Lueninghoener wrote the showentries function in bcfg2-info. - Chris Vuletich wrote some SSL code and the verification debugging code. - Daniel Clark created encap packages for Bcfg2 and deps, wrote fossil-scm dvcs support, and helps with Debian packaging. - Jason Pepas has written an RPM package list creator and has contributed patches to the Red Hat toolset. - Sami Haahtinen has written Debian packaging logic. - Brian Pellin and Andrew Lusk did substantial work on Bcfg1, some of which was used in the Bcfg2 client. - Michael Jinks wrote the gentoo tool drivers. - Chris St. Pierre has (re)written vast swaths of more recent Bcfg2 releases. - Anatoly Techtonik has fixed various bugs. - Arto Jantunen maintains the Debian packages. - Asaf Ohaion added Pacman support. - Brent Bloxam fixed bugs, particularly in the documentation. - Calen Pennington wrote bcfg2-test and contributed performance enhancements. - Calvin Cheng worked on Python packaging. - Carl Jackson fixed client-side bugs. 
- Chris Brinker added support for client profile assertion to bcfg2.conf - Christopher 'm4z' Holm greatly improved the RPM build logic. - Dan Foster contributed Solaris 10 build fixes. - David Strauss wrote the Bzr plugin and contributed other various fixes. - Gordon Messmer contributed documentation fixes. - Graham Hagger wrote the SSLCA plugin. - Holger Weiß has fixed a tremendous number and variety of bugs, particularly with unicode handling, SSHbase, and bcfg2-reports. - Jake Davis has fixed various bugs. - Jason Kincl added conflict resolution to the Svn plugin. - Jeffrey C. Ollie wrote systemd support. - Jeroen Dekkers worked on the APT driver. - Joe Digilio worked on Cheetah support and fixed other bugs. - John Morris fixed bugs in the Chkconfig driver. - John 'Skip' Reddy worked on DBStats. - Jonathan Billings worked on systemd support, RPM builds, and fixed other bugs. - Kamil Kisiel worked on documentation, Py3k support, launchd support, and other bugs. - Kioob fixed various bugs. - Luke Cyca worked on MacPorts and launchd support. - Mike Brady worked on the YUM and RPM drivers. - Mike McCallister worked on the Packages plugin. - Phillip Steinbachs wrote Solaris packaging manifests. - Raul Cuza worked on Python packaging, documentation, and various bugs. - Remi Broemeling worked on handling restarts of Service entries. - Richard Connon worked on handling of Apt repositories. - Steve Tousignant worked on several of the Debian package list tools and contributed bug fixes. - Ti Legget worked on ebuild packaging and bugfixes, RPM packaging. - Torsten Rehn wrote the Ldap plugin and fixed bugs. - Zach Lowry wrote Solaris support and general hardening. - Michael Fenn implemented the database router for separately storing the reporting database and fixed various small bugs related to bcfg2 on CentOS 5 - Alexander Sulfrian fixed various bugs. LICENSE000066400000000000000000000044431303523157100120700ustar00rootroot00000000000000Copyright (c) 2004, University of Chicago. See the COPYRIGHT file at the top-level directory of this distribution and at https://github.com/Bcfg2/bcfg2/blob/master/COPYRIGHT. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. GOVERNMENT LICENSE Portions of this material resulted from work developed under a U.S. 
Government Contract and are subject to the following license: the Government is granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable worldwide license in this computer software to reproduce, prepare derivative works, and perform publicly and display publicly. DISCLAIMER This computer code material was prepared, in part, as an account of work sponsored by an agency of the United States Government. Neither the United States, nor the University of Chicago, nor any of their employees, makes any warranty express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. README000066400000000000000000000032721303523157100117420ustar00rootroot00000000000000Bcfg2 - A Configuration Management System ----------------------------------------- Bcfg2 (bee-config two) helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. * Homepage: http://bcfg2.org Bcfg2 is fairly portable. It has been successfully run on: * AIX, FreeBSD, OpenBSD Mac OS X, OpenSolaris, Solaris * Many GNU/Linux distributions, including Arch Linux, Blag, CentOS, Debian, Fedora, Gentoo, gNewSense, Mandriva, openSUSE, Red Hat/RHEL, SuSE/SLES, Trisquel, and Ubuntu. Installation ------------ For details about the installation of Bcfg2 please refer to the following pages in the Bcfg2 online documentation: * Prerequisites: http://docs.bcfg2.org/installation/prerequisites.html * Download: http://bcfg2.org/download/ * Installation: http://docs.bcfg2.org/installation/index.html Need help --------- A lot of documentation is available in the Bcfg2 manual and the Bcfg2 wiki. * Documentation: http://docs.bcfg2.org/ * Wiki: http://bcfg2.org/wiki/ * FAQ: http://bcfg2.org/wiki/FAQ * IRC: #bcfg2 on chat.freenode.net * Mailing list: https://lists.mcs.anl.gov/mailman/listinfo/bcfg-dev Want to help ------------- * Bug tracker: http://bcfg2.org/report * Development: http://docs.bcfg2.org/development/ * Wiki: http://bcfg2.org/wiki/Contribute Bcfg2 is licensed under a Simplified (2-clause) BSD license. For more details check LICENSE. debian/000077500000000000000000000000001303523157100123005ustar00rootroot00000000000000debian/NEWS000066400000000000000000000011411303523157100127740ustar00rootroot00000000000000bcfg2 (1.1.0-1) unstable; urgency=low Due to repository changes made to support Path entries it is recommended to upgrade the clients before upgrading the server. After that, you can use the posixunified.py script (included as an example in the server package) to convert your repository. -- Arto Jantunen Fri, 05 Nov 2010 19:07:59 +0200 bcfg2 (1.0.1-1) unstable; urgency=low Since version 1.0, Bcfg2 no longer supports agent mode. Please update your configuration accordingly if you were using it. 
-- Arto Jantunen Tue, 03 Aug 2010 12:28:54 +0300 debian/bcfg2-doc.docs000066400000000000000000000000221303523157100146720ustar00rootroot00000000000000build/sphinx/html debian/bcfg2-server.README.Debian000066400000000000000000000015561303523157100166360ustar00rootroot00000000000000Getting started =============== The first thing to do is to create the repository and SSL key; these can be created by running "bcfg2-admin init" as root on the server. The script will ask questions about the SSL key, and a few things about the repository. After that is done, you should be able to start the server with the init script by running "/etc/init.d/bcfg2-server start". You can look at /var/log/daemon.log to see what the server said while starting up. If the server started without problems, you can run "bcfg2 -q -v -n" to see if the client and server are communicating properly. The next step after that is to actually populate the repository (by default in /var/lib/bcfg2/) with configuration files; see the Bcfg2 documentation at http://docs.bcfg2.org/ for information on how that is done. -- Arto Jantunen , Sat, 24 Apr 2010 09:43:44 +0300 debian/bcfg2-server.bcfg2-report-collector.init000077500000000000000000000051201303523157100217340ustar00rootroot00000000000000#!/bin/sh # # bcfg2-report-collector - Bcfg2 reporting collector daemon # # chkconfig: 2345 19 81 # description: bcfg2 server for reporting data # ### BEGIN INIT INFO # Provides: bcfg2-report-collector # Required-Start: $network $remote_fs $named # Required-Stop: $network $remote_fs $named # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Configuration management Server # Description: Bcfg2 is a configuration management system that builds and # installs configuration files served by bcfg2-server ### END INIT INFO # Include lsb functions . /lib/lsb/init-functions # Commonly used stuff DAEMON=/usr/sbin/bcfg2-report-collector PIDFILE=/var/run/bcfg2-server/bcfg2-report-collector.pid PARAMS="-D $PIDFILE" # Include default startup configuration if exists test -f "/etc/default/bcfg2-server" && . /etc/default/bcfg2-server # Exit if $DAEMON doesn't exist and is not executable test -x $DAEMON || exit 5 # Internal variables BINARY=$(basename $DAEMON) RETVAL=0 start () { echo -n "Starting Configuration Report Collector: " start_daemon ${DAEMON} ${PARAMS} ${BCFG2_REPORT_OPTIONS} STATUS=$? if [ "$STATUS" = 0 ] then log_success_msg "bcfg2-report-collector" test -d /var/lock/subsys && touch /var/lock/subsys/bcfg2-report-collector else log_failure_msg "bcfg2-report-collector" fi return $STATUS } stop () { echo -n "Stopping Configuration Report Collector: " if [ -f $PIDFILE ]; then killproc -p $PIDFILE ${BINARY} else killproc ${BINARY} fi STATUS=$? if [ "$STATUS" = 0 ]; then log_success_msg "bcfg2-report-collector" test -e /var/lock/subsys/bcfg2-report-collector && rm /var/lock/subsys/bcfg2-report-collector else log_failure_msg "bcfg2-report-collector" fi return $STATUS } status () { PID=$(pidofproc -p "$PIDFILE" $BINARY) if [ -n "$PID" ]; then echo "$BINARY (pid $PID) is running..." return 0 fi if [ -f $PIDFILE ]; then if [ -n "$PID" ]; then log_failure_msg "$BINARY dead but pid file exists..." return 1 fi fi log_failure_msg "$BINARY is not running" return 3 } case "$1" in start) start RETVAL=$? ;; stop) stop RETVAL=$? ;; status) status RETVAL=$? ;; restart|reload|force-reload) stop sleep 5 start RETVAL=$? 
;; *) log_success_msg "Usage: $0 {start|stop|status|reload|restart|force-reload}" RETVAL=1 ;; esac exit $RETVAL debian/bcfg2-server.default000066400000000000000000000007551303523157100161440ustar00rootroot00000000000000# Configuration options for bcfg2 server # BCFG2_SERVER_OPTIONS: # Set the default options for Bcfg2 Server on startup # Default: "" #BCFG2_SERVER_OPTIONS="" # BCFG2_SERVER_ENABLED: # Should Bcfg2 Server be run automatically by system scripts # # Uncomment the following line to enable any of the below selections # Default: 0 (disable) BCFG2_SERVER_ENABLED=1 # BCFG2_REPORT_OPTIONS: # Set the default options for Bcfg2 Report Collector on startup # Default: "" #BCFG2_REPORT_OPTIONS debian/bcfg2-server.dirs000066400000000000000000000000161303523157100154470ustar00rootroot00000000000000var/lib/bcfg2 debian/bcfg2-server.docs000066400000000000000000000000221303523157100154330ustar00rootroot00000000000000LICENSE COPYRIGHT debian/bcfg2-server.examples000066400000000000000000000000161303523157100163240ustar00rootroot00000000000000tools/upgrade debian/bcfg2-server.init000077500000000000000000000054671303523157100154730ustar00rootroot00000000000000#!/bin/sh # # bcfg-server - Bcfg2 configuration daemon # # chkconfig: 2345 19 81 # description: bcfg2 server for configuration requests # ### BEGIN INIT INFO # Provides: bcfg2-server # Required-Start: $network $remote_fs $named $syslog # Required-Stop: $network $remote_fs $named $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Configuration management Server # Description: The server component of the Bcfg2 configuration management # system ### END INIT INFO # Include lsb functions test -f "/lib/lsb/init-functions" && . /lib/lsb/init-functions # debian test -f "/etc/init.d/functions" && . /etc/init.d/functions # redhat # Commonly used stuff DAEMON=/usr/sbin/bcfg2-server PIDFILE=/var/run/bcfg2-server/bcfg2-server.pid PARAMS="-D $PIDFILE" # Disabled per default BCFG2_SERVER_OPTIONS="" BCFG2_SERVER_ENABLED=0 # Include default startup configuration if exists test -f "/etc/default/bcfg2-server" && . /etc/default/bcfg2-server if [ "$BCFG2_SERVER_ENABLED" -eq 0 ] ; then log_failure_msg "bcfg2-server is disabled - see /etc/default/bcfg2-server" exit 0 fi # Exit if $DAEMON doesn't exist and is not executable test -x $DAEMON || exit 5 # Internal variables BINARY=$(basename $DAEMON) RETVAL=0 start () { echo -n "Starting Configuration Management Server: " start_daemon ${DAEMON} ${PARAMS} ${BCFG2_SERVER_OPTIONS} STATUS=$? if [ "$STATUS" = 0 ] then log_success_msg "bcfg2-server" test -d /var/lock/subsys && touch /var/lock/subsys/bcfg2-server else log_failure_msg "bcfg2-server" fi return $STATUS } stop () { echo -n "Stopping Configuration Management Server: " killproc -p $PIDFILE ${BINARY} STATUS=$? if [ "$STATUS" = 0 ]; then [ -e $PIDFILE ] && rm -f $PIDFILE log_success_msg "bcfg2-server" test -d /var/lock/subsys && touch /var/lock/subsys/bcfg2-server else log_failure_msg "bcfg2-server" fi return $STATUS } status () { # Inspired by redhat /etc/init.d/functions status() call PID=$(pidof -x $BINARY -o %PPID) if [ -n "$PID" ]; then echo "$BINARY (pid $PID) is running..." return 0 fi if [ -f $PIDFILE ]; then if [ -n "$PID" ]; then log_failure_msg "$BINARY dead but pid file exists..." return 1 fi fi log_failure_msg "$BINARY is not running" return 3 } case "$1" in start) start RETVAL=$? ;; stop) stop RETVAL=$? ;; status) status RETVAL=$? ;; restart|reload|force-reload) stop sleep 5 start RETVAL=$? 
;; *) log_success_msg "Usage: $0 {start|stop|status|reload|restart|force-reload}" RETVAL=1 ;; esac exit $RETVAL debian/bcfg2-server.install000066400000000000000000000005211303523157100161550ustar00rootroot00000000000000debian/bcfg2-server.default usr/share/bcfg2 debian/tmp/usr/bin/bcfg2-* usr/sbin debian/tmp/usr/lib/python*/*-packages/Bcfg2/Server/* debian/tmp/usr/lib/python*/*-packages/Bcfg2/Reporting/* debian/tmp/usr/share/bcfg2/schemas/* debian/tmp/usr/share/bcfg2/xsl-transforms/* debian/tmp/usr/share/man/man8/* debian/tmp/etc/bash_completion.d/* debian/bcfg2-server.logcheck.ignore.server000066400000000000000000000006321303523157100210600ustar00rootroot00000000000000^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Handled [0-9]+ events in [0-9.]+ ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Generated config for [._[:alnum:]-]+ in [0-9.]+ s$ ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Client [._[:alnum:]-]+ reported state (clean|dirty)$ ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Suppressing event for bogus file .*$ debian/bcfg2-server.postinst000066400000000000000000000021071303523157100163740ustar00rootroot00000000000000#!/bin/sh # postinst script for bcfg2-server # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package # case "$1" in configure) ucf /usr/share/bcfg2/bcfg2-server.default /etc/default/bcfg2-server if [ -x /usr/bin/ucfr ] ; then ucfr bcfg2 /etc/default/bcfg2-server fi ;; abort-upgrade|abort-remove|abort-deconfigure) ;; *) echo "postinst called with unknown argument \`$1'" >&2 exit 1 ;; esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. #DEBHELPER# exit 0 debian/bcfg2-server.postrm000066400000000000000000000021661303523157100160420ustar00rootroot00000000000000#!/bin/sh # postrm script for bcfg2-server # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `remove' # * `purge' # * `upgrade' # * `failed-upgrade' # * `abort-install' # * `abort-install' # * `abort-upgrade' # * `disappear' overwrit>r> # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package case "$1" in purge) for i in /etc/default/bcfg2-server; do rm -f $i if which ucf >/dev/null; then ucf --purge $i fi if which ucfr >/dev/null; then ucfr --purge bcfg2 $i fi done ;; remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) ;; *) echo "postrm called with unknown argument \`$1'" >&2 exit 1 esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. 
#DEBHELPER# exit 0 debian/bcfg2-utils.install000066400000000000000000000000321303523157100160040ustar00rootroot00000000000000tools/* usr/share/bcfg2 debian/bcfg2-web.install000066400000000000000000000001731303523157100154270ustar00rootroot00000000000000misc/apache/bcfg2.conf etc/apache2/conf.d/ debian/tmp/usr/share/bcfg2/reports.wsgi debian/tmp/usr/share/bcfg2/site_media/* debian/bcfg2.cron.daily000077500000000000000000000005661303523157100152610ustar00rootroot00000000000000#!/bin/sh BCFG2CRON= if [ -x /usr/libexec/bcfg2-cron ]; then BCFG2CRON=/usr/libexec/bcfg2-cron elif [ -x /usr/lib/bcfg2/bcfg2-cron ]; then BCFG2CRON=/usr/lib/bcfg2/bcfg2-cron elif type bcfg2-cron 2>&1 >/dev/null; then BCFG2CRON=bcfg2-cron else echo "No bcfg2-cron command found" exit 1 fi $BCFG2CRON --daily 2>&1 | logger -t bcfg2-cron -p daemon.info -i debian/bcfg2.cron.hourly000077500000000000000000000005671303523157100155020ustar00rootroot00000000000000#!/bin/sh BCFG2CRON= if [ -x /usr/libexec/bcfg2-cron ]; then BCFG2CRON=/usr/libexec/bcfg2-cron elif [ -x /usr/lib/bcfg2/bcfg2-cron ]; then BCFG2CRON=/usr/lib/bcfg2/bcfg2-cron elif type bcfg2-cron 2>&1 >/dev/null; then BCFG2CRON=bcfg2-cron else echo "No bcfg2-cron command found" exit 1 fi $BCFG2CRON --hourly 2>&1 | logger -t bcfg2-cron -p daemon.info -i debian/bcfg2.default000066400000000000000000000012161303523157100146310ustar00rootroot00000000000000# Configuration options for bcfg2 client # BCFG2_OPTIONS: # Set the default options for Bcfg2 on startup # Default: "-q" #BCFG2_OPTIONS="-q" # BCFG2_ENABLED: # Should Bcfg2 be run automatically by system scripts # # Uncomment the following line to enable any of the below selections # Default: 0 (disable) #BCFG2_ENABLED=1 # BCFG2_INIT: # Enable bcfg2 during system bootup # # Set value to 1 to enable # Default: 0 (disable) #BCFG2_INIT=1 # BCFG2_AGENT: # Bcfg2 no longer supports agent mode, please see NEWS.Debian # BCFG2_CRON: # Set the frequency of cron runs. # # Can be set to off, hourly, daily or both # Default: off #BCFG2_CRON=off debian/bcfg2.docs000066400000000000000000000000231303523157100141300ustar00rootroot00000000000000LICENSE COPYRIGHT debian/bcfg2.init000077500000000000000000000040521303523157100141540ustar00rootroot00000000000000#!/bin/sh # # bcfg2 - bcfg2 configuration client # # chkconfig: 2345 19 81 # description: bcfg2 client for configuration requests # ### BEGIN INIT INFO # Provides: bcfg2 # Required-Start: $network $remote_fs $named # Required-Stop: $network $remote_fs $named # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Configuration management client # Description: Bcfg2 is a configuration management system that builds # installs configuration files served by bcfg2-server # This is a client that installs the server provided # Configuration. ### END INIT INFO # This might need some better logic BCFG2=/usr/sbin/bcfg2 # Set default options # You can set script specific options with BCFG2_OPTIONS_INIT # You can set agent-mode specific options with BCFG2_OPTIONS_AGENT BCFG2_OPTIONS="-q" # Disabled per default BCFG2_ENABLED=0 BCFG2_INIT=0 BCFG2_AGENT=0 # Include default startup configuration if exists test -f "/etc/default/bcfg2" && . /etc/default/bcfg2 [ "$BCFG2_ENABLED" -eq 0 ] && exit 0 [ "$BCFG2_AGENT" -eq 0 -a "$BCFG2_INIT" -eq 0 ] && exit 0 # Exit if bcfg2 doesn't exist and is not executable test -x $BCFG2 || exit 0 if [ "$BCFG2_AGENT" != 0 ]; then echo "Bcfg2 no longer supports agent mode, please update your configuration!" 
exit 1 fi # Internal variables BINARY=$(basename $BCFG2) RETVAL=0 # Include lsb functions . /lib/lsb/init-functions start () { echo -n "Running configuration management client: " if [ "$BCFG2_INIT" -eq 1 ]; then ${BCFG2} ${BCFG2_OPTIONS} ${BCFG2_OPTIONS_INIT} STATUS=$? fi if [ "$STATUS" -eq 0 ] then log_success_msg "bcfg2" else log_failure_msg "bcfg2" fi return $STATUS } case "$1" in start) start RETVAL=$? ;; stop|status) RETVAL=0 ;; restart|force-reload) start RETVAL=$? ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload}" RETVAL=1 ;; esac exit $RETVAL debian/bcfg2.install000066400000000000000000000005511303523157100146540ustar00rootroot00000000000000debian/tmp/usr/bin/bcfg2 usr/sbin debian/tmp/usr/lib/python*/*-packages/Bcfg2/*.py debian/tmp/usr/lib/python*/*-packages/Bcfg2/Client/* debian/tmp/usr/lib/python*/*-packages/Bcfg2/Options/* debian/tmp/usr/share/man/man1/* debian/tmp/usr/share/man/man5/* examples/bcfg2.conf usr/share/bcfg2 debian/bcfg2.default usr/share/bcfg2 tools/bcfg2-cron usr/lib/bcfg2 debian/bcfg2.postinst000066400000000000000000000022551303523157100150740ustar00rootroot00000000000000#!/bin/sh # postinst script for bcfg2 # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package # case "$1" in configure) OLDUMASK=$(umask) umask 027 ucf /usr/share/bcfg2/bcfg2.conf /etc/bcfg2.conf umask $OLDUMASK ucf /usr/share/bcfg2/bcfg2.default /etc/default/bcfg2 if [ -x /usr/bin/ucfr ] ; then ucfr bcfg2 /etc/bcfg2.conf ucfr bcfg2 /etc/default/bcfg2 fi ;; abort-upgrade|abort-remove|abort-deconfigure) ;; *) echo "postinst called with unknown argument \`$1'" >&2 exit 1 ;; esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. #DEBHELPER# exit 0 debian/bcfg2.postrm000066400000000000000000000021701303523157100145310ustar00rootroot00000000000000#!/bin/sh # postrm script for bcfg2 # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `remove' # * `purge' # * `upgrade' # * `failed-upgrade' # * `abort-install' # * `abort-install' # * `abort-upgrade' # * `disappear' overwrit>r> # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package case "$1" in purge) for i in /etc/default/bcfg2 /etc/bcfg2.conf; do rm -f $i if which ucf >/dev/null; then ucf --purge $i fi if which ucfr >/dev/null; then ucfr --purge bcfg2 $i fi done ;; remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) ;; *) echo "postrm called with unknown argument \`$1'" >&2 exit 1 esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. 
#DEBHELPER# exit 0 debian/changelog000066400000000000000000000736201303523157100141620ustar00rootroot00000000000000bcfg2 (1.4.0pre2-0.1) UNRELEASED; urgency=low * Helper scripts packed in bcfg2-utils -- Holger Mueller Wed, 07 Aug 2013 21:41:10 +0200 bcfg2 (1.4.0pre2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 13 Nov 2015 12:01:49 -0600 bcfg2 (1.4.0pre1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Mon, 16 Jun 2014 09:36:13 -0500 bcfg2 (1.3.6-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Thu, 11 Jun 2015 15:30:04 -0500 bcfg2 (1.3.5-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 05 Sep 2014 07:54:48 -0500 bcfg2 (1.3.4-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Tue, 25 Feb 2014 13:25:16 -0600 bcfg2 (1.3.3-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Thu, 07 Nov 2013 08:09:57 -0600 bcfg2 (1.3.2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Mon, 01 Jul 2013 16:24:46 -0500 bcfg2 (1.3.1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Thu, 21 Mar 2013 09:32:16 -0500 bcfg2 (1.3.0-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 15 Mar 2013 08:45:18 -0500 bcfg2 (1.3.0rc2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Tue, 29 Jan 2013 10:43:55 -0600 bcfg2 (1.3.0rc1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Wed, 09 Jan 2013 10:48:22 -0600 bcfg2 (1.3.0pre2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Tue, 30 Oct 2012 17:19:01 -0500 bcfg2 (1.3.0pre1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 31 Aug 2012 13:16:05 -0500 bcfg2 (1.2.3-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Tue, 03 Jul 2012 09:33:50 -0500 bcfg2 (1.2.2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Sat, 17 Mar 2012 14:41:17 -0500 bcfg2 (1.2.1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 27 Jan 2012 13:55:45 -0600 bcfg2 (1.2.0-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Wed, 07 Dec 2011 20:22:57 -0600 bcfg2 (1.2.0rc2-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Fri, 28 Oct 2011 14:30:45 -0500 bcfg2 (1.2.0rc1-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Thu, 01 Sep 2011 20:59:16 -0500 bcfg2 (1.2.0pre3-0.0) unstable; urgency=low * New upstream release -- Sol Jerome Sat, 18 Jun 2011 22:41:29 -0500 bcfg2 (1.2.0-0.0pre2) unstable; urgency=low * New upstream release -- Sol Jerome Mon, 25 Apr 2011 11:21:13 -0500 bcfg2 (1.2.0-0.0pre1) unstable; urgency=low * New upstream release -- Sol Jerome Sun, 23 Jan 2011 16:33:00 -0600 bcfg2 (1.1.0-0.0) unstable; urgency=low * New upstream release -- Narayan Desai Sun, 26 Sep 2010 20:56:09 -0500 bcfg2 (1.1.0-0.0rc5) unstable; urgency=low * New upstream release -- Narayan Desai Tue, 14 Sep 2010 10:53:22 -0500 bcfg2 (1.1.0-0.0rc4) unstable; urgency=low * New upstream release -- Sol Jerome Mon, 19 Jul 2010 14:46:00 -0500 bcfg2 (1.1.0-0.0rc3) unstable; urgency=low * New upstream release -- Narayan Desai Thu, 17 Jun 2010 11:22:34 -0500 bcfg2 (1.1.0-0.0rc2) unstable; urgency=low * New upstream release -- Sol Jerome Wed, 09 Jun 2010 12:42:54 -0500 bcfg2 (1.1.0-0.1rc1) unstable; urgency=low * release candidate -- Narayan Desai Tue, 27 Apr 2010 11:17:26 -0600 bcfg2 (1.0.1-1) unstable; urgency=low * Remove unnecessary python-lxml dependency (for the client package) * Remove duplicate Recommends items -- Sol Jerome Thu, 08 Apr 2010 08:31:45 -0600 
bcfg2 (1.0.0-6) unstable; urgency=low * final 1.0.0 release + debian directory from r5630 -- Daniel Clark Sun, 23 Dec 2009 02:13:44 -0500 bcfg2 (1.0.0-4) unstable; urgency=low * final 1.0.0 release + debian directory from r5621 -- Daniel Clark Sun, 15 Dec 2009 06:12:33 -0500 bcfg2 (1.0.0-2) unstable; urgency=low * final 1.0.0 release + debian directory from r5619 -- Daniel Clark Sun, 11 Dec 2009 15:59:33 -0500 bcfg2 (1.0.0-1) unstable; urgency=low * final release -- Narayan Desai Thu, 05 Nov 2009 17:40:46 -0600 bcfg2 (1.0.0~rc4-1) unstable; urgency=low * last release candidate -- Narayan Desai Thu, 05 Nov 2009 16:10:39 -0600 bcfg2 (1.0.0~rc3-1) unstable; urgency=low [ Narayan Desai ] * new release candidate -- Narayan Desai Wed, 04 Nov 2009 23:47:56 -0600 bcfg2 (1.0.0~rc2-1) unstable; urgency=low [ Narayan Desai ] * new release candidate [ Sami Haahtinen ] * Update packaging * Switch to plain debhelper * Switch from pycentral to python-support * Move homepage to the dedicated Homepage field * Update Standards-Version to 3.8.3.0 * Add python-m2crypto as dependency * bcfg2-server needs python2.4+ -- Sami Haahtinen Sat, 24 Oct 2009 00:20:51 +0300 bcfg2 (1.0.0rc1-0.1) unstable; urgency=low * first release candidate -- Narayan Desai Sun, 25 Oct 2009 21:40:44 -0500 bcfg2 (1.0pre5-0.2) unstable; urgency=low * version bump to 1.0pre5 -- Narayan Desai Thu, 23 Jul 2009 10:54:06 -0500 bcfg2 (1.0pre4-0.2) unstable; urgency=low * Remove python-cheetah dependency -- Sol Jerome Wed, 15 Jun 2009 14:30:25 -0500 bcfg2 (1.0pre4-0.1) unstable; urgency=low * new prerelease -- Narayan Desai Wed, 24 Jun 2009 20:58:52 -0500 bcfg2 (1.0pre3-0.1) unstable; urgency=low * new prerelease -- Narayan Desai Fri, 22 May 2009 12:37:13 -0500 bcfg2 (1.0pre2-0.1) unstable; urgency=low * new prerelease -- Narayan Desai Wed, 25 Mar 2009 19:17:14 -0500 bcfg2 (1.0pre1-0.0) unstable; urgency=low * new prerelease -- Narayan Desai Thu, 29 Jan 2009 15:36:44 -0600 bcfg2 (0.9.6-0.1) unstable; urgency=low * Final release! 
-- Narayan Desai Thu, 13 Nov 2008 18:36:50 -0600 bcfg2 (0.9.6-0.0rc1) unstable; urgency=low * Release candidate -- Narayan Desai Fri, 07 Nov 2008 13:52:58 -0600 bcfg2 (0.9.6-0.0pre3) unstable; urgency=low * new prerelease (hopefully rc) -- Narayan Desai Fri, 10 Oct 2008 16:35:17 -0600 bcfg2 (0.9.6-0.0pre2) unstable; urgency=low * new prerelease -- Narayan Desai Thu, 07 Aug 2008 15:49:22 -0500 bcfg2 (0.9.5-0.1) unstable; urgency=low * final release -- Narayan Desai Fri, 09 Nov 2007 19:44:25 -0600 bcfg2 (0.9.5-0.0pre7) unstable; urgency=low * final pre ;) -- Narayan Desai Mon, 05 Nov 2007 17:58:03 -0600 bcfg2 (0.9.5-0.0pre6) unstable; urgency=low * new pre (double hopefully the final 0.9.5pre) -- Narayan Desai Tue, 30 Oct 2007 12:42:27 -0500 bcfg2 (0.9.5-0.0pre5) unstable; urgency=low * new pre (hopefully the final 0.9.5pre) -- Narayan Desai Thu, 11 Oct 2007 16:31:33 -0500 bcfg2 (0.9.5-0.0pre4) unstable; urgency=low * new pre (final feature addition for 0.9.5) -- Narayan Desai Sun, 26 Aug 2007 16:25:33 -0500 bcfg2 (0.9.5-0.0pre3) unstable; urgency=low * new pre -- Narayan Desai Tue, 07 Aug 2007 22:32:16 -0500 bcfg2 (0.9.5-0.0pre2) unstable; urgency=low * new pre -- Narayan Desai Wed, 25 Jul 2007 15:01:48 -0500 bcfg2 (0.9.5-0.0pre1) unstable; urgency=low * new pre -- Narayan Desai Thu, 19 Jul 2007 19:25:16 -0500 bcfg2 (0.9.4-0.1) unstable; urgency=low * final release -- Narayan Desai Mon, 25 Jun 2007 10:12:27 -0500 bcfg2 (0.9.4-0.0pre4) unstable; urgency=low * new pre -- Narayan Desai Wed, 20 Jun 2007 16:15:39 -0500 bcfg2 (0.9.4-0.0pre3) unstable; urgency=low * new pre -- Narayan Desai Thu, 14 Jun 2007 09:12:50 -0500 bcfg2 (0.9.4-0.0pre2) unstable; urgency=low * new pre -- Narayan Desai Wed, 06 Jun 2007 10:28:05 -0500 bcfg2 (0.9.4-0.0pre1) unstable; urgency=low * new pre -- Narayan Desai Mon, 14 May 2007 21:48:23 -0500 bcfg2 (0.9.3-0.1) unstable; urgency=low * final release -- Narayan Desai Mon, 30 Apr 2007 13:21:30 -0500 bcfg2 (0.9.3-0.0pre7) unstable; urgency=low * new pre -- Narayan Desai Wed, 18 Apr 2007 19:49:05 -0500 bcfg2 (0.9.3-0.0pre6) unstable; urgency=low * new pre -- Narayan Desai Wed, 11 Apr 2007 13:17:10 -0500 bcfg2 (0.9.3-0.0pre5) unstable; urgency=low * new prerelease -- Narayan Desai Fri, 6 Apr 2007 14:07:26 -0500 bcfg2 (0.9.3-0.0pre4) unstable; urgency=low * new prerelease -- Narayan Desai Thu, 5 Apr 2007 14:24:37 -0500 bcfg2 (0.9.3-0.0pre3) unstable; urgency=low * new prerelease -- Narayan Desai Tue, 20 Mar 2007 10:50:37 -0500 bcfg2 (0.9.3-0.0pre2) unstable; urgency=low * new prerelease -- Narayan Desai Mon, 12 Mar 2007 11:25:13 -0500 bcfg2 (0.9.3-0.0pre1) unstable; urgency=low * new prerelease -- Narayan Desai Mon, 12 Mar 2007 10:10:05 -0500 bcfg2 (0.9.2-0.1) unstable; urgency=low * final release -- Narayan Desai Mon, 19 Feb 2007 13:35:24 -0600 bcfg2 (0.9.2-0.0rc6) unstable; urgency=low * final(?) 
release candidate -- Narayan Desai Fri, 16 Feb 2007 16:24:53 -0600 bcfg2 (0.9.2-0.0rc5) unstable; urgency=low * release candidate -- Narayan Desai Thu, 15 Feb 2007 14:59:48 -0600 bcfg2 (0.9.2-0.0rc4) unstable; urgency=low * release candidate -- Narayan Desai Thu, 15 Feb 2007 13:53:17 -0600 bcfg2 (0.9.2-0.0rc3) unstable; urgency=low * release candidate -- Narayan Desai Thu, 15 Feb 2007 13:29:00 -0600 bcfg2 (0.9.2-0.0rc2) unstable; urgency=low * release candidate -- Narayan Desai Thu, 15 Feb 2007 11:03:51 -0600 bcfg2 (0.9.2-0.0rc1) unstable; urgency=low * prerelease -- Narayan Desai Tue, 6 Feb 2007 15:18:53 -0600 bcfg2 (0.9.1d-0.1) unstable; urgency=low * another patch release -- Narayan Desai Tue, 6 Feb 2007 10:00:51 -0600 bcfg2 (0.9.1c-0.1) unstable; urgency=low * another patch release -- Narayan Desai Mon, 5 Feb 2007 12:26:43 -0600 bcfg2 (0.9.1b-0.3) unstable; urgency=low * doubly brown paper bag release -- Narayan Desai Fri, 2 Feb 2007 19:26:12 -0600 bcfg2 (0.9.1a-0.2) unstable; urgency=low * brown paper bag release -- Narayan Desai Thu, 1 Feb 2007 17:49:01 -0600 bcfg2 (0.9.1-0.1) unstable; urgency=low * new release -- Narayan Desai Thu, 1 Feb 2007 15:38:06 -0600 bcfg2 (0.9.0-0.1) unstable; urgency=low * final release -- Narayan Desai Mon, 29 Jan 2007 13:18:38 -0600 bcfg2 (0.9.0-0pre6) unstable; urgency=low * new pre, hopefully near final -- Narayan Desai Wed, 24 Jan 2007 13:17:08 -0600 bcfg2 (0.9.0-0pre5) unstable; urgency=low * new pre -- Narayan Desai Tue, 16 Jan 2007 10:44:54 -0600 bcfg2 (0.9.0-0pre4) unstable; urgency=low * new pre, this one works -- Narayan Desai Thu, 11 Jan 2007 14:58:11 -0600 bcfg2 (0.9.0-0.0pre3) unstable; urgency=low * new pre -- Narayan Desai Thu, 11 Jan 2007 14:01:53 -0600 bcfg2 (0.9.0-0.0pre2) unstable; urgency=low * second pre of 0.9.0 -- Narayan Desai Thu, 11 Jan 2007 12:35:19 -0600 bcfg2 (0.9.0-0.0pre1) unstable; urgency=low * first prerelease of 0.9.0 -- Narayan Desai Tue, 9 Jan 2007 10:07:45 -0600 bcfg2 (0.8.7.3-0.1) unstable; urgency=low * license changes -- Narayan Desai Wed, 27 Dec 2006 15:33:22 -0600 bcfg2 (0.8.7.2-0.1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 21 Dec 2006 09:23:56 -0600 bcfg2 (0.8.7.1-0.1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 11 Dec 2006 12:26:28 -0600 bcfg2 (0.8.7-0.1) unstable; urgency=low * final release -- Narayan Desai Fri, 1 Dec 2006 21:22:16 -0600 bcfg2 (0.8.7-0pre2) unstable; urgency=low * new upstream -- Narayan Desai Thu, 30 Nov 2006 14:48:49 -0600 bcfg2 (0.8.7-0pre1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 27 Nov 2006 10:36:56 -0600 bcfg2 (0.8.6.1-0) unstable; urgency=low * new upstream due to another bug -- Narayan Desai Sun, 12 Nov 2006 10:24:29 -0600 bcfg2 (0.8.6-0) unstable; urgency=low * new upstream -- Narayan Desai Sat, 11 Nov 2006 10:07:40 -0600 bcfg2 (0.8.5-0) unstable; urgency=low * Final release -- Narayan Desai Fri, 3 Nov 2006 10:43:12 -0600 bcfg2 (0.8.5-0pre4) unstable; urgency=low * new upstream -- Narayan Desai Wed, 1 Nov 2006 16:46:45 -0600 bcfg2 (0.8.5-0pre3) unstable; urgency=low * new upstream -- Narayan Desai Wed, 18 Oct 2006 18:42:46 -0500 bcfg2 (0.8.5-0pre2) unstable; urgency=low * new upstream * Fixed bugs reported by gogo, Cory, and mjung -- Narayan Desai Mon, 9 Oct 2006 13:44:46 -0500 bcfg2 (0.8.5-0pre1) unstable; urgency=low * new upstream * client refactor -- Narayan Desai Fri, 6 Oct 2006 16:08:08 -0500 bcfg2 (0.8.4-1) unstable; urgency=low * Change paths to the reporting system * Add server dep on python-cheetah -- Narayan Desai Fri, 15 
Sep 2006 12:53:56 -0500 bcfg2 (0.8.3-2) unstable; urgency=low * fix an init script bug -- Narayan Desai Sun, 10 Sep 2006 16:08:04 -0500 bcfg2 (0.8.3-1) unstable; urgency=low * final release! -- Narayan Desai Tue, 5 Sep 2006 10:08:29 -0500 bcfg2 (0.8.3pre7-1) unstable; urgency=low * new upstream -- Narayan Desai Fri, 1 Sep 2006 14:44:56 -0500 bcfg2 (0.8.3pre6-1) unstable; urgency=low * fix debian cron stuff -- Narayan Desai Fri, 1 Sep 2006 09:31:49 -0500 bcfg2 (0.8.3pre5-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 31 Aug 2006 09:48:52 -0500 bcfg2 (0.8.3pre4-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 29 Aug 2006 16:22:58 -0500 bcfg2 (0.8.3pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 23 Aug 2006 21:36:48 -0500 bcfg2 (0.8.3pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 22 Aug 2006 16:14:09 -0500 bcfg2 (0.8.3pre1-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 15 Aug 2006 12:28:15 -0500 bcfg2 (0.8.2+svn060801-1) unstable; urgency=low * new packaging code * Use dh_installinit to add init-script handling in post/pre scripts, invoke it after compiling python modules. * revert init scripts to lsb 3.0 * fix bcfg2-server pidfile handling. * fix cron scripts -- Sami Haahtinen Tue, 01 Aug 2006 09:53:19 -0500 bcfg2 (0.8.2+svn060725-1) unstable; urgency=low [ Sami Haahtinen ] * Initial upload to debian. * add cron scripts * Update control.in and copyright files in preparation for debian release -- Sami Haahtinen Tue, 25 Jul 2006 10:58:28 +0300 bcfg2 (0.8.2-1) unstable; urgency=low [ Narayan Desai ] * final release [ Sami Haahtinen ] * add cron scripts -- Sami Haahtinen Sat, 22 Jul 2006 23:16:11 +0300 bcfg2 (0.8.2-0pre12) unstable; urgency=low * new upstream -- desai Thu, 20 Jul 2006 16:53:52 -0500 bcfg2 (0.8.2-0pre11) unstable; urgency=low * Update Debian packaging * Update Init Scripts * Move to Standards-Version 3.7.2 (no changes) * build with cdbs * Use python-central -- Sami Haahtinen Mon, 17 Jul 2006 11:11:49 -0500 bcfg2 (0.8.2-0pre10) unstable; urgency=low * new upstream -- desai Sun, 16 Jul 2006 10:53:52 +0300 bcfg2 (0.8.2-0pre9) unstable; urgency=low * new upstream -- desai Fri, 14 Jul 2006 11:11:49 -0500 bcfg2 (0.8.2-0pre8) unstable; urgency=low * new upstream -- Narayan Desai Wed, 12 Jul 2006 14:36:26 -0500 bcfg2 (0.8.2-0pre7) unstable; urgency=low * new upstream -- Narayan Desai Fri, 7 Jul 2006 14:59:30 -0500 bcfg2 (0.8.2-0pre6) unstable; urgency=low * new upstream -- Narayan Desai Mon, 12 Jun 2006 11:49:35 -0500 bcfg2 (0.8.2-0pre5) unstable; urgency=low * new upstream -- Narayan Desai Mon, 12 Jun 2006 13:59:18 -0500 bcfg2 (0.8.2-0pre4) unstable; urgency=low * new upstream -- Narayan Desai Wed, 26 Apr 2006 13:59:18 -0500 bcfg2 (0.8.2-0pre2) unstable; urgency=low * new upstream -- Narayan Desai Wed, 29 Mar 2006 15:41:29 -0600 bcfg2 (0.8.2-0pre1) unstable; urgency=low * new release -- Narayan Desai Thu, 23 Mar 2006 14:10:00 -0600 bcfg2 (0.8.1-1) unstable; urgency=low * new release -- Narayan Desai Wed, 1 Mar 2006 15:11:25 -0600 bcfg2 (0.8.1pre9-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 1 Mar 2006 13:40:27 -0600 bcfg2 (0.8.1pre8-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 28 Feb 2006 10:42:11 -0600 bcfg2 (0.8.1pre7-1) unstable; urgency=low * new upstream -- Narayan Desai Fri, 24 Feb 2006 10:38:52 -0600 bcfg2 (0.8.1pre6-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 21 Feb 2006 10:43:23 -0600 bcfg2 (0.8.1pre5-2) unstable; urgency=low * Fix a few client 
side bugs -- Narayan Desai Mon, 20 Feb 2006 16:35:50 -0600 bcfg2 (0.8.1pre4-1) unstable; urgency=low * new upstream -- Narayan Desai Fri, 17 Feb 2006 13:02:37 -0600 bcfg2 (0.8.1pre3-1) unstable; urgency=low * packaging updates -- Narayan Desai Wed, 15 Feb 2006 09:35:40 -0600 bcfg2 (0.8.1pre1-1) unstable; urgency=low * new upstream prerelease -- Narayan Desai Wed, 25 Jan 2006 09:59:54 -0600 bcfg2 (0.8.0-1) unstable; urgency=low * New upstream release -- Narayan Desai Fri, 20 Jan 2006 15:53:30 -0600 bcfg2 (0.8.0pre5-1) unstable; urgency=low * New upstream release -- Narayan Desai Mon, 16 Jan 2006 12:11:09 -0600 bcfg2 (0.8.0pre4-1) unstable; urgency=low * New upstream release -- Narayan Desai Wed, 11 Jan 2006 16:04:53 -0600 bcfg2 (0.8.0pre3-1) unstable; urgency=low * New upstream release -- Narayan Desai Tue, 10 Jan 2006 15:05:48 -0600 bcfg2 (0.8.0pre2-1) unstable; urgency=low * New upstream release -- Narayan Desai Fri, 6 Jan 2006 14:18:36 -0600 bcfg2 (0.8.0pre1-1) unstable; urgency=low * new upstream * major repository format change -- Narayan Desai Thu, 5 Jan 2006 10:47:04 -0600 bcfg2 (0.7.4pre3-1) unstable; urgency=low * new upstream -- Joey Hagedorn Thu, 22 Dec 2005 11:08:35 -0600 bcfg2 (0.7.4pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 13 Dec 2005 15:35:35 -0600 bcfg2 (0.7.4pre1-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 8 Dec 2005 22:32:22 -0600 bcfg2 (0.7.3-1) unstable; urgency=low * finally a release -- Narayan Desai Fri, 2 Dec 2005 13:02:37 -0600 bcfg2 (0.7.3pre6-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 1 Dec 2005 16:16:08 -0600 bcfg2 (0.7.3pre5-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 1 Dec 2005 11:29:52 -0600 bcfg2 (0.7.3pre4-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 30 Nov 2005 16:38:35 -0600 bcfg2 (0.7.3pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 29 Nov 2005 15:57:57 -0600 bcfg2 (0.7.3pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 28 Nov 2005 15:47:12 -0600 bcfg2 (0.7.3pre1-2) unstable; urgency=low * package rework -- Narayan Desai Sat, 12 Nov 2005 15:16:34 -0600 bcfg2 (0.7.3pre1-1) unstable; urgency=low * new upstream * now using lxml instead of elementtree -- Narayan Desai Sat, 12 Nov 2005 12:43:44 -0600 bcfg2 (0.7.2-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 9 Nov 2005 14:47:38 -0600 bcfg2 (0.7.1-1) unstable; urgency=low * release -- Narayan Desai Mon, 7 Nov 2005 16:26:30 -0600 bcfg2 (0.7.1pre6-1) unstable; urgency=low * new upstream (getting close to release) -- Narayan Desai Thu, 3 Nov 2005 15:46:14 -0600 bcfg2 (0.7.1pre5-1) unstable; urgency=low * new upstream (now with timing results) -- Narayan Desai Fri, 28 Oct 2005 11:21:28 -0500 bcfg2 (0.7.1pre4-1) unstable; urgency=low * new upstream (nearing final release) -- Narayan Desai Tue, 25 Oct 2005 01:32:40 -0500 bcfg2 (0.7.1pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 24 Oct 2005 04:07:45 -0500 bcfg2 (0.7.1pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 17 Oct 2005 23:56:02 -0500 bcfg2 (0.7.1pre1-1) unstable; urgency=low * new dev version -- Narayan Desai Thu, 13 Oct 2005 13:54:24 -0500 bcfg2 (0.7-1) unstable; urgency=low * final release -- Narayan Desai Wed, 5 Oct 2005 15:38:50 -0500 bcfg2 (0.7rc2-1) unstable; urgency=low * new upstream -- Narayan Desai Sat, 1 Oct 2005 23:12:49 -0500 bcfg2 (0.7pre1-1) unstable; urgency=low * jump to 0.7 -- Narayan Desai Thu, 29 Sep 2005 13:45:48 -0500 bcfg2 (0.6.11pre9-1) unstable; 
urgency=low * new upstream -- Narayan Desai Wed, 28 Sep 2005 15:22:03 -0500 bcfg2 (0.6.11pre8-2) unstable; urgency=low * new upstream -- Narayan Desai Wed, 28 Sep 2005 11:11:39 -0500 bcfg2 (0.6.11pre7-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 27 Sep 2005 15:10:24 -0500 bcfg2 (0.6.11pre6-1) unstable; urgency=low * new upstream -- Narayan Desai Fri, 23 Sep 2005 16:35:49 -0500 bcfg2 (0.6.11pre5-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 21 Sep 2005 04:34:06 -0500 bcfg2 (0.6.11pre4-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 14 Sep 2005 13:20:26 -0500 bcfg2 (0.6.11pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 13 Sep 2005 10:47:52 -0500 bcfg2 (0.6.11pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 8 Sep 2005 15:59:32 -0500 bcfg2 (0.6.11pre1-2) unstable; urgency=low * new upstream -- Narayan Desai Wed, 7 Sep 2005 14:15:58 -0500 bcfg2 (0.6.10-1) unstable; urgency=low * new release -- Narayan Desai Fri, 2 Sep 2005 12:42:03 -0500 bcfg2 (0.6.10rc1-1) unstable; urgency=low * new upstream, now with report support -- Narayan Desai Fri, 2 Sep 2005 10:57:27 -0500 bcfg2 (0.6.9.3pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 17 Aug 2005 15:59:13 -0500 bcfg2 (0.6.9.3pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 3 Aug 2005 10:20:16 -0500 bcfg2 (0.6.9.3pre1-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 2 Aug 2005 16:24:49 -0500 bcfg2 (0.6.9.2-1) unstable; urgency=low * Here goes one more shot -- Narayan Desai Thu, 14 Jul 2005 10:01:46 -0500 bcfg2 (0.6.9.1-1) unstable; urgency=low * minor bugfix -- Narayan Desai Thu, 14 Jul 2005 09:27:04 -0500 bcfg2 (0.6.9-1) unstable; urgency=low * release -- Narayan Desai Wed, 13 Jul 2005 15:07:58 -0500 bcfg2 (0.6.9pre3-1) unstable; urgency=low * new version -- Narayan Desai Wed, 29 Jun 2005 16:40:22 -0500 bcfg2 (0.6.9pre2-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 29 Jun 2005 13:12:07 -0500 bcfg2 (0.6.9pre1-1) unstable; urgency=low * new version -- Narayan Desai Tue, 28 Jun 2005 16:17:27 -0500 bcfg2 (0.6.8-2) unstable; urgency=low * minor bugfixes -- Narayan Desai Fri, 13 May 2005 15:18:39 -0500 bcfg2 (0.6.8-1) unstable; urgency=low * New upstream -- Narayan Desai Thu, 12 May 2005 13:01:58 -0500 bcfg2 (0.6.7-2) unstable; urgency=low * bug fix -- Narayan Desai Mon, 11 Apr 2005 13:52:07 -0500 bcfg2 (0.6.7-1) unstable; urgency=low * client refactor -- Narayan Desai Mon, 11 Apr 2005 12:54:59 -0500 bcfg2 (0.6.6-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 29 Mar 2005 10:23:11 -0600 bcfg2 (0.6.5-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 16 Feb 2005 15:50:43 -0600 bcfg2 (0.6.4-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 1 Feb 2005 16:31:00 -0600 bcfg2 (0.6.4pre1-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 1 Feb 2005 10:59:24 -0600 bcfg2 (0.6.3-4) unstable; urgency=low * More bugfixes -- Narayan Desai Thu, 27 Jan 2005 13:20:10 -0600 bcfg2 (0.6.3-3) unstable; urgency=low * Fixes for TG -- Narayan Desai Thu, 20 Jan 2005 16:45:20 -0600 bcfg2 (0.6.3-2) unstable; urgency=low * Fixed some stats counting and format issues -- Brian Pellin Tue, 11 Jan 2005 15:15:59 -0600 bcfg2 (0.6.3-1) unstable; urgency=low * New Upstream version -- Brian Pellin Tue, 11 Jan 2005 13:53:57 -0600 bcfg2 (0.6.2-2) unstable; urgency=low * Several bug fixes over last release -- Brian Pellin Mon, 10 Jan 2005 12:24:37 -0600 bcfg2 (0.6.2-1) unstable; urgency=low * New 
upstream release -- Brian Pellin Fri, 7 Jan 2005 11:02:47 -0600 bcfg2 (0.6.1-1) unstable; urgency=low * actual release -- Narayan Desai Thu, 16 Dec 2004 14:33:22 -0600 bcfg2 (0.6.1-0) unstable; urgency=low * new upstream -- Narayan Desai Tue, 14 Dec 2004 18:43:08 -0600 bcfg2 (0.6-1) unstable; urgency=low * final release -- Narayan Desai Fri, 12 Nov 2004 19:56:55 -0600 bcfg2 (0.6pre8-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 11 Nov 2004 09:48:37 -0600 bcfg2 (0.6pre7-1) unstable; urgency=low * version bump -- Narayan Desai Wed, 10 Nov 2004 10:57:22 -0600 bcfg2 (0.6pre6-3) unstable; urgency=low * debian bugfixes -- Narayan Desai Thu, 4 Nov 2004 10:57:22 -0600 bcfg2 (0.6pre6-2) unstable; urgency=low * new upstream -- Narayan Desai Thu, 4 Nov 2004 09:47:02 -0600 bcfg2 (0.6pre6-1) unstable; urgency=low * new upstream -- Narayan Desai Mon, 1 Nov 2004 13:38:54 -0600 bcfg2 (0.6pre4-2) unstable; urgency=low * new upstream * lintian fixes -- Narayan Desai Fri, 29 Oct 2004 22:30:28 -0500 bcfg2 (0.6pre4-1) unstable; urgency=low * add schema validation * new upstream -- Narayan Desai Fri, 29 Oct 2004 22:30:14 -0500 bcfg2 (0.6pre3-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 27 Oct 2004 14:40:49 -0500 bcfg2 (0.6pre1-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 26 Oct 2004 11:07:11 -0500 bcfg2 (0.5.2-1) unstable; urgency=low * new upstream -- Narayan Desai Fri, 22 Oct 2004 13:19:35 -0500 bcfg2 (0.5.1-1) unstable; urgency=low * new upstream -- Narayan Desai Tue, 19 Oct 2004 14:12:00 -0500 bcfg2 (0.5-2c) unstable; urgency=low * new upstream -- Narayan Desai Tue, 12 Oct 2004 09:32:37 -0500 bcfg2 (0.4-1) unstable; urgency=low * new upstream -- Narayan Desai Wed, 6 Oct 2004 16:15:21 -0500 bcfg2 (0.3.2-1) unstable; urgency=low * new upstream -- Narayan Desai Thu, 2 Sep 2004 10:41:48 -0500 bcfg2 (0.3-1) unstable; urgency=low * New upstream -- Narayan Desai Tue, 31 Aug 2004 09:39:28 -0500 bcfg2 (0.2-1) unstable; urgency=low * Initial alpha -- Narayan Desai Wed, 11 Aug 2004 09:57:37 -0500 debian/compat000066400000000000000000000000021303523157100134760ustar00rootroot000000000000007 debian/control000066400000000000000000000056161303523157100137130ustar00rootroot00000000000000Source: bcfg2 Section: admin Priority: optional Maintainer: Arto Jantunen Uploaders: Sami Haahtinen Build-Depends: debhelper (>= 7.0.50~), python (>= 2.6), python-setuptools, python-sphinx (>= 1.0.7+dfsg) | python3-sphinx, python-lxml, python-daemon, python-boto, python-cherrypy3, python-gamin, python-genshi, python-pyinotify, python-m2crypto, python-doc, python-mock, python-mock-doc, dh-python Standards-Version: 3.8.0.0 Homepage: http://bcfg2.org/ Package: bcfg2 Architecture: all Depends: ${python:Depends}, ${misc:Depends}, debsums, python-apt (>= 0.7.91), ucf, lsb-base (>= 3.1-9), python (>= 2.6) Description: Configuration management client Bcfg2 is a configuration management system that generates configuration sets for clients bound by client profiles. 
bcfg2 is the client portion of the Bcfg2 system, which installs configuration images provided by bcfg2-server Package: bcfg2-server Architecture: all Depends: ${python:Depends}, ${misc:Depends}, python-lxml (>= 0.9), libxml2-utils (>= 2.6.23), lsb-base (>= 3.1-9), ucf, bcfg2 (= ${binary:Version}), openssl, python (>= 2.6), python-pyinotify | python-gamin, python-daemon, python-genshi (>= 0.4.4) Recommends: graphviz, patch Suggests: python-cheetah, python-profiler, python-django (>= 1.3), mail-transport-agent, bcfg2-doc (= ${binary:Version}) Description: Configuration management server Bcfg2 is a configuration management system that generates configuration sets for clients bound by client profiles. bcfg2-server is the server for bcfg2 clients, which generates configuration sets and stores statistics of client system states. Package: bcfg2-web Architecture: all Depends: ${python:Depends}, ${misc:Depends}, bcfg2-server (= ${binary:Version}), python-django (>= 1.3), python-django-south (>= 0.7.5) Suggests: python-mysqldb, python-psycopg2, python-sqlite, libapache2-mod-wsgi Description: Configuration management web interface Bcfg2 is a configuration management system that generates configuration sets for clients bound by client profiles. bcfg2-web is the reporting server for bcfg2. Package: bcfg2-utils Architecture: all Depends: ${python:Depends}, ${misc:Depends}, bcfg2 (= ${binary:Version}) Suggests: bcfg2-doc (= ${binary:Version}) Description: Configuration management helper package Bcfg2 is a configuration management system that generates configuration sets for clients bound by client profiles. bcfg2-utils contains scripts for gathering useful information for config creation. Package: bcfg2-doc Section: doc Architecture: all Depends: ${sphinxdoc:Depends}, ${misc:Depends} Description: Configuration management system documentation Bcfg2 is a configuration management system that generates configuration sets for clients bound by client profiles. bcfg2-doc is the documentation for bcfg2. debian/copyright000066400000000000000000000051261303523157100142370ustar00rootroot00000000000000This package was debianized by Sami Haahtinen. It was downloaded from http://trac.mcs.anl.gov/projects/bcfg2/ Upstream Author: Narayan Desai Copyright: 2004 - 2013: University of Chicago License: Unless otherwise specified, files are copyright by the following: Copyright (c) 2004, University of Chicago. See the COPYRIGHT file at the top-level directory of this distribution and at https://github.com/Bcfg2/bcfg2/blob/master/COPYRIGHT. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. GOVERNMENT LICENSE Portions of this material resulted from work developed under a U.S. Government Contract and are subject to the following license: the Government is granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable worldwide license in this computer software to reproduce, prepare derivative works, and perform publicly and display publicly. DISCLAIMER This computer code material was prepared, in part, as an account of work sponsored by an agency of the United States Government. Neither the United States, nor the University of Chicago, nor any of their employees, makes any warranty express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. debian/rules000077500000000000000000000017451303523157100133670ustar00rootroot00000000000000#!/usr/bin/make -f # Lucid does not have dh_python2, but we would like to be able to use # this rules file to build on lucid as well. WITH_PYTHON2 = $(shell test -f /usr/bin/dh_python2 && echo "--with python2") WITH_SPHINXDOC = $(shell test -f /usr/bin/dh_sphinxdoc && echo "--with sphinxdoc") %: dh $@ ${WITH_PYTHON2} ${WITH_SPHINXDOC} override_dh_installinit: # Install bcfg2 initscript without starting it on postinst dh_installinit --package=bcfg2 --no-start # Install bcfg2-server initscript without starting it on postinst dh_installinit --package=bcfg2-server --no-start # Install bcfg2-report-collector initscript without starting it on postinst dh_installinit --package=bcfg2-server --name=bcfg2-report-collector --no-start override_dh_auto_build: dh_auto_build python setup.py build_sphinx override_dh_auto_clean: dh_auto_clean rm -rf build override_dh_auto_install: dh_auto_install install -m 644 -D tools/bcfg2-completion.bash debian/tmp/etc/bash_completion.d/bcfg2 debian/source/000077500000000000000000000000001303523157100136005ustar00rootroot00000000000000debian/source/format000066400000000000000000000000041303523157100150050ustar00rootroot000000000000001.0 debian/watch000066400000000000000000000001751303523157100133340ustar00rootroot00000000000000# format version number, currently 3; this line is compulsory! 
version=3 ftp://ftp.mcs.anl.gov/pub/bcfg/bcfg2-(.*)\.tar\.gz doc/000077500000000000000000000000001303523157100116235ustar00rootroot00000000000000doc/_static/000077500000000000000000000000001303523157100132515ustar00rootroot00000000000000doc/_static/bcfg2.css000066400000000000000000000011511303523157100147440ustar00rootroot00000000000000/* -- tables ---------------------------------------------------------------- */ table.docutils { border: 0 solid #dce; border-collapse: collapse; } table.docutils td, table.docutils th { padding: 2px 5px 2px 5px; border-left: 0; background-color: #bebebe; } table.field-list td, table.field-list th { border: 0 !important; } table.footnote td, table.footnote th { border: 0 !important; } table.docutils th { border-top: 1px solid #2f4f4f; background-color: #708090; } th { text-align: left; padding-right: 5px; } th.head { text-align: center; }
doc/_static/bcfg2_logo.png000066400000000000000000000647451303523157100160020ustar00rootroot00000000000000[binary PNG image data omitted]
doc/_static/favicon.ico000066400000000000000000000124661303523157100154020ustar00rootroot00000000000000[binary icon image data omitted]
doc/_templates/000077500000000000000000000000001303523157100137605ustar00rootroot00000000000000doc/_templates/indexsidebar.html000066400000000000000000000011531303523157100173070ustar00rootroot00000000000000

Docs for other versions

doc/_templates/layout.html000066400000000000000000000012721303523157100161650ustar00rootroot00000000000000{% extends "!layout.html" %} {% block extrahead %} {{ super() }} {% endblock %} {% block rootrellink %}
  • home
  • help
  • documentation »
  • {% endblock %} {% block relbar1 %}
    sampledoc
    {{ super() }} {% endblock %} doc/appendix/000077500000000000000000000000001303523157100134335ustar00rootroot00000000000000doc/appendix/articles.txt000066400000000000000000000020121303523157100157750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-articles: ======== Articles ======== * Configuration and change management with Bcfg2: "The Dean" - The powerful Bcfg2 provides a sophisticated environment for centralized configuration management. * Marko Jung, Nils Magnus * In the english 'Linux Magazine', 04/09, pages 30-35, April 2009 * The `Bcfg2 code listings `_ for the article are public. * `Konfigurations- und Change-Management in Bcfg2 `_ * Marko Jung, Nils Magnus * In the german 'Linux Magazin', 10/08, pages 76-80, September 2008 * The `code listings `_ for the article are public. * `System Management Methodologies with Bcfg2 `_ * Narayan Desai, Rick Bradshaw and Joey Hagedorn * In ;login: Magazine, Volume 31, #1, pages 11-18, February 2006 doc/appendix/books.txt000066400000000000000000000002651303523157100153140ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-books: ===== Books ===== * `Configuration Management with Bcfg2 `_ * Narayan Desai and Cory Lueninghoener doc/appendix/configuration.txt000066400000000000000000000003621303523157100170440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-configuration: ===================== Example configuration ===================== This section contains useful configuration of additional tools. .. toctree:: :maxdepth: 1 :glob: configuration/* doc/appendix/configuration/000077500000000000000000000000001303523157100163025ustar00rootroot00000000000000doc/appendix/configuration/mrepo.txt000066400000000000000000000055031303523157100201700ustar00rootroot00000000000000.. -*- mode: rst -*- .. _mrepo: http://dag.wieers.com/home-made/mrepo/ .. _appendix-configuration-mrepo: mrepo ===== .. This section describes how to setup an `mrepo`_ mirror. `mrepo`_ builds a local APT/Yum RPM repository from local ISO files, downloaded updates, and extra packages from 3rd party repositories. It takes care of setting up the ISO files, downloading the RPMs, configuring HTTP access and providing PXE/TFTP resources for remote network installations. 
Sample mrepo configuration -------------------------- :: ### Configuration file for mrepo ### The [main] section allows to override mrepo's default settings ### The mrepo-example.conf gives an overview of all the possible settings [main] srcdir = /var/mrepo/src wwwdir = /var/www/mrepo confdir = /etc/mrepo.conf.d arch = x86_64 mailto = smtp-server = localhost hardlink = yes shareiso = yes rsync-timeout = 3600 [centos5] name = CentOS Server $release ($arch) release = 5 arch = x86_64 metadata = yum repomd # ISO images iso = centos-$release-server-$arch-DVD.iso #addons = rsync://mirrors.kernel.org/centos/$release/addons/$arch/RPMS centosplus = rsync://mirrors.kernel.org/centos/$release/centosplus/$arch/RPMS extras = rsync://mirrors.kernel.org/centos/$release/extras/$arch/RPMS #fasttrack = rsync://mirrors.kernel.org/centos/$release/fasttrack/$arch/RPMS os = rsync://mirrors.kernel.org/centos/$release/os/$arch/CentOS updates = rsync://mirrors.kernel.org/centos/$release/updates/$arch/RPMS dag = http://apt.sw.be/redhat/el$release/en/$arch/RPMS.dag dries = http://apt.sw.be/redhat/el$release/en/$arch/RPMS.dries rpmforge = http://apt.sw.be/redhat/el$release/en/$arch/RPMS.rpmforge ### Any other section is considered a definition for a distribution ### You can put distribution sections in /etc/mrepo.conf.d/ ### Examples can be found in the documentation at: ### /usr/share/doc/mrepo-0.8.6/dists/. Update the repositories ----------------------- :: mrepo -ug Example sources.xml file ------------------------ .. code-block:: xml centos-5.4 http://mrepo/centos5-x86_64/RPMS.os x86_64 centos-5.4 http://mrepo/centos5-x86_64/RPMS.updates x86_64 centos-5.4 http://mrepo/centos5-x86_64/RPMS.extras x86_64 doc/appendix/files.txt000066400000000000000000000003461303523157100153010ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-files: ============= Example files ============= In this section are some examples for getting started with a more in-depth usage of Bcfg2. .. toctree:: :maxdepth: 1 :glob: files/* doc/appendix/files/000077500000000000000000000000001303523157100145355ustar00rootroot00000000000000doc/appendix/files/mysql.txt000066400000000000000000000032671303523157100164530ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _appendix-files-mysql: .. Author: Patrick Ruckstuhl MySQL example ============= I had some time ago to continue with putting my configuration into Bcfg2 and maybe this helps someone else. I added a new bundle: .. code-block:: xml The ``users.sh`` script looks like this: .. code-block:: sh #!/bin/sh mysql --defaults-extra-file=/etc/mysql/debian.cnf mysql \ < /root/bcfg2-install/mysql/users.sql On debian there is a user account in ``/etc/mysql/debian.cnf`` automatically created, but you could also (manually) create a user in the database that has enough permissions and add the login information in a file yourself. 
This file looks like this:: [client] host = localhost user = debian-sys-maint password = XXXXXXXXXX The ``users.sql`` looks like this:: DELETE FROM db; INSERT INTO db VALUES ('localhost', 'phpmyadmin', 'pma', 'Y', 'Y', 'Y', 'Y', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N'); DELETE FROM user WHERE User <> 'debian-sys-maint'; INSERT INTO user VALUES ('localhost', 'root', 'XXXXXXXXXXX', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', '', '', '', '', 0, 0, 0); INSERT INTO user VALUES ('localhost', 'pma', '', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', '', '', '', '', 0, 0, 0); FLUSH PRIVILEGES; doc/appendix/files/ntp.txt000066400000000000000000000070721303523157100161050ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-files-ntp: .. Author: Jason Pepas ntp example =========== Here is a series of example configurations for Bcfg2, each introducing another layer of functionality. * After each change, run ``bcfg-repo-validate -v`` * Run the server with ``bcfg2-server -v`` * Update the client with ``bcfg2 -v -d -n`` (will not actually make client changes) Package only ------------ Our example starts with the bare minimum configuration setup. We have a client, a profile group, a list of packages, and an NTP bundle. ``Metadata/clients.xml``: .. code-block:: xml ``Metadata/groups.xml``: .. code-block:: xml ``Bundler/ntp.xml``: .. code-block:: xml ``Pkgmgr/packages.xml``: .. code-block:: xml (This can also be performed more elegantly with the :ref:`server-plugins-generators-packages` plugin.) Add service ----------- Configure the service, and add it to Rules. ``Rules/services.xml``: .. code-block:: xml ``Bundler/ntp.xml``: .. code-block:: xml Add config file --------------- Setup an ``etc/`` directory structure, and add it to the base:: # cat Cfg/etc/ntp.conf/ntp.conf server ntp1.utexas.edu ``Bundler/ntp.xml``: .. code-block:: xml Create a bundle --------------- Bundles allow the grouping of related configuration entries that are used to provide a single service. This is done for several reasons: * Grouping related things in one place makes it easier to add those entries for multiple groups of clients * Grouping entries into bundles makes their validation occur collectively. This means that config files can override the contents of packages. Also, config files are rechecked after packages are upgraded, so that they can be repaired if the package install clobbered them. * Services associated with a bundle get restarted whenever any entity in that bundle is modified. This ensures that new configuration files and software are used after installation. The config file, package, and service are really all related components describing the idea of an ntp client, so they should be logically grouped together. We use a bundle to accomplish this. ``Bundler/ntp.xml``: .. code-block:: xml After this bundle is created, it must be associated with a group (or groups). Add a bundle child element to the group(s) which should install this bundle. ``Metadata/groups.xml``: .. code-block:: xml ... ... Once this bundle is created, a client reconfigure will install these entries. If any are modified, then the *ntpd* service will be restarted. If you only want ntp configurations to be updated (and nothing else), the bcfg2 client can be run with a ``-b `` option that will only update entries in the specified bundle. 
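For example, a client run restricted to just the bundle defined in this guide (a sketch; add or drop verbosity flags to taste) would look like::

    bcfg2 -vq -b ntp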
doc/appendix/guides.txt000066400000000000000000000003121303523157100154500ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides: ====== Guides ====== This section contains platform-specific quickstart guides and howtos around Bcfg2. .. toctree:: :maxdepth: 1 :glob: guides/* doc/appendix/guides/000077500000000000000000000000001303523157100147135ustar00rootroot00000000000000doc/appendix/guides/authentication.txt000066400000000000000000000123501303523157100204740ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-authentication: ============== Authentication ============== Scenarios ========= 1. Cluster nodes that are frequently rebuilt Default settings work well; machines do not float, and a per-client password is not required. 2. :ref:`appendix-guides-nat_howto` * Build client records in advance with ``bcfg2-admin``, setting a uuid for each new client. * Set the address attribute for each to the address of the NAT. * Optionally, set a per-client password for each, and set into secure mode. .. note:: This will require the use of the uuid and password from each client, and will require that they come through the NAT address. Building bcfg2.conf automatically ================================= This is a :ref:`Cheetah template ` that automatically constructs per-client bcfg2.conf from the per-client metadata:: [communication] #if $self.metadata.uuid != None user = $self.metadata.uuid #end if #if $self.metadata.password != None password = $self.metadata.password #else password = my-password-foobat #end if [components] bcfg2 = https://localhost:6789 In this setup, this will cause any clients that have uuids established to be set to use them in ``bcfg2.conf``. It will also cause any clients with passwords set to use them instead of the global password. How Authentication Works ======================== #. First, the client is associated with a client record. If the client specifies a uuid, it uses this instead of the results of a dns or address lookup. #. Next, the ip address is verified against the client record. If the address doesn't match, then the client must be set to floating='true' #. Finally, the password is verified. If the client is set to secure mode, the only its per-client password is accepted. If it is not set to secure mode, then either the global password or per-client password will be accepted Failure during any of these stages results in authentication failure. Note that clients set into secure mode that do not have per-client passwords set will not be able to connect. SSL Cert-based client authentication ==================================== SSL-based client authentication is supported. This requires several things: #. Certificate Authority (to sign all keys) #. Server key and cert signed by the CA #. Client key and cert signed by the CA A variety of CAs can be used, but these keys can be simply generated using the following set of steps: #. Setup a CA http://www.flatmtn.com/article/setting-openssl-create-certificates #. Create keys for each client and server, signing them with the CA signing cert http://www.flatmtn.com/article/setting-ssl-certificates-apache .. note:: The client CN must be the FQDN of the client (as returned by a reverse DNS lookup of the ip address. 
Otherwise, you will end up with an error message on the client that looks like:: Server failure: Protocol Error: 401 Unauthorized Failed to download probes from bcfg2 Server Failure You will also see an error message on the server that looks something like:: cmssrv01 bcfg2-server[9785]: Got request for cmssrv115 from incorrect address 131.225.206.122 cmssrv01 bcfg2-server[9785]: Resolved to cmssrv115.fnal.gov #. Distribute the keys and certs to the appropriate locations #. Copy the ca cert to clients, so that the server can be authenticated Clients authenticating themselves with a certificate will be authenticated that way first; clients can be setup to either authenticate solely with certs, use certs with a fallback to password, or password only. Also a bootstrap mode will be added shortly; this will allow a client to authenticate with a password its first time, requiring a certificate all subsequent times. This behavior can be controlled through the use of the auth attribute in ``Metadata/clients.xml``:: Allowed values are: +-------------------+------------------------------------------+ | Auth Type | Meaning | +===================+==========================================+ | ``cert`` | Certificates must be used | +-------------------+------------------------------------------+ | ``cert+password`` | Certificate or password may be used. If | | | a certificate is used, the password must | | | also be used. | +-------------------+------------------------------------------+ | ``bootstrap`` | Password can be used for one client run, | | | after that only certificate is allowed | +-------------------+------------------------------------------+ ``cert+password`` is the default. This can be changed by setting the ``authentication`` parameter in the ``[communication]`` section of ``bcfg2.conf``. For instance, to set ``bootstrap`` mode as the global default, you would add the following to ``bcfg2.conf``:: [communication] authentication = bootstrap ``bootstrap`` mode is currently incompatible with the :ref:`server-plugins-grouping-metadata-clients-database`. doc/appendix/guides/bootstrap.txt000066400000000000000000000024501303523157100174720ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-bootstrap: ========= Bootstrap ========= Once you have your bcfg2 server setup and working, a common next step to take is automating the addition of new clients. The method for bootstrapping your clients will vary depending on your needs. The simplest way to go about this is to create a public default group in ``Metadata/groups.xml``. Example: .. code-block:: xml This allows clients to freely associate themselves with this group so that you will not be required to manually add them to ``Metadata/clients.xml`` prior to running the client. .. note:: Reverse DNS will need to work in order to automate the process of bootstrapping clients without first adding them to ``Metadata/clients.xml``. There are command line options available on the client which allow you to specify the options that are normally found in the client's ``/etc/bcfg2.conf``:: bcfg2 -x password -p basic -S https://bcfg2-server:6789 The above command will add the client to ``Metadata/clients.xml`` with the profile *basic*. Generally, you should include ``/etc/bcfg2.conf`` in the configuration given to the client by the bcfg2 server in this initial run to avoid specifying these options on future runs. doc/appendix/guides/centos.txt000066400000000000000000000352621303523157100167570ustar00rootroot00000000000000.. 
-*- mode: rst -*- .. _EPEL: http://fedoraproject.org/wiki/EPEL .. _appendix-guides-centos: ===================== Quickstart for CentOS ===================== This is a complete getting started guide for CentOS. With this document you should be able to install a Bcfg2 server and a Bcfg2 client. Install Bcfg2 ============= The fastest way to get Bcfg2 onto your system is to use Yum or your preferred package management tool. We'll be using the ones that are distributed through EPEL_, but depending on your aversion to risk you could download an RPM from other places as well. See :ref:`getting_started-using_bcfg2-with-centos` for information about building Bcfg2 from source and making your own packages. Using EPEL ---------- Make sure EPEL_ is a valid repository on your server. The `instructions `_ on how to do this basically say:: [root@centos ~]# rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm .. note:: You will have to adjust this command to match your architecture and the current EPEL release. Install the bcfg2-server and bcfg2 RPMs:: [root@centos ~]# yum install bcfg2-server bcfg2 Your system should now have the necessary software to use Bcfg2. The next step is to set up your Bcfg2 :term:`repository`. Initialize your repository ========================== Now that you're done with the install, you need to initialize your repository and setup your ``/etc/bcfg2.conf``. ``bcfg2-admin init`` is a tool which allows you to automate this:: [root@centos ~]# bcfg2-admin init Store bcfg2 configuration in [/etc/bcfg2.conf]: Location of bcfg2 repository [/var/lib/bcfg2]: Input password used for communication verification (without echoing; leave blank for a random): What is the server's hostname: [centos] Input the server location [https://centos:6789]: Input base Operating System for clients: 1: Redhat/Fedora/RHEL/RHAS/Centos 2: SUSE/SLES 3: Mandrake 4: Debian 5: Ubuntu 6: Gentoo 7: FreeBSD : 1 Generating a 2048 bit RSA private key .........................+++ ..................+++ writing new private key to '/etc/bcfg2.key' ----- Signature ok subject=/C=US=ST=Illinois/L=Argonne/CN=centos Getting Private key Repository created successfuly in /var/lib/bcfg2 Change responses as necessary. Start the server ================ You are now ready to start your bcfg2 server for the first time:: [root@centos ~]# /sbin/service bcfg2-server start To verify that everything started ok, look for the running daemon and check the logs:: [root@centos ~]# /etc/init.d/service bcfg2-server status [root@centos ~]# tail /var/log/messages Mar 29 12:42:26 centos bcfg2-server[5093]: service available at https://centos:6789 Mar 29 12:42:26 centos bcfg2-server[5093]: serving bcfg2-server at https://centos:6789 Mar 29 12:42:26 centos bcfg2-server[5093]: serve_forever() [start] Mar 29 12:42:41 centos bcfg2-server[5093]: Handled 16 events in 0.007s Run bcfg2 to be sure you are able to communicate with the server:: [root@centos ~]# bcfg2 -vqn No ca is specified. Cannot authenticate the server with SSL. No ca is specified. Cannot authenticate the server with SSL. Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Phase: initial Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 208 Phase: final Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 208 No ca is specified. Cannot authenticate the server with SSL. 
The ca message is just a warning, meaning that the client does not have sufficient information to verify that it is talking to the correct server. This can be fixed by distributing the ca certificate from the server to all clients. By default, this file is available in ``/etc/bcfg2.crt`` on the server. Copy this file to the client (with a bundle) and add the ca option to ``bcfg2.conf`` pointing at the file, and the client will be able to verify it is talking to the correct server upon connection:: [root@centos ~]# cat /etc/bcfg2.conf [communication] password = N41lMNeW ca = /etc/bcfg2.crt [components] bcfg2 = https://centos:6789 Now if you run the client, no more warning:: [root@centos ~]# bcfg2 -vqn Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Phase: initial Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 208 Phase: final Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 208 Bring your first machine under Bcfg2 control ============================================ Now it is time to get your first machine's configuration into your Bcfg2 :term:`repository`. Let's start with the server itself. Setup the :ref:`server-plugins-generators-packages` plugin ---------------------------------------------------------- First, replace **Pkgmgr** with **Packages** in the plugins line of ``bcfg2.conf``. Then create Packages layout (as per :ref:`packages-exampleusage`) in ``/var/lib/bcfg2`` .. note:: I am using the rawurl syntax here since we are using `mrepo`_ to manage our yum mirrors. .. _mrepo: http://dag.wieers.com/home-made/mrepo/ .. code-block:: xml x86_64 x86_64 x86_64 To make these sources apply to our centos 5 clients, we need to modify our Metadata. Let's add a **centos5** group which inherits a **centos** group (this should replace the existing **redhat** group) present in ``/var/lib/bcfg2/Metadata/groups.xml``. The resulting file should look something like this .. code-block:: xml .. note:: When editing your xml files by hand, it is useful to occasionally run `bcfg2-lint` to ensure that your xml validates properly. The final thing we need is for the client to have the proper arch group membership. For this, we will make use of the :ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes plugin. Add Probes to your plugins line in ``bcfg2.conf`` and create the Probe.:: [root@centos ~]# grep plugins /etc/bcfg2.conf plugins = Bundler,Cfg,...,Probes [root@centos ~]# mkdir /var/lib/bcfg2/Probes [root@centos ~]# cat /var/lib/bcfg2/Probes/groups #!/bin/sh echo "group:`uname -m`" Now we restart the bcfg2-server:: [root@centos ~]# /etc/init.d/bcfg2-server restart If you now ``tail -f /var/log/messages``, you will see the Packages plugin in action, updating the cache. Start managing packages ----------------------- Add a base-packages bundle. Let's see what happens when we just populate it with the *yum* package. .. code-block:: xml You need to reference the bundle from your Metadata. The resulting profile group might look something like this .. 
code-block:: xml Now if we run the client, we can see what this has done for us.:: [root@centos ~]# bcfg2 -vqn Running probe groups Probe groups has result: x86_64 Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Package pam failed verification. Phase: initial Correct entries: 94 Incorrect entries: 1 Total managed entries: 95 Unmanaged entries: 113 In dryrun mode: suppressing entry installation for: Package:pam Phase: final Correct entries: 94 Incorrect entries: 1 Package:pam Total managed entries: 95 Unmanaged entries: 113 Interesting, our **pam** package failed verification. What does this mean? Let's have a look:: [root@centos ~]# rpm --verify pam ....L... c /etc/pam.d/system-auth Sigh, it looks like the default RPM install for pam fails to verify using its own verification process (trust me, it's not the only one). At any rate, I was able to get rid of this particular issue by removing the symlink and running ``yum reinstall pam``. As you can see, the Packages plugin has generated the dependencies required for the yum package automatically. The ultimate goal should be to move all the packages from the **Unmanaged** entries section to the **Managed** entries section. So, what exactly *are* those Unmanaged entries?:: [root@centos ~]# bcfg2 -veqn Running probe groups Probe groups has result: x86_64 Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Extra Package openssh-clients 4.3p2-36.el5_4.4.x86_64. Extra Package libuser 0.54.7-2.1el5_4.1.x86_64. ... Phase: initial Correct entries: 95 Incorrect entries: 0 Total managed entries: 95 Unmanaged entries: 113 Phase: final Correct entries: 95 Incorrect entries: 0 Total managed entries: 95 Unmanaged entries: 113 Package:at Package:avahi Package:avahi-compat-libdns_sd ... Now you can go through these and continue adding the packages you want to your Bundle. After a while, I ended up with a minimal bundle that looks like this .. code-block:: xml Now when I run the client, you can see I have only one unmanaged package:: [root@centos ~]# bcfg2 -veqn Running probe groups Probe groups has result: x86_64 Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Phase: initial Correct entries: 187 Incorrect entries: 0 Total managed entries: 187 Unmanaged entries: 16 Phase: final Correct entries: 187 Incorrect entries: 0 Total managed entries: 187 Unmanaged entries: 16 Service:atd Service:avahi-daemon Service:bcfg2-server ... Now, running the client shows only unmanaged Service entries. Woohoo! Manage services --------------- Now let's clear up the unmanaged service entries by adding the following entries to our bundle. .. code-block:: xml ...and bind them in Rules .. 
code-block:: xml [root@centos ~]# cat /var/lib/bcfg2/Rules/services.xml Now we run the client and see there are no more unmanaged entries!:: [root@centos ~]# bcfg2 -veqn Running probe groups Probe groups has result: x86_64 Loaded plugins: fastestmirror Loading mirror speeds from cached hostfile Excluding Packages in global exclude list Finished Loaded tool drivers: Action Chkconfig POSIX YUM Phase: initial Correct entries: 205 Incorrect entries: 0 Total managed entries: 205 Unmanaged entries: 0 Phase: final Correct entries: 205 Incorrect entries: 0 Total managed entries: 205 Unmanaged entries: 0 .. warning:: This basic bundle is created mainly for the purposes of getting you to a completely managed client. It is recommended that you create bundles for appropriate services due to the way bundle updates are managed. Please see :ref:`unsorted-writing_specification` for more details. Dynamic (web) reports ===================== See installation instructions at :ref:`reports-dynamic` Next Steps ========== :ref:`getting_started-index-next-steps` doc/appendix/guides/converging_rhel5.txt000066400000000000000000000071201303523157100207140ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-converging_rhel5: ====================================== Converging on Verification with RHEL 5 ====================================== Running verification ==================== To get complete verification status, run:: bcfg2 -vqned Unmanaged entries ================= * Package (top-level) #. Enable the "Packages" plugin in ``/etc/bcfg2.conf``, and configure the Yum repositories in ``/var/lib/bcfg2/Packages/sources.xml``. #. If a package is unwanted, remove it:: sudo yum remove PACKAGE #. Otherwise, add ```` to the Bundler configuration. * Package (dependency) #. Ensure the Yum repository sources configured in ``/var/lib/bcfg2/Packages/sources.xml`` are correct. #. Ensure the Yum repositories themselves are up-to-date with the main package and dependencies. #. Rebuild the Packages plugin cache:: bcfg2-admin xcmd Packages.Refresh * Service #. Add ```` to the Bundler configuration. #. Add ```` to ``/var/lib/bcfg2/Rules/services.xml``. Incorrect entries ================= For a "Package" --------------- * Failed RPM verification #. Run ``rpm -V PACKAGE`` #. Add configuration files (the ones with "c" next to them in the verification output) to ``/var/lib/bcfg2/Cfg/``. * For example, ``/etc/motd`` to ``/var/lib/bcfg2/Cfg/etc/motd/motd``. Yes, there is an extra directory level named after the file. #. Specify configuration files as ```` in the Bundler configuration. #. Add directories to ``/var/lib/bcfg2/Rules/directories.xml``. For example: .. code-block:: xml * Multiple instances * Option A: Explicitly list the instances #. Drop the ```` from the Bundler configuration. #. Add an explicit ```` and ```` configuration to a new Bundle, like the following: .. code-block:: xml #. Add the bundle to the applicable groups in ``/var/lib/bcfg2/Metadata/groups.xml``. * Option B: Disable verification of the package #. Add ``pkg_checks="false"`` to the ```` tag. For a "Path" ------------------- * Unclear verification problem (no details from Bcfg2) 1. Run ``bcfg2 -vqI`` to see detailed verification issues (but deny any suggested actions). * Permissions mismatch 1. Create an ``info.xml`` file in the same directory as the configuration file. Example: .. 
code-block:: xml Other troubleshooting tools =========================== * Generate the physical configuration from the server side:: bcfg2-info buildfile /test test.example.com * Generate the physical configuration from the client side:: bcfg2 -vqn -c/root/bcfg2-physical.xml doc/appendix/guides/gentoo.txt000066400000000000000000000115701303523157100167530ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-gentoo: ====== Gentoo ====== This document tries to lay out anything Gentoo-specific that you need to know in order to use Bcfg2. Mostly that has to do with getting it to cooperate with the various pieces of Portage. Services, all things POSIX, and just about anything else that Bcfg2 does will work the same on Gentoo as on any other distribution. Bcfg2 is new on Gentoo; please let the list know if you find errors or omissions. Installing Bcfg2 ================ Early in July 2008, Bcfg2 was added to the Gentoo portage tree. If you don't use portage to install Bcfg2, you'll want to make sure you have all the prerequisites installed first. For a server, you'll need: * ``dev-libs/libgamin[python]`` * ``dev-python/lxml`` Clients will need at least: * ``app-portage/gentoolkit`` Portage installs from source ============================ .. versionadded:: 1.3.0 By default the client will run with the ``--gitbinpkgonly`` option. If you want your client to install packages from source (rather than having a binary build host as seen below), you can set the following in ``/etc/bcfg2.conf``.:: [Portage] binpkgonly = false Package Repository ================== .. note: This is only necessary for using binary packages. You’ll need (to make) at least one archive of binary packages. The Portage driver calls ``emerge`` with the ``--getbinpkgonly`` option. See :manpage:`make.conf(5)` and :manpage:`emerge(1)` manpages, specifically the :envvar:`PORTAGE_BINHOST` environment variable. Time Saver: quickpkg -------------------- If you have a standing Gentoo machine that you want to preserve or propagate, you can generate a complete package archive based on the present state of the system by using the quickpkg utility. For example: .. code-block:: sh for pkg in `equery -q l` ; do quickpkg "=$pkg" ; done ...will leave you with a complete archive of all the packages on your system in ``/usr/portage/packages/All``, which you can then move to your ftp server. Cataloging Packages In Your Repository -------------------------------------- Once you have a set of packages, you will need to create a catalog for them in ``/var/lib/bcfg2/Pkgmgr``. Here's a template: .. code-block:: xml ...and a partially filled-out example, for our local Gentoo/VMware build: .. code-block:: xml [...] The `` name (in our example, "gentoo-200701-vmware") should be included by any host which will draw its packages from this list. Our collection of packages for this class of machines is at the listed URI, and we only have one collection of packages for this batch of machines so in our case the `priority` doesn’t really matter, we've set it to `0`. Notice that package name fields are in `CAT/TITLE` format. Here is a hack which will generate a list of Package lines from a system's database of installed packages, especially useful in conjunction with the ``quickpkg`` example above: .. 
code-block:: sh #!/bin/bash for pkg in `equery -q l` ; do title=`echo $pkg | sed -e 's/\(.*\)-\([0-9].*\)/\1/'` version=`echo $pkg | sed -e 's/\(.*\)-\([0-9].*\)/\2/'` echo " " done Configuring Client Machines =========================== Set up ``/etc/bcfg2.conf`` the way you would for any other Bcfg2 client. In ``make.conf``, set *PORTAGE_BINHOST* to point to the URI of your package repository. You may want to create versions of ``make.conf`` for each package repository you maintain, with appropriate *PORTAGE_BINHOST* URI's in each, and associated with that package archive's group under ``Cfg`` -- for example, we have ``Cfg/etc/make.conf/make.conf.G99_gentoo-200701-vmware``. If a client host switches groups, and the new group needs a different set of packages, everything should just fall into place. Pitfalls ======== /boot ----- Gentoo as well as some other distros recommend leaving ``/boot`` unmounted during normal runtime. This can lead to trouble during verification and package installation, for example when ``/boot/grub/grub.conf`` turns up missing. The simplest way around this might just be to ensure that ``/boot`` is mounted whenever you run Bcfg2, possibly wrapping Bcfg2 in a script for the purpose. I've also thought about adding *Action* clauses to bundles for grub and our kernel packages, which would mount ``/boot`` before the bundle installs and unmount it afterward, but this doesn't get around the problem of those packages flunking verification. doc/appendix/guides/import-existing-ssh-keys.txt000066400000000000000000000104511303523157100223630ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _appendix-guides-import-existing-ssh-keys: ======================== Import existing ssh keys ======================== .. note:: In order for the instructions in this guide to work, you will need to first setup the :ref:`reporting system ` so that the server has the information needed to create the existing entries. This guide details the process for importing existing ssh keys into your server repository. Add a bundle for ssh ==================== After verifying that SSHbase is listed on the plugins line in ``/etc/bcfg2.conf``, you need to create a bundle containing the appropriate entries. In general, you can use a path glob: .. code-block:: xml If you need more granular control -- e.g., other entries in ``/etc/ssh`` are specified in other bundles -- you can also list the files explicity: .. code-block:: xml Next, you need to add the ssh bundle to the client's metadata in groups.xml. 
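A minimal sketch of such a ``groups.xml`` entry, assuming the bundle created above is named ``ssh`` and that your client belongs to an illustrative profile group called ``basic`` (adjust both names to match your own repository):

.. code-block:: xml

    <Groups>
      <Group profile="true" public="true" name="basic">
        <Bundle name="ssh"/>
      </Group>
    </Groups>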
Validate your repository ======================== Validation can be performed using the following command:: bcfg2-lint Run the bcfg2 client ==================== :: bcfg2 -vqn You will see the incorrect entries for the ssh files:: Phase: initial Correct entries: 0 Incorrect entries: 7 Total managed entries: 7 Unmanaged entries: 649 In dryrun mode: suppressing entry installation for: Path:/etc/ssh/ssh_host_dsa_key Path:/etc/ssh/ssh_host_rsa_key Path:/etc/ssh/ssh_host_dsa_key.pub Path:/etc/ssh/ssh_host_rsa_key.pub Path:/etc/ssh/ssh_host_key Path:/etc/ssh/ssh_known_hosts Path:/etc/ssh/ssh_host_key.pub Phase: final Correct entries: 0 Incorrect entries: 7 Path:/etc/ssh/ssh_host_dsa_key Path:/etc/ssh/ssh_host_rsa_key Path:/etc/ssh/ssh_host_dsa_key.pub Path:/etc/ssh/ssh_host_rsa_key.pub Path:/etc/ssh/ssh_host_key Path:/etc/ssh/ssh_known_hosts Path:/etc/ssh/ssh_host_key.pub Total managed entries: 7 Unmanaged entries: 649 Install the client's ssh keys into the Bcfg2 repository ======================================================= Now, we pull the ssh host key data for the client out of the uploaded stats and insert it as host-specific copies of these files in ``/var/lib/bcfg2/SSHBase``.:: for key in ssh_host_ed25519_key ssh_host_ecdsa_key ssh_host_rsa_key ssh_host_dsa_key ssh_host_key; do sudo bcfg2-admin pull Path /etc/ssh/$key sudo bcfg2-admin pull Path /etc/ssh/$key.pub done This for loop pulls data that was collected by the bcfg2 client out of the statistics file and installs it into the repository. This means that the client will keep the same ssh keys and the bcfg2 server can start generating a correct ssh_known_hosts file for the client. Run the bcfg2 client (again) ============================ :: bcfg2 -vqn This time, we will only see 1 incorrect entry.:: Phase: initial Correct entries: 6 Incorrect entries: 1 Total managed entries: 7 Unmanaged entries: 649 In dryrun mode: suppressing entry installation for: Path:/etc/ssh/ssh_known_hosts Phase: final Correct entries: 6 Incorrect entries: 1 Path:/etc/ssh/ssh_known_hosts Total managed entries: 7 Unmanaged entries: 649 Now, the only wrong entry is the ssh_known_hosts file, so go ahead and install it:: bcfg2 -vqI After answering 'y' to the interactive prompt, the client will install the known_hosts file successfully. doc/appendix/guides/nat_howto.txt000066400000000000000000000063141303523157100174620ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-nat_howto: ========= NAT HOWTO ========= This page describes how to setup Bcfg2 to properly function with a collection of clients behind NAT. It describes the issues, how the underlying portions of the Bcfg2 system function, and how to correctly setup client metadata to cope with this environment. Issues ====== Bcfg2, by default, uses ip address lookup to determine the identity of a client that has connected. This process doesn't work properly in the case of NAT'ed hosts, because all requests from these clients come from the same external address when connecting to the server. These client identification issues will manifest themselves in a number of ways: * Inability to setup discrete clients with different profiles * Incorrect sharing of probe results across clients in the same NAT pool * Inability to bootstrap clients properly when client data is not predefined Architectural Issues ==================== Client identification is performed at the beginning of each client/server interaction. 
As of 0.9.3, client identification via IP address can be completely short-circuited through the use of a client uuid. Setup of client uuids requires actions of both the client and server. On the server side, the client uuid must be added to the client record in ``Metadata/clients.xml``. This attribute allows the server to use the user part of the authentication to resolve the client's metadata. Also, either the location attribute should be set to floating, or the IP address of the NAT must be reflected in the address attribute. Once added, the Client entry in clients.xml will look something like this: .. code-block:: xml Alternatively, the Client entry can be setup like this: .. code-block:: xml The difference between these definitions is explained in detail in the :ref:`appendix-guides-authentication` section, but in short, the second form requires that the client access the server from the NAT address, while the first form allows it to connect from any address provided it uses the proper uuid. (Client identification is orthogonal to the use of per-client passwords; this can be set in addition to the attributes above.) Once this setup is done, each client must be configured to use the proper uuid upon any server interaction. This can be done using either the command line argument -u, or the setting "user" in the "communication" section of ``/etc/bcfg2.conf``. UUID Choice =========== When determining client UUIDs, one must take care to ensure that UUIDs are unique to the client. Any hardware-specific attribute, like a hash of a mac address would be sufficient. Alternatively, if a local hostname is unique, it may be used as well. Automated Client Bootstrapping ============================== Automated setup of new clients from behind NAT works by using the common password. For example:: /usr/sbin/bcfg2 -u ubik3 -p desktop -x It is not possible at this time to do automated setup without setting up a pre-shared per-client key. doc/appendix/guides/sslca_howto.txt000066400000000000000000000147551303523157100200150ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-sslca_howto: ==================================== Automated Bcfg2 SSL Authentication ==================================== This how-to describes one possible scenario for automating SSL certificate generation and distribution for bcfg2 client/server communication using the :ref:`SSL CA feature ` of :ref:`server-plugins-generators-cfg`. The process involves configuring a certificate authority (CA), generating the CA cert and key pair, configuring the Cfg SSL CA feature and a Bundle to use the generated certs to authenticate the Bcfg2 client and server. OpenSSL CA ========== If you already have a SSL CA available you can skip this section, otherwise you can easily build one on the server using openssl. The paths should be adjusted to suite your preferences. #. Prepare the directories and files:: mkdir -p /etc/pki/CA/newcerts mkdir /etc/pki/CA/crl echo '01' > /etc/pki/CA/serial touch /etc/pki/CA/index.txt touch /etc/pki/CA/crlnumber #. Edit the ``openssl.cnf`` config file, and in the **[ CA_default ]** section adjust the following parameters:: dir = /etc/pki # Where everything is kept certs = /etc/pki/CA/certs # Where the issued certs are kept database = /etc/pki/CA/index.txt # database index file. new_certs_dir = /etc/pki/CA/newcerts # default place for new certs. 
certificate = /etc/pki/CA/certs/bcfg2ca.crt # The CA certificate serial = /etc/pki/CA/serial # The current serial number crl_dir = /etc/pki/CA/crl # Where the issued crl are kept crlnumber = /etc/pki/CA/crlnumber # the current crl number crl = /etc/pki/CA/crl.pem # The current CRL private_key = /etc/pki/CA/private/bcfg2ca.key # The private key #. Create the CA root certificate and key pair. You'll be asked to supply a passphrase, and some organizational info. The most important bit is **Common Name** which you should set to be the hostname of your bcfg2 server that your clients will see when doing a reverse DNS query on it's ip address.:: openssl req -new -x509 -extensions v3_ca -keyout bcfg2ca.key \ -out bcfg2ca.crt -days 3650 #. Move the generated cert and key to the locations specified in ``openssl.cnf``:: mv bcfg2ca.key /etc/pki/CA/private/ mv bcfg2ca.crt /etc/pki/CA/certs/ Your self-signing CA is now ready to use. Bcfg2 ===== SSL CA Feature -------------- The SSL CA feature of Cfg was not designed specifically to manage Bcfg2 client/server communication, though it is certainly able to provide certificate generation and management services for that purpose. You'll need to configure Cfg as described in :ref:`server-plugins-generators-cfg-ssl-certificates`, including: * Configuring a ``[sslca_default]`` section in ``bcfg2.conf`` that describes the CA you created above; * Creating ``Cfg/etc/pki/tls/certs/bcfg2client.crt/sslcert.xml`` and ``Cfg/etc/pki/tls/private/bcfg2client.key/sslkey.xml`` to describe the key and cert you want generated. In general, the defaults in ``sslcert.xml`` and ``sslkey.xml`` should be fine, so those files can look like this: ``Cfg/etc/pki/tls/certs/bcfg2client.crt/sslcert.xml``: .. code-block:: xml ``Cfg/etc/pki/tls/private/bcfg2client.key/sslkey.xml``: .. code-block:: xml Client Bundle ------------- To automate the process of generating and distributing certs to the clients we need define at least the cert and key paths created by Cfg, as well as the CA certificate path in a Bundle. For example: .. code-block:: xml Here's a more complete example bcfg2-client bundle: .. code-block:: xml The ``bcfg2.conf`` client config needs at least 5 parameters set for SSL auth. #. ``key`` : This is the host specific key that Cfg will create. #. ``certificate`` : This is the host specific cert that Cfg will create. #. ``ca`` : This is a copy of your CA certificate. Not generated by Cfg. #. ``password`` : Set to arbitrary string when using certificate auth. This also *shouldn't* be required. See: http://trac.mcs.anl.gov/projects/bcfg2/ticket/1019 Here's what a functional **[communication]** section in a ``bcfg2.conf`` genshi template for clients might look like.:: [communication] {% if metadata.uuid != None %}\ user = ${metadata.uuid} {% end %}\ password = DUMMYPASSWORDFORCERTAUTH {% choose %}\ {% when 'rpm' in metadata.groups %}\ certificate = /etc/pki/tls/certs/bcfg2client.crt key = /etc/pki/tls/private/bcfg2client.key ca = /etc/pki/tls/certs/bcfg2ca.crt {% end %}\ {% when 'deb' in metadata.groups %}\ certificate = /etc/ssl/certs/bcfg2client.crt key = /etc/ssl/private/bcfg2client.key ca = /etc/ssl/certs/bcfg2ca.crt {% end %}\ {% end %}\ As a client will not be able to authenticate with certificates it does not yet posses we need to overcome the chicken and egg scenario the first time we try to connect such a client to the server. 
We can do so using password based auth to bootstrap the client manually specifying all the relevant auth parameters like so:: bcfg2 -qv -S https://fqdn.of.bcfg2-server:6789 -u fqdn.of.client \ -x SUPER_SECRET_PASSWORD If all goes well the client should recieve a freshly generated key and cert and you should be able to run ``bcfg2`` again without specifying the connection parameters. If you do run into problems you may want to review :ref:`appendix-guides-authentication`. doc/appendix/guides/ubuntu.txt000066400000000000000000001073671303523157100170140ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _appendix-guides-ubuntu: ====== Ubuntu ====== .. note:: This particular how to was done on saucy, but should apply to any other `stable`__ version of Ubuntu. __ ubuntu-releases_ .. _ubuntu-releases: https://wiki.ubuntu.com/Releases Install Bcfg2 ============= We first need to install the server. For this example, we will use the bcfg2 server package from the bcfg2 `PPA`_ (note that there is also a version available in the ubuntu archives, but it is not as up to date). .. _PPA: https://launchpad.net/~bcfg2/+archive/ppa Install bcfg2-server -------------------- :: aptitude install bcfg2-server Remove the default configuration preseeded by the ubuntu package:: root@saucy:~# rm -rf /etc/bcfg2* /etc/ssl/bcfg2* /var/lib/bcfg2 Initialize your repository ========================== Now that you're done with the install, you need to intialize your repository and setup your bcfg2.conf. bcfg2-admin init is a tool which allows you to automate this process.:: root@saucy:~# bcfg2-admin init Store Bcfg2 configuration in [/etc/bcfg2.conf]: Location of Bcfg2 repository [/var/lib/bcfg2]: Input password used for communication verification (without echoing; leave blank for a random): What is the server's hostname: [saucy] Input the server location (the server listens on a single interface by default) [https://saucy:6789]: Input base Operating System for clients: 1: Redhat/Fedora/RHEL/RHAS/CentOS 2: SUSE/SLES 3: Mandrake 4: Debian 5: Ubuntu 6: Gentoo 7: FreeBSD 8: Arch : 5 Path where Bcfg2 server private key will be created [/etc/ssl/bcfg2.key]: Path where Bcfg2 server cert will be created [/etc/ssl/bcfg2.crt]: The following questions affect SSL certificate generation. If no data is provided, the default values are used. Country name (2 letter code) for certificate: US State or Province Name (full name) for certificate: Illinois Locality Name (eg, city) for certificate: Argonne Repository created successfuly in /var/lib/bcfg2 Generating a 2048 bit RSA private key ....................................................................................................................+++ ..............................+++ writing new private key to '/etc/ssl/bcfg2.key' ----- Signature ok subject=/C=US/ST=Illinois/L=Argonne/CN=saucy Getting Private key Of course, change responses as necessary. Start the server ================ Before you start the server, you need to fix your network resolution for this host. The short and easy way is to remove the 127.0.1.1 line in ``/etc/hosts`` and move your hostname to the 127.0.0.1 line. :: 127.0.0.1 saucy localhost # The following lines are desirable for IPv6 capable hosts ... .. _Debian Manual: http://www.debian.org/doc/manuals/debian-reference/ch05.en.html#_the_hostname_resolution .. note:: This configuration is not recommended except as a quick hack to get you through this guide. Ideally you'd add a line containing the host's actual IP address. 
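For reference, a sketch of that preferred layout, using 192.0.2.10 purely as an illustrative address for this host::

    127.0.0.1     localhost
    192.0.2.10    saucy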
More information on why this is broken can be found in the `Debian Manual`_. You are now ready to start your bcfg2 server for the first time.:: root@saucy:~# /etc/init.d/bcfg2-server start Starting Configuration Management Server: * bcfg2-server root@saucy:~# tail /var/log/syslog Jul 18 17:50:48 saucy bcfg2-server[5872]: Reconnected to syslog Jul 18 17:50:48 saucy bcfg2-server[5872]: bcfg2-server daemonized Jul 18 17:50:48 saucy bcfg2-server[5872]: service available at https://saucy:6789 Jul 18 17:50:48 saucy bcfg2-server[5872]: serving bcfg2-server at https://saucy:6789 Jul 18 17:50:48 saucy bcfg2-server[5872]: serve_forever() [start] Jul 18 17:50:48 saucy bcfg2-server[5872]: Handled 13 events in 0.006s Run bcfg2 to be sure you are able to communicate with the server:: root@saucy:~# bcfg2 -vqn Starting Bcfg2 client run at 1374188552.53 Loaded tool drivers: APT Action DebInit POSIX POSIXUsers Upstart VCS Loaded experimental tool drivers: POSIXUsers Phase: initial Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 590 Phase: final Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 590 Finished Bcfg2 client run at 1374188563.26 Bring your first machine under Bcfg2 control ============================================ Now it is time to get your first machine's configuration into your Bcfg2 repository. Let's start with the server itself. Setup the :ref:`server-plugins-generators-packages` plugin ---------------------------------------------------------- Replace Pkgmgr with Packages in the plugins line of ``bcfg2.conf``:: root@saucy:~# cat /etc/bcfg2.conf [server] repository = /var/lib/bcfg2 plugins = Bundler,Cfg,Metadata,Packages,Rules,SSHbase # Uncomment the following to listen on all interfaces #listen_all = true [statistics] sendmailpath = /usr/lib/sendmail #web_debug = False #time_zone = [database] #engine = sqlite3 # 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. #name = # Or path to database file if using sqlite3. #/bcfg2.sqlite is default path if left empty #user = # Not used with sqlite3. #password = # Not used with sqlite3. #host = # Not used with sqlite3. #port = [reporting] transport = LocalFilesystem [communication] password = secret certificate = /etc/ssl/bcfg2.crt key = /etc/ssl/bcfg2.key ca = /etc/ssl/bcfg2.crt [components] bcfg2 = https://saucy:6789 Create Packages layout (as per :ref:`packages-exampleusage`) in ``/var/lib/bcfg2`` .. code-block:: xml root@saucy:~# mkdir /var/lib/bcfg2/Packages root@saucy:~# cat /var/lib/bcfg2/Packages/packages.conf [global] root@saucy:~# cat /var/lib/bcfg2/Packages/sources.xml main multiverse restricted universe amd64 bcfg2 bcfg2-server main multiverse restricted universe amd64 bcfg2 bcfg2-server main multiverse restricted universe amd64 bcfg2 bcfg2-server main amd64 Above, we have grouped our package sources under **ubuntu-saucy**. We need to add this group to our ``/var/lib/bcfg2/Metadata/groups.xml`` so that our client is able to obtain these sources. .. code-block:: xml .. note:: When editing your xml files by hand, it is useful to occasionally run ``bcfg2-lint -v`` to ensure that your xml validates properly. The last thing we need is for the client to have the proper arch group membership. For this, we will make use of the :ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes plugin. Add Probes to your plugins line in ``bcfg2.conf`` and create the Probe. .. 
code-block:: sh root@saucy:~# grep plugins /etc/bcfg2.conf plugins = Bundler,Cfg,Metadata,...,Probes root@saucy:~# mkdir /var/lib/bcfg2/Probes root@saucy:~# cat /var/lib/bcfg2/Probes/groups #!/bin/sh ARCH=$(uname -m) case "$ARCH" in "x86_64") echo "group:amd64" ;; "i686") echo "group:i386" ;; esac Now we restart the bcfg2-server:: root@saucy:~# /etc/init.d/bcfg2-server restart Stopping Configuration Management Server: * bcfg2-server Starting Configuration Management Server: * bcfg2-server root@saucy:~# tail /var/log/syslog Jul 18 18:43:22 saucy bcfg2-server[6215]: Reconnected to syslog Jul 18 18:43:22 saucy bcfg2-server[6215]: bcfg2-server daemonized Jul 18 18:43:22 saucy bcfg2-server[6215]: service available at https://saucy:6789 Jul 18 18:43:22 saucy bcfg2-server[6215]: Failed to read file probed.xml: Error reading file '/var/lib/bcfg2/Probes/probed.xml': failed to load external entity "/var/lib/bcfg2/Probes/probed.xml" Jul 18 18:43:22 saucy bcfg2-server[6215]: serving bcfg2-server at https://saucy:6789 Jul 18 18:43:22 saucy bcfg2-server[6215]: serve_forever() [start] Jul 18 18:43:22 saucy bcfg2-server[6215]: Reloading Packages plugin Jul 18 18:43:22 saucy bcfg2-server[6215]: Handled 15 events in 0.205s .. note:: The error regarding *probed.xml* is non-fatal and just telling you that the file doesn't yet exist. It will be populated once you have run a client with the Probes plugin enabled. Start managing packages ----------------------- Add a base-saucy (or whatever release you happen to be using) bundle. Let's see what happens when we just populate it with the ubuntu-standard package. .. code-block:: xml root@saucy:~# cat /var/lib/bcfg2/Bundler/base-saucy.xml You need to reference the bundle from your Metadata. The resulting profile group might look something like this .. 
code-block:: xml Now if we run the client in debug mode (-d), we can see what this has done for us.:: root@saucy:/var/lib/bcfg2# bcfg2 -vqdn Configured logging: DEBUG to console; DEBUG to syslog {'help': False, 'extra': False, 'ppath': '/var/cache/bcfg2', 'ca': '/etc/ssl/bcfg2.crt', 'rpm_version_fail_action': 'upgrade', 'yum_version_fail_action': 'upgrade', 'retry_delay': '1', 'posix_uid_whitelist': [], 'rpm_erase_flags': ['allmatches'], 'verbose': True, 'certificate': '/etc/ssl/bcfg2.crt', 'paranoid': False, 'rpm_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'cache': None, 'yum24_autodep': True, 'yum_pkg_verify': True, 'probe_timeout': None, 'yum_installed_action': 'install', 'rpm_verify_fail_action': 'reinstall', 'dryrun': True, 'retries': '3', 'apt_install_path': '/usr', 'quick': True, 'password': 'secret', 'yum24_installed_action': 'install', 'kevlar': False, 'max_copies': 1, 'syslog': True, 'decision_list': False, 'configfile': '/etc/bcfg2.conf', 'remove': None, 'server': 'https://saucy:6789', 'encoding': 'UTF-8', 'timeout': 90, 'debug': True, 'yum24_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'yum24_erase_flags': ['allmatches'], 'yum24_pkg_checks': True, 'interactive': False, 'apt_etc_path': '/etc', 'rpm_installed_action': 'install', 'yum24_verify_fail_action': 'reinstall', 'omit_lock_check': False, 'yum24_pkg_verify': True, 'serverCN': None, 'file': None, 'apt_var_path': '/var', 'posix_gid_whitelist': [], 'posix_gid_blacklist': [], 'indep': False, 'decision': 'none', 'service_mode': 'default', 'version': False, 'rpm_pkg_checks': True, 'profile': None, 'yum_pkg_checks': True, 'args': [], 'bundle': [], 'posix_uid_blacklist': [], 'user': 'root', 'key': '/etc/ssl/bcfg2.key', 'command_timeout': None, 'probe_exit': True, 'lockfile': '/var/lock/bcfg2.run', 'yum_verify_fail_action': 'reinstall', 'yum24_version_fail_action': 'upgrade', 'yum_verify_flags': [], 'logging': None, 'rpm_pkg_verify': True, 'bundle_quick': False, 'rpm_verify_flags': [], 'yum24_verify_flags': [], 'skipindep': False, 'skipbundle': [], 'portage_binpkgonly': False, 'drivers': ['APK', 'APT', 'Action', 'Blast', 'Chkconfig', 'DebInit', 'Encap', 'FreeBSDInit', 'FreeBSDPackage', 'IPS', 'MacPorts', 'OpenCSW', 'POSIX', 'POSIXUsers', 'Pacman', 'Portage', 'RPM', 'RPMng', 'RcUpdate', 'SELinux', 'SMF', 'SYSV', 'Systemd', 'Upstart', 'VCS', 'YUM', 'YUM24', 'YUMng', 'launchd']} Starting Bcfg2 client run at 1374191628.88 Running probe groups Running: /tmp/tmpEtgdwo < group:amd64 Probe groups has result: group:amd64 POSIX: Handlers loaded: nonexistent, directory, hardlink, symlink, file, device, permissions Loaded tool drivers: APT Action DebInit POSIX POSIXUsers Upstart VCS Loaded experimental tool drivers: POSIXUsers The following packages are specified in bcfg2: ubuntu-standard The following packages are prereqs added by Packages: accountsservice libdrm2 libusb-1.0-0 adduser libedit2 libustr-1.0-1 apparmor libelf1 libuuid1 apt libexpat1 libwind0-heimdal apt-transport-https libffi6 libx11-6 apt-utils libfribidi0 libx11-data base-files libfuse2 libxau6 base-passwd libgcc1 libxcb1 bash libgck-1-0 libxdmcp6 
bash-completion libgcr-3-common libxext6 bsdmainutils libgcr-base-3-1 libxml2 bsdutils libgcrypt11 libxmuu1 busybox-initramfs libgdbm3 libxtables10 busybox-static libgeoip1 locales ca-certificates libglib2.0-0 login command-not-found libglib2.0-data logrotate command-not-found-data libgnutls26 lsb-base coreutils libgpg-error0 lsb-release cpio libgpm2 lshw cron libgssapi-krb5-2 lsof dash libgssapi3-heimdal ltrace dbus libhcrypto4-heimdal makedev debconf libheimbase1-heimdal man-db debconf-i18n libheimntlm0-heimdal manpages debianutils libhx509-5-heimdal memtest86+ diffutils libidn11 mime-support dmidecode libisc92 mlocate dmsetup libisccc90 module-init-tools dnsutils libisccfg90 mount dosfstools libjson-c2 mountall dpkg libjson0 mtr-tiny e2fslibs libk5crypto3 multiarch-support e2fsprogs libkeyutils1 nano ed libklibc ncurses-base file libkmod2 ncurses-bin findutils libkrb5-26-heimdal netbase friendly-recovery libkrb5-3 ntfs-3g ftp libkrb5support0 openssh-client fuse libldap-2.4-2 openssl gcc-4.8-base liblocale-gettext-perl parted geoip-database liblwres90 passwd gettext-base liblzma5 pciutils gnupg libmagic1 perl-base gpgv libmount1 plymouth grep libncurses5 plymouth-theme-ubuntu-text groff-base libncursesw5 popularity-contest gzip libnewt0.52 powermgmt-base hdparm libnfnetlink0 ppp hostname libnih-dbus1 pppconfig ifupdown libnih1 pppoeconf info libnuma1 procps initramfs-tools libp11-kit0 psmisc initramfs-tools-bin libpam-modules python-apt-common initscripts libpam-modules-bin python3 insserv libpam-runtime python3-apt install-info libpam-systemd python3-commandnotfound iproute libpam0g python3-dbus iproute2 libparted0debian1 python3-distupgrade iptables libpcap0.8 python3-gdbm iputils-tracepath libpci3 python3-minimal irqbalance libpcre3 python3-update-manager iso-codes libpipeline1 python3.3 klibc-utils libplymouth2 python3.3-minimal kmod libpng12-0 readline-common krb5-locales libpolkit-gobject-1-0 rsync language-selector-common libpopt0 sed libaccountsservice0 libprocps0 sensible-utils libacl1 libpython3-stdlib sgml-base libapparmor-perl libpython3.3-minimal shared-mime-info libapparmor1 libpython3.3-stdlib strace libapt-inst1.5 libreadline6 systemd-services libapt-pkg4.12 libroken18-heimdal sysv-rc libasn1-8-heimdal librtmp0 sysvinit-utils libasprintf0c2 libsasl2-2 tar libatm1 libsasl2-modules tcpdump libattr1 libselinux1 telnet libaudit-common libsemanage-common time libaudit1 libsemanage1 tzdata libbind9-90 libsepol1 ubuntu-keyring libblkid1 libslang2 ubuntu-release-upgrader-core libbsd0 libsqlite3-0 ucf libbz2-1.0 libss2 udev libc-bin libssl1.0.0 ufw libc6 libstdc++6 update-manager-core libcap-ng0 libsystemd-daemon0 upstart libcap2 libsystemd-login0 usbutils libcomerr2 libtasn1-3 util-linux libcurl3-gnutls libtext-charwidth-perl uuid-runtime libdb5.1 libtext-iconv-perl wget libdbus-1-3 libtext-wrapi18n-perl whiptail libdbus-glib-1-2 libtinfo5 xauth libdevmapper1.02.1 libudev1 xml-core libdns95 libusb-0.1-4 zlib1g Phase: initial Correct entries: 280 Incorrect entries: 0 Total managed entries: 280 Unmanaged entries: 313 Installing entries in the following bundle(s): base-saucy Bundle base-saucy was not modified Phase: final Correct entries: 280 Incorrect entries: 0 Total managed entries: 280 Unmanaged entries: 313 Finished Bcfg2 client run at 1374191642.69 As you can see, the Packages plugin has generated the dependencies required for the ubuntu-standard package for us automatically. 
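For reference, a minimal ``/var/lib/bcfg2/Bundler/base-saucy.xml`` along the lines described above might look like the following sketch (only the ``ubuntu-standard`` Package entry is assumed here; adjust the bundle name to your release):

.. code-block:: xml

    <Bundle name="base-saucy">
        <Package name="ubuntu-standard"/>
    </Bundle>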
The ultimate goal should be to move all the packages from the **Unmanaged** entries section to the **Managed** entries section. So, what exactly *are* those Unmanaged entries? :: Starting Bcfg2 client run at 1374192077.76 Running probe groups Probe groups has result: group:amd64 Loaded tool drivers: APT Action DebInit POSIX POSIXUsers Upstart VCS Loaded experimental tool drivers: POSIXUsers Phase: initial Correct entries: 280 Incorrect entries: 0 Total managed entries: 280 Unmanaged entries: 313 Phase: final Correct entries: 280 Incorrect entries: 0 Total managed entries: 280 Unmanaged entries: 313 POSIXGroup:adm POSIXGroup:audio POSIXGroup:backup ... Package:deb:apt-xapian-index Package:deb:aptitude Package:deb:aptitude-common ... Now you can go through these and continue adding the packages you want to your Bundle. Note that ``aptitude why`` is useful when trying to figure out the reason for a package being installed. Also, ``deborphan`` is helpful for removing leftover dependencies which are no longer needed. After a while, I ended up with a minimal bundle that looks like this: .. code-block:: xml Once your ``bcfg2 -vqen`` output no longer shows Package entries, you can move on to the next section. Manage users ------------ The default setting in ``login.defs`` is for system accounts to be UIDs < 1000. We will ignore those accounts for now (you can manage them if you like at a later time). To ignore system UID/GIDs, add the following lines to ``bcfg2.conf`` (we will also ignore the nobody uid and nogroup gid--65534). :: [POSIXUsers] uid_blacklist = 0-999,65534 gid_blacklist = 0-999,65534 If you run the client again with ``bcfg2 -vqen``, you should now see a :ref:`POSIXUser ` entry and :ref:`POSIXGroup ` entry for your user account (assuming this is a fresh install with a regular user). You can manage this user by adding the following to your bundle. .. code-block:: xml adm cdrom dip lpadmin plugdev sambashare sudo Manage services --------------- To clear up the unmanaged service entries, you will need to add the entries to your bundle. Here's an example of what that might look like. .. code-block:: xml Add the literal entries in Rules to bind the Service entries from above. .. code-block:: xml root@saucy:~# cat /var/lib/bcfg2/Rules/services.xml Now we run the client and see there are no more unmanaged entries! :: root@saucy:~# bcfg2 -vqn Starting Bcfg2 client run at 1374271524.83 Running probe groups Probe groups has result: group:amd64 Loaded tool drivers: APT Action DebInit POSIX POSIXUsers Upstart VCS Loaded experimental tool drivers: POSIXUsers Phase: initial Correct entries: 519 Incorrect entries: 0 Total managed entries: 519 Unmanaged entries: 0 Phase: final Correct entries: 519 Incorrect entries: 0 Total managed entries: 519 Unmanaged entries: 0 All entries correct. Finished Bcfg2 client run at 1374271541.56 .. warning:: This basic bundle is created mainly for the purposes of getting you to a completely managed client. It is recommended that you create bundles for appropriate services due to the way bundle updates are managed. Please see :ref:`unsorted-writing_specification` for more details. Upstart ^^^^^^^ Upstart services are defined like this: .. code-block:: xml Some Upstart services require additional parameters, like network-interface and bridge-network-interface: .. 
code-block:: xml Dynamic (web) reports ===================== See installation instructions at :ref:`appendix-guides-web-reports-install` Next Steps ========== :ref:`getting_started-index-next-steps` doc/appendix/guides/using-bcfg2-with-centos.txt000066400000000000000000000047111303523157100220270ustar00rootroot00000000000000.. -*- mode: rst -*- .. _EPEL: http://fedoraproject.org/wiki/EPEL .. _RPMForge: https://rpmrepo.org/RPMforge .. _getting_started-using_bcfg2-with-centos: ======================= Using Bcfg2 With CentOS ======================= This section covers specific topics for using Bcfg2 with CentOS. Most likely the tips on this page also apply to other members of the Red Hat family of Linux operating systems. From Source +++++++++++ Install Prerequisities ###################### While you can go about building all these things from source, this how to will try and meet the dependencies using packages from EPEL_ or RPMforge_. The *el5* package should be compatible with CentOS 5.x. EPEL_ for 5.x :: [root@centos ~]# rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm EPEL_ for 6.x :: [root@centos ~]# rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-5.noarch.rpm RPMforge_ for 5.x :: [root@centos ~]# rpm -Uvh http://dag.wieers.com/rpm/packages/rpmforge-release/rpmforge-release-0.3.6-1.el5.rf.x86_64.rpm .. note:: Be careful with `mixing package repositories `_. Now you can install the rest of the prerequisites:: [root@centos ~]# yum install python-genshi python-cheetah python-lxml Build Packages from source ########################## * After installing git, clone the master branch:: [root@centos redhat]# git clone git://git.mcs.anl.gov/bcfg2.git * Install the ``fedora-packager`` package :: [root@centos ~]# yum install fedora-packager * A directory structure for the RPM build process has to be established. :: [you@centos ~]$ rpmdev-setuptree * Change to the *redhat* directory of the checked out Bcfg2 source:: [you@centos ~]$ cd bcfg2/redhat/ * In the particular directory is a Makefile which will do the job of building the RPM packages. You can do this as root, but it's not recommended:: [you@centos redhat]$ make * Now the new RPM package can be installed. Please adjust the path to your RPM package:: [root@centos ~]# rpm -ihv /home/YOU/rpmbuild/RPMS/noarch/bcfg2-server-1.0.0-0.2r5835.noarch.rpm Install Packages from Package repository ######################################## To install the bcfg2-server and bcfg2 from a package repository, just use Yum to do it:: [root@centos ~]# yum install bcfg2-server bcfg2 .. toctree:: :hidden: doc/appendix/guides/vcs.txt000066400000000000000000000051661303523157100162570ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-guides-vcs: ======================= Version control systems ======================= The sections in this guide only cover the basics steps in the setup of the different version control systems for usage with the Bcfg2. Git === .. _Git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html Adding the :ref:`server-plugins-version-git` plugin will allow you to store version information in the statistics database. For tracking the configuration files in the ``/var/lib/bcfg2`` directory a git repository needs to be established:: git init For more detail about the setup of git please refer to a `git tutorial`_. The first commit can be the empty or the already populated directory:: git add . 
&& git commit -a While running ``bcfg2-info`` the following line will show up:: Initialized git plugin with git directory = /var/lib/bcfg2/.git Mercurial ========= The :ref:`server-plugins-version-hg` plugin also allows you to store version information in the statistics database. To use mercurial to track your configuration files, the repository must be initialized:: hg init Mercurial will not commit the files to the repository until a user name is defined in ``/var/lib/bcfg2/.hg/hgrc`` .. code-block:: sh cat <<END_ENTRY >> /var/lib/bcfg2/.hg/hgrc [ui] username = Your name <you@example.com> END_ENTRY Now you are able to make submissions to the repository:: hg commit While running ``bcfg2-info`` the following line will show up:: Initialized hg plugin with hg directory = /var/lib/bcfg2/.hg Darcs ===== The :ref:`server-plugins-version-darcs` plugin also allows you to store version information in the statistics database. To use darcs to track your configuration files, the repository must be initialized:: darcs initialize To commit to the darcs repository an author must be added to the ``_darcs/prefs/author`` file. If the ``author`` file is missing, darcs will ask you to enter your e-mail address. .. code-block:: sh cat <<END_ENTRY >> /var/lib/bcfg2/_darcs/prefs/author you@example.com END_ENTRY All files in the ``/var/lib/bcfg2`` directory should be added to darcs now:: darcs add * After that you can submit them to the repository:: darcs record While running ``bcfg2-info`` the following line will show up:: Initialized Darcs plugin with darcs directory = /var/lib/bcfg2/_darcs Cvs === The :ref:`server-plugins-version-cvs` plugin also allows you to store version information in the statistics database. Add Cvs to the plugins line in ``bcfg2.conf``:: plugins = Bundler,Cfg,...,Cvs The CVS repository must be initialized:: cvs -d /var/lib/bcfg2 init doc/appendix/guides/web-reports-install.txt000066400000000000000000000037001303523157100213710ustar00rootroot00000000000000.. -*- mode: rst -*- .. _EPEL: http://fedoraproject.org/wiki/EPEL .. This is a combination of the Ubuntu guide and the CentOS guide for installing the web reports. .. _appendix-guides-web-reports-install: ======================== Web Reporting Quickstart ======================== You need to install the bcfg2-web package that is available for your particular distribution. All packages for Fedora are in the Fedora Package Collection. You can find packages for CentOS and RHEL in EPEL_:: [root@system01 ~]# yum -y install bcfg2-web The same packages are needed for Debian/Ubuntu systems:: [root@system01 ~]# aptitude install bcfg2-web Add Reporting to the plugins line of ``bcfg2.conf``. The resulting **[server]** section should look something like this:: [server] repository = /var/lib/bcfg2 plugins = Bundler,Cfg,...,Reporting [reporting] transport = LocalFilesystem You then need to initialize the reporting database:: [root@system01 ~]# bcfg2-admin reports init Start/restart the Bcfg2 server:: [root@system01 ~]# /etc/init.d/bcfg2-server restart Start the Bcfg2 report collector:: [root@system01 ~]# /etc/init.d/bcfg2-report-collector start Run the Bcfg2 client in order to populate the statistics database. Copy the server/statistics sections of ``bcfg2.conf`` to ``/etc/bcfg2-web.conf`` (make sure it is world-readable). You should then have something like this:: [server] repository = /var/lib/bcfg2 plugins = Bundler,Cfg,...,Reporting [database] engine = sqlite3 # 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. name = user = # Not used with sqlite3. password = # Not used with sqlite3. host = # Not used with sqlite3. 
port = [reporting] transport = LocalFilesystem Restart apache and point a browser to your Bcfg2 server. If using sqlite be sure the sql database file and directory containing the database are writable to apache. doc/appendix/index.txt000066400000000000000000000002611303523157100153020ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-index: ======== Appendix ======== .. toctree:: :maxdepth: 2 files configuration books papers articles guides tools doc/appendix/papers.txt000066400000000000000000000041331303523157100154670ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-papers: ====== Papers ====== * Configuration Life-Cycle Management on the TeraGrid. * Ti Leggett, Cory Lueninghoener, and Narayan Desai * In Proceedings of TeraGrid '07 Conference, June 2007 * `A Scalable Approach To Deploying And Managing Appliances `_ * Rick Bradshaw, Narayan Desai, Tim Freeman, and Kate Keahey * In Proceedings of the TeraGrid '07 Conference, June 2007 * `Bcfg2 - Konfigurationsmanagement Für Heterogene Umgebungen `_ * Marko Jung, Robert Gogolok * In Proceedings of German Unix User Group's Frühjahrsfachgespräch 2007, March 2007. * `Directing Change Using Bcfg2 `_ * Narayan Desai, Rick Bradshaw, Joey Hagedorn, and Cory Lueninghoener * In Proceedings of the Twentieth Large Install System Administration Conference (LISA XX), December 2-9, 2006, Washington D.C., USA, 2006. * `A Case Study in Configuration Management Tool Deployment `_ * Narayan Desai, Rick Bradshaw, Scott Matott, Sandra Bittner, Susan Coghlan, Remy Evard, Cory Leunighhoener, Ti Leggett, J.P. Navarro, Gene Rackow, Craig Stacey, and Tisha Stacey * In Proceedings of the Nineteenth Large Install System Administration Conference (LISA XIX), December 4-9, 2005, San Diego, CA, USA, 2005. * `Bcfg2: A Pay As You Go Approach to Configuration Complexity `_ * Narayan Desai * In Proceedings of the 2005 Australian Unix Users Group (AUUG2005), October 16-21, 2005, Sydney, Australia, 2005. * `Bcfg: A Configuration Management Tool for Heterogenous Environments `_ * Narayan Desai, Andrew Lusk, Rick Bradshaw, and Remy Evard * In Proceedings of the 5th IEEE International Conference on Cluster Computing (CLUSTER03), pages 500-503. IEEE Computer Society, 2003. doc/appendix/tools.txt000066400000000000000000000004461303523157100153400ustar00rootroot00000000000000.. -*- mode: rst -*- .. _appendix-tools: ===== Tools ===== In the ``tools/`` directory are several tools collected. Those tools can help you to maintain your Bcfg2 configuration, to make the initial setup easier, or to do some other tasks. https://github.com/Bcfg2/bcfg2/tree/maint/tools doc/architecture/000077500000000000000000000000001303523157100143055ustar00rootroot00000000000000doc/architecture/client.txt000066400000000000000000000127721303523157100163350ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-client: The Bcfg2 Client ================ The Bcfg2 client performs all client configuration or reconfiguration operations. It renders a declarative configuration specification, provided by the Bcfg2 server, into a set of configuration operations which will, if executed, attempt to change the client's state into that described by the configuration specification. Conceptually, the Bcfg2 client serves to isolate the Bcfg2 server and specification from the imperative operations required to implement configuration changes. 
This isolation allows declarative specifications to be manipulated symbolically on the server, without needing to understand the properties of the underlying system tools. In this way, the Bcfg2 client acts as a sort of expert system that *knows* how to implement declarative configuration changes. The operation of the Bcfg2 client is intended to be as simple as possible. The normal configuration process consists of four main steps: * **Probe Execution** During the probe execution stage, the client connects to the server and downloads a series of probes to execute. These probes reveal local facts to the Bcfg2 server. For example, a probe could discover the type of video card in a system. The Bcfg2 client returns this data to the server, where it can influence the client configuration generation process. * **Configuration Download and Inventory** The Bcfg2 client now downloads a configuration specification from the Bcfg2 server. The configuration describes the complete target state of the machine. That is, all aspects of client configuration should be represented in this specification. For example, all software packages and services should be represented in the configuration specification. The client now performs a local system inventory. This process consists of verifying each entry present in the configuration specification. After this check is completed, heuristic checks are executed for configuration not included in the configuration specification. We refer to this inventory process as 2-way validation, as first we verify that the client contains all configuration that is included in the specification, then we check if the client has any extra configuration that isn't present. This provides a fairly rigorous notion of client configuration congruence. Once the 2-way verification process has been performed, the client has built a list of all configuration entries that are out of spec. This list has two parts: specified configuration that is incorrect (or missing) and unspecified configuration that should be removed. * **Configuration Update** The client now attempts to update its configuration to match the specification. Depending on options, changes may not (or only partially) be performed. First, if extra configuration correction is enabled, extra configuration can be removed. Then the remaining changes are processed. The Bcfg2 client loops while progress is made in the correction of these incorrect configuration entries. This loop results in the client being able to accomplish all it will be able to during one execution. Once all entries are fixed, or no progress is being made, the loop terminates. Once all configuration changes that can be performed have been, bundle dependencies are handled. Bundle groupings result in two different behaviors. Contained entries are assumed to be inter-dependent. To address this, the client re-verifies each entry in any bundle containing an updates configuration entry. Also, services contained in modified bundles are restarted. * **Statistics Upload** Once the reconfiguration process has concluded, the client reports information back to the server about the actions it performed during the reconfiguration process. Statistics function as a detailed return code from the client. The server stores statistics information. 
Information included in this statistics update includes (but is not limited to): * Overall client status (clean/dirty) * List of modified configuration entries * List of uncorrectable configuration entries * List of unmanaged configuration entries Architecture Abstraction ------------------------ The Bcfg2 client internally supports the administrative tools available on different architectures. For example, ``rpm`` and ``apt-get`` are both supported, allowing operation of Debian, Redhat, SUSE, and Mandriva systems. The client toolset is determined based on the availability of client tools. The client includes a series of libraries which describe how to interact with the system tools on a particular platform. Three of the libraries exist. There is a base set of functions, which contain definitions describing how to perform POSIX operations. Support for configuration files, directories, symlinks, hardlinks, etc., are included here. Two other libraries subclass this one, providing support for Debian and rpm-based systems. The Debian toolset includes support for apt-get and update-rc.d. These tools provide the ability to install and remove packages, and to install and remove services. The Redhat toolset includes support for rpm and chkconfig. Any other platform that uses these tools can also use this toolset. Hence, all of the other familiar rpm-based distributions can use this toolset without issue. Other platforms can easily use the POSIX toolset, ignoring support for packages or services. Alternatively, adding support for new toolsets isn't difficult. Each toolset consists of about 125 lines of python code. doc/architecture/config-spec.txt000066400000000000000000000054621303523157100172520ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-config-spec: The Literal Configuration Specification ======================================= Literal configuration specifications are served to clients by the Bcfg2 server. This is a differentiating factor for Bcfg2; all other major configuration management systems use a non-literal configuration specification. That is, the clients receive a symbolic configuration that they process to implement target states. We took the literal approach for a few reasons: * A small list of configuration element types can be defined, each of which can have a set of defined semantics. This allows the server to have a well-formed model of client-side operations. Without a static lexicon with defined semantics, this isn't possible. This allows the server, for example, to record the update of a package as a coherent event. * Literal configurations do not require client-side processing. Removing client-side processing reduces the critical footprint of the tool. That is, the Bcfg2 client (and the tools it calls) need to be functional, but the rest of the system can be in any state. Yet, the client will receive a correct configuration. * Having static, defined element semantics also requires that all operations be defined and implemented in advance. The implementation can maximize reliability and robustness. In more ad-hoc setups, these operations aren't necessarily safely implemented. The Structure of Specifications ------------------------------- Configuration specifications contain some number of clauses. Two types of clauses exist. Bundles are groups of inter-dependent configuration entities. The purpose of bundles is to encode installation-time dependencies such that all new configuration is properly activated during reconfiguration operations. 
That is, if a daemon configuration file is changed, its daemon should be restarted. Another example of bundle usage is the reconfiguration of a software package. If a package contains a default configuration file, but it gets overwritten by an environment-specific one, then that updated configuration file should survive package upgrade. The purpose of bundles is to describe services, or reconfigured software packages. Independent clauses contain groups of configuration entities that aren't related in any way. This provides a convenient mechanism that can be used for bulk installations of software. Each of these clauses contains some number of configuration entities. A number of configuration entities exist including Path, Package, Service, etc. Each of these correspond to the obvious system item. Configuration specifications can get quite large; many systems have specifications that top one megabyte in size. An example of one is included in an appendix. These configurations can be written by hand, or generated by the server. doc/architecture/design.txt000066400000000000000000000067031303523157100163250ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-design: Design Considerations ===================== This section will discuss several aspects of the design of Bcfg2, and the particular use cases that motivated them. Initially, this will consist of a discussion of the system metadata, and the intended usage model for package indices as well. System Metadata --------------- Bcfg2 system metadata describes the underlying patterns in system configurations. It describes commonalities and differences between these specifications in a rigorous way. The groups used by Bcfg2's metadata are responsible for differentiating clients from one another, and building collections of allocatable configuration. The Bcfg2 metadata system has been designed with several high-level goals in mind. Flexibility and precision are paramount concerns; no configuration should be undescribable using the constructs present in the Bcfg2 repository. We have found (generally the hard way) that any assumptions about the inherent simplicity of configuration patterns tend to be wrong, so obscenely complex configurations must be representable, even if these requirements seem illogical during the implementation. In particular, we wanted to streamline several operations that commonly occurred in our environment. * Copying one node's profile to another node. In many environments, many nodes are instances of a common configuration specification. They all have similar roles and software. In our environment, desktop machines were the best example of this. Other than strictly per-host configuration like SSH keys, all desktop machines use a common configuration specification. This trivializes the process of creating a new desktop machine. * Creating a specialized version of an existing profile. In environments with highly varied configurations, departmental infrastructure being a good example, "another machine like X but with extra software" is a common requirement. For this reason, it must be trivially possible to inherit most of a configuration specification from some more generic source, while being able to describe overriding aspects in a convenient fashion. * Compose several pre-existing configuration aspects to create a new profile. The ability to compose configuration aspects allows the easy creation of new profiles based on a series of predefined set of configuration specification fragments. 
The end result is more agility in environments where change is the norm. In order for a classing system to be comprehensive, it must be usable in complex ways. The Bcfg2 metadata system has constructs that map cleanly to first-order logic. This implies that any complex configuration pattern can be represented (at all) by the metadata, as first-order logic is provably comprehensive. (There is a discussion later in the document describing the metadata system in detail, and showing how it corresponds to first-order logic) These use cases motivate several of the design decisions that we made. There must be a many to one correspondence between clients and groups. Membership in a given profile group must imbue a client with all of its configuration properties. Package Management ------------------ The interface provided in the Bcfg2 repository for package specification was designed with automation in mind. The goal was to support an append only interface to the repository, so that users do not need to continuously re-write already existing bits of specification. doc/architecture/goals.txt000066400000000000000000000042661303523157100161630ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-goals: Goals ===== * **Model configurations using declarative semantics.** Declarative semantics maximize the utility of configuration management tools; they provide the most flexibility for the tool to determine the right course of action in any given situation. This means that users can focus on the task of describing the desired configuration, while leaving the task of transitioning clients states to the tool. * **Configuration descriptions should be comprehensive.** This means that configurations served to the client should be sufficient to reproduce all desired functionality. This assumption allows the use of heuristics to detect extra configuration, aiding in reliable, comprehensive configuration definitions. * **Provide a flexible approach to user interactions.** Most configuration management systems take a rigid approach to user interactions; that is, either the client system is always correct, or the central system is. This means that users are forced into an overly proscribed model where the system asserts where correct data is. Configuration data modification is frequently undertaken on both the configuration server and clients. Hence, the existence of a single canonical data location can easily pose a problem during normal tool use. Bcfg2 takes a different approach. The default assumption is that data on the server is correct, however, the client has the option to run in another mode where local changes are catalogued for server-side integration. If the Bcfg2 client is run in dry run mode, it can help to reconcile differences between current client state and the configuration described on the server. The Bcfg2 client also searches for extra configuration; that is, configuration that is not specified by the configuration description. When extra configuration is found, either configuration has been removed from the configuration description on the server, or manual configuration has occurred on the client. Options related to two-way verification and removal are useful for configuration reconciliation when interactive access is used. * Plugins and administrative applications. * Incremental operations. doc/architecture/index.txt000066400000000000000000000011471303523157100161600ustar00rootroot00000000000000.. -*- mode: rst -*- .. 
_architecture-index: =========================== Detailed Bcfg2 Architecture =========================== Bcfg2 is based on a client-server architecture. The client is responsible for interpreting (but not processing) the configuration served by the server. This configuration is literal, so no local process is required. After completion of the configuration process, the client uploads a set of statistics to the server. This section will describe the goals and then the architecture motivated by it. .. toctree:: :maxdepth: 1 goals client server config-spec design doc/architecture/server.txt000066400000000000000000000071031303523157100163550ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-server: The Bcfg2 Server ================ The Bcfg2 server is responsible for taking a network description and turning it into a series of configuration specifications for particular clients. It also manages probed data and tracks statistics for clients. The Bcfg2 server takes information from two sources when generating client configuration specifications. The first is a pool of metadata that describes clients as members of an aspect-based classing system. That is, clients are defined in terms of aspects of their behavior. The other is a file system repository that contains mappings from metadata to literal configuration. These are combined to form the literal configuration specifications for clients. The Configuration Specification Construction Process ---------------------------------------------------- As we described in the previous section, the client connects to the server to request a configuration specification. The server uses the client's metadata and the file system repository to build a specification that is tailored for the client. This process consists of the following steps: * **Metadata Lookup** The server uses the client's IP address to initiate the metadata lookup. This initial metadata consists of a (profile, image) tuple. If the client already has metadata registered, then it is used. If not, then default values are used and stored for future use. This metadata tuple is expanded using some profile and class definitions also included in the metadata. The end result of this process is metadata consisting of hostname, profile, image, a list of classes, a list of attributes and a list of bundles. * **Abstract Configuration Construction** Once the server has the client metadata, it is used to create an abstract configuration. An abstract configuration contains all of the configuration elements that will exist in the final specification **without** any specifics. All entries will be typed (i.e. the tagname will be one of Package, Path, Action, etc) and will include a name. These configuration entries are grouped into bundles, which document installation time interdependencies. Here is an example of an abstract configuration entry: .. code-block:: xml * **Configuration Binding** The abstract configuration determines the structure of the client configuration, however, it doesn't yet contain literal configuration information. After the abstract configuration is created, each configuration entry must be bound to a client-specific value. The Bcfg2 server uses plugins to provide these client-specific bindings. The Bcfg2 server core contains a dispatch table that describes which plugins can handle requests of a particular type. The responsible plugin is located for each entry. It is called, passing in the configuration entry and the client's metadata. 
The behavior of plugins is explicitly undefined, so as to allow maximum flexibility. The behaviours of the stock plugins are documented elsewhere in this manual. Once this binding process is completed, the server has a literal, client-specific configuration specification. This specification is complete and comprehensive; the client doesn't need to process it at all in order to use it. It also represents the totality of the configuration specified for the client. Here is the entry from above once it has been bound to its literal specification (In this case, using the Packages plugin). .. code-block:: xml doc/client/000077500000000000000000000000001303523157100131015ustar00rootroot00000000000000doc/client/agent.txt000066400000000000000000000042531303523157100147440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-agent: ============================= Agent Functionality using SSH ============================= The Bcfg2 agent code provides the ability to trigger a client update from the server using a secure mechanism that is restricted to running the Bcfg2 client with the options the agent was started with. This same capability is provided by SSH keypairs, if properly configured. Setup is pretty easy: #. Create an ssh keypair that is to be used solely for triggering Bcfg2 client runs. This key may or may not have a password associated with it; a keyphrase will make things more secure, but will require a person to enter the key passphrase, so it will not be usable automatically.:: $ ssh-keygen -t dsa -b 1024 -f /path/to/key -N "" Generating public/private dsa key pair. Your identification has been saved in /path/to/key. Your public key has been saved in /path/to/key.pub. The key fingerprint is: aa:25:9b:a7:10:60:f3:eb:2b:ae:4b:1a:42:1b:63:5d desai@ubik #. Add this public key to root's authorized_keys file, with several commands prepended to it:: command="/usr/sbin/bcfg2 -q ",no-port-forwarding,no-X11-forwarding,no-pty,no-agent-forwarding,from="" This key is now only useful to call the Bcfg2 client, from the Bcfg2 server's ip address. If PermitRootLogin was set to no in sshd_config, you will need to set it to forced-commands-only. Adding a & to the end of the command will cause the command to immediately return. #. Now, to cause a client to reconfigure, call:: $ ssh -i /path/to/key root@client /usr/sbin/bcfg2 Note that you will not be able to alter the command line options from the ones specified in authorized_keys in any way. Also, it is not needed that the invocation of Bcfg2 in the ssh command match. The following will have the same result.:: $ ssh -i /path/to/key root@client /bin/true If a passphrase was used to create the keypair, then it will need to be entered here. See Also ======== `SSH "triggers" `_ (from Ganneff's Little Blog) doc/client/debugging.txt000066400000000000000000000026151303523157100156010ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-debugging: ================ Client Debugging ================ When working on the Bcfg2 client, it is helpful to employ a few specific techniques to isolate and remedy problems. First, running the client with the -f flag allows configuration from a local file, rather than querying the server. This helps rule out server configuration problems, and allows for rapid development. For example: ``bcfg2 -f test-config.conf`` with the following test-config.conf: .. code-block:: rst Next, it is important to look at the interactive mode. 
This is similar to the interactive mode on the server and provides an interactive Python interpreter with which one may manipulate all the objects in the client. It will setup all the infrastructure so you will have the appropriate objects to play with. It will run the client through once, then present you with an interpreter. Try it out with: ``python -i /usr/bin/bcfg2`` or, for more fun, a local config file and also enable Debugging and Verbose output with `-d` and `-v`, yielding ``python -i /usr/bin/bcfg2 -d -v -f test-config.conf``. Now we just explore; use ``dir()`` to examine different objects in the client, or run a reconfiguration again by calling `client.run()` doc/client/index.txt000066400000000000000000000022021303523157100147450ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-index: The Bcfg2 Client ================ The Bcfg2 client attempts to reconcile the current configuration state with the configuration passed down from the server using various client tools. It does not perform any processing of the target configuration description. We chose this architecture, as opposed to one with a smarter client, for a few reasons: * Client failure forces administrators to perform an O(n) reconfiguration operation. Simpler code is easier to debug and maintain. * Minimize the bootstrap size; a complicated client can require more aspects of the system to function in order for reconfiguration to work. * Isolate configuration generation functionality on the server, where it can be readily observed. This is the most complicated task that Bcfg2 performs. * The results of the configuration process fit a fairly simple model. We wanted to validate it. The result is that Bcfg2 has a programmable deployment engine that can be driven by anything that writes a compatible configuration description. .. toctree:: :maxdepth: 2 modes tools metadata agent debugging doc/client/metadata.txt000066400000000000000000000116421303523157100154260ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _client-metadata: =============== Client Metadata =============== This page describes ClientMetadata objects. These are used to describe clients in terms of a variety of parameters, group memberships, and so forth. Construction ============ ClientMetadata instances are constructed whenever the server needs to recognize a client. This occurs in every aspect of client server interaction: * Probing * Configuration Generation * Statistics Upload This construction process spans several server plugins. The :ref:`server-plugins-grouping-metadata` is responsible for initial instance creation, including the client hostname, profile, and basic group memberships. After this initial creation, Connector plugins (such as :ref:`server-plugins-probes` or :ref:`server-plugins-connectors-properties`) can add additional group memberships for clients. These memberships are merged into the instance; that is, the new group memberships are treated as if they were included in groups.xml. If any of these groups are defined in groups.xml, then groups included there are included in the ClientMetadata instance group list. At the end of this process, the ClientMetadata instance has its complete set of group memberships. At this point, each connector plugin has the opportunity to return an additional object which will be placed in an attribute corresponding to the Connector name. For example, the Probes plugin returns a dictionary of probe name to probe result mappings for the client. 
This dictionary is available as the "Probes" attribute. With this, ClientMetadata resolution is complete, and the ClientMetadata instance can be used by the rest of the system. Contents ======== ClientMetadata instances contain all of the information needed to differentiate clients from one another. This data includes: * hostname * groups * profile group * address information (if specified) ClientMetadata instances also contain a query object. This can be used to query the metadata of other clients. Currently, several methods are supported. In this table, we refer to the instance as meta. Each of these is a function that must be called. +------------------------------------------+-------------------+----------------+ | Name | Description | Return Type | +==========================================+===================+================+ | meta.query.names_by_groups([group list]) | Returns names of | List of | | | clients which are | client names | | | members of all | | | | groups | | +------------------------------------------+-------------------+----------------+ | meta.query.names_by_profile(profile) | Returns names of | List of | | | clients which use | client names | | | profile group | | +------------------------------------------+-------------------+----------------+ | meta.query.all_clients() | Returns names of | List of | | | all clients | client names | +------------------------------------------+-------------------+----------------+ | meta.query.all_groups() | Returns names of | List of | | | all groups | group names | +------------------------------------------+-------------------+----------------+ | meta.query.all() | Returns metadata | List of | | | for all clients | ClientMetadata | | | | instances | +------------------------------------------+-------------------+----------------+ | meta.query.by_name(name) | Returns metadata | ClientMetadata | | | for named client | instance | +------------------------------------------+-------------------+----------------+ | meta.query.by_groups([group list]) | Returns metadata | List of | | | for all members | ClientMetadata | | | of all groups | instances | +------------------------------------------+-------------------+----------------+ | meta.query.by_profile(profile) | Returns metadata | List of | | | for all profile | ClientMetadata | | | havers | instances | +------------------------------------------+-------------------+----------------+ In general, there is no substantial benefit to using name returning versions of the query functions; metadata resolution is (in general) fast. doc/client/modes.txt000066400000000000000000000050031303523157100147470ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-modes: ============ Client modes ============ Dryrun mode =========== Dryrun mode (-n) prevents the client from making changes, but gives you some insight into the state of the machine. This mode is also useful if you simply want to gather data from the client into the reporting system. Interactive mode ================ The client can be run interactively (-I) so that you are able to step through each operation in order to see what the client is doing. .. _client-modes-paranoid: Paranoid mode ============= Paranoid mode creates a backup of a local configuration file before Bcfg2 replaces the file. This allows for easier recovery by the local administrator. How do I use it? ---------------- #. In the Bcfg2 repository, put `paranoid='true'` in the ``info.xml`` file (this is the default setting). #. 
On the client, create ``/var/cache/bcfg2`` (or specify an alternate path in the [paranoid] section of ``/etc/bcfg2.conf``). #. On the client, run `bcfg2` with the `-P` option (alternatively, you can set *paranoid* to *true* in the **[client]** section of ``bcfg2.conf``). This will save a copy of the replaced file in ``/var/cache/bcfg2``, but it'll be named as the path to the file with /'s replaced by _'s. For example, the old ``/etc/hosts`` will be named ``/var/cache/bcfg2/etc_hosts``. Extra configuration ------------------- .. versionadded:: 1.0.0 Here is an example of how to use some of the extra paranoid features available. For the following section in ``bcfg2.conf`` (client-side):: [paranoid] path = /my/custom/backup/path max_copies = 5 You will have the file backups store in ``/my/custom/backup/path``. This will also keep the five most recent backups of files. Altering the global metadata to enable paranoid mode for all files ------------------------------------------------------------------ You may also want to just globally enable the *paranoid* attribute for all files distributed to clients from your Bcfg2 server. You can accomplish this by adding a global metadata override in your ``bcfg2.conf`` (server-side) with the following syntax:: [mdata] paranoid=true .. note:: This is the default setting. Overall client service mode =========================== .. versionadded:: 1.0.0 Overall client service mode. Specified on the client using ``-s ``. * default * perform all service manipulations * disabled * perform no service manipulations * build * attempt to stop all services started * deprecates/replaces -B doc/client/tools.txt000066400000000000000000000116341303523157100150070ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools: Client Tool Drivers =================== Client tool drivers allow Bcfg2 to execute configuration operations by interfacing with platform and distribution specific tools. Tool drivers handle any reconfiguration or verification operation. So far we have tools that primarily deal with packaging systems and service management. The POSIX tool also handles file system and permissions/groups operations. To write your own tool driver, to handle a new packaging format, or new service architecture see :ref:`development-client-driver` When the Bcfg2 client is run, it attempts to instantiate each of these drivers. The succeeding list of drivers are printed as a debug message after this process has completed. Drivers can supercede one another, for example, the Yum driver conflicts (and unloads) the RPM driver. This behavior can be overridden by running the Bcfg2 client with the ``-D`` flag. This flag takes a colon delimited list of drivers to use on the system. Currently these are the tool drivers that are distributed with Bcfg2: .. toctree:: :maxdepth: 2 :glob: tools/* Action ------ Pre and post-install tests and actions. This driver executes commands and supplies status information to the Bcfg2 server via the statistics mechanism. It can also be used to prevent bundle installation when pre-conditions are not met. See the UsingActions page for more details. APK --- This tool driver is used to handle packages on apk based systems like Alpine Linux and employs the "apk" executable. Extra information can be found at `apk-tools`_. .. _apk-tools: http://apk-tools.sourceforge.net/ APT --- Debian Packages. This tool driver is used to handle packages on dpkg based systems and employs the "apt" executable. Extra information can be found at :ref:`client-tools-apt`. 
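As a rough sketch (not taken verbatim from the schema documentation), a bound Package entry handled by this driver might look like the following; the ``version="auto"`` value assumes the entry is being managed by the Packages plugin, which resolves versions automatically:

.. code-block:: xml

    <Package type="deb" name="openssh-server" version="auto"/>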
Blast ----- Blastwave Packages. This tool driver is for blastwave packages on solaris Chkconfig --------- Tool to manage services (primarily on Redhat based distros). .. note:: Start and stop are standard arguments, but the one for reload isn't consistent across services. You can specify which argument to use with the ``target`` attribute in Service tags. Example: .. code-block:: xml DebInit ------- Debian Service Support; exec's update-rc.d to configure services. Encap ----- `Encap `_ Packages. FreeBSDInit ----------- FreeBSD Service Support. Only bundle updates will work. FreeBSDPackage -------------- FreeBSD Packages. Verifies packages and their version numbers but can't install packages. launchd ------- Mac OS X Services. To use this tool, you must maintain a standard launch daemon .plist file in ``/Library/LaunchDaemons/`` (example ssh.plist) and setup an entry in your config to load or unload the service. .. code-block:: xml Note the name is the *Label* specified inside of the .plist file Portage ------- Support for Gentoo Packages. POSIX ----- Files and Permissions are handled by the POSIX driver. Usage well documented other places. RcUpdate -------- Uses the rc-update executable to manage services on distributions such as Gentoo. RPM --- Executes RPM to manage packages on Redhat-based and similar systems. Consider using the :ref:`YUM ` tool instead if possible. SMF --- Solaris Service Support. Example legacy run service (lrc): .. code-block:: xml Systemd ------- Systemd service support. SYSV ---- Handles `System V Packaging `_ format that is available on Solaris. .. note:: If the Packages specified in the PackageList are datastream format packages distributed via HTTP, you must specify a simplefile attribute. Such packages will be downloaded and installed from a local path. Note the use of the uri attribute in the datastream format example. If the simplefile attribute exists, the :ref:`Pkgmgr ` plugin will automatically construct the url attribute by concatenating the uri and simplefile attributes (with an intervening slash). Datastream format over HTTP: .. code-block:: xml File system format over NFS or local path: .. code-block:: xml Upstart ------- Upstart service support. Uses `Upstart`_ to configure services. .. _Upstart: http://upstart.ubuntu.com/ YUM --- Handles RPMs using the YUM package manager. See :ref:`client-tools-yum` for more details. doc/client/tools/000077500000000000000000000000001303523157100142415ustar00rootroot00000000000000doc/client/tools/actions.txt000066400000000000000000000061341303523157100164460ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-actions: ======= Actions ======= This page describes use of the Action configuration entry. Action entries are commands that are executed either before bundle installation, after bundle installation or both. If exit status is observed, a failing pre-action will cause no modification of the enclosing bundle to be performed; all entries in included in that bundle will not be modified. Failing actions are reported through Bcfg2's reporting system, so they can be centrally observed. Actions look like: .. code-block:: xml .. xml:type:: ActionType Note that the status attribute tells the bcfg2 client to ignore return status, causing failures to still not be centrally reported. If central reporting of action failure is desired, set this attribute to 'check'. Actions may be completely defined inside of a bundle with the use of :ref:`server-configurationentries`, much like Packages, Services or Paths. 
The Rules plugin can also bind these entries. For example to include the above action in a bundle, first the Action entry must be included in the bundle: .. code-block:: xml ... Then a corresponding entry must be included in the Rules directory, like: .. code-block:: xml This allows different clients to get different actions as a part of the same bundle based on group membership. It is also possible to do this in one step in the bundle itself with a ``BoundAction`` tag, e.g.: .. code-block:: xml Example Action (add APT keys) ============================= This example will add the '0C5A2783' for aptitude. It is useful to run this during the client bootstrap process so that the proper keys are installed prior to the bcfg2 client trying to install a package which requires this key. .. code-block:: xml Example BoundAction (add RPM GPG keys) ====================================== This example will add the RPM-GPG-KEY-redhat-release key to the RPM GPG keyring **before** Package entries are handled on the client run. .. code-block:: xml doc/client/tools/apt.txt000066400000000000000000000005701303523157100155700ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-apt: =============== APT Client Tool =============== The APT tool allows you to configure custom options in ``bcfg2.conf`` for systems where the tools reside in non-standard locations. The available options (and their corresponding default values) are:: [APT] install_path = '/usr' var_path = '/var' etc_path = '/etc' doc/client/tools/augeas.txt000066400000000000000000000060621303523157100162530ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-augeas: ======== Augeas ======== The Augeas tool provides a way to use `Augeas `_ to edit files that may not be completely managed. In the simplest case, you simply tell Augeas which path to edit, and give it a sequence of commands: .. code-block:: xml The commands are run in document order. There's no need to do an explicit ``save`` at the end. These commands will be run if any of the paths do not already have the given setting. In other words, if any command has not already been run, they will all be run. So, if the first host already has all of the specified settings, then that Path will verify successfully and nothing will be changed. But suppose the first host looks like this:: 192.168.0.1 pigiron.example.com pigiron All that is missing is the second alias, ``piggy``. The entire Augeas script will be run in this case. It's important, then, to ensure that all commands you use are idempotent. (For instance, the ``Move`` and ``Insert`` commands are unlikely to be useful.) The Augeas paths are all relative to ``/files/etc/hosts``. The Augeas tool understands a subset of ``augtool`` commands. Valid tags are: ``Remove``, ``Move``, ``Set``, ``Clear``, ``SetMulti``, and ``Insert``. Refer to the official Augeas docs or the `Schema`_ below for details on the commands. The Augeas tool also supports one additional directive, ``Initial``, for setting initial file content when a file does not exist. For instance, the ``Xml`` lens fails to parse a file that does not exist, and, as a result, you cannot add content to it. You can use ``Initial`` to circumvent this issue: .. code-block:: xml <Test/> Editing files outside the default load path =========================================== If you're using Augeas to edit files outside of its default load path, you must manually specify the lens. For instance: .. 
code-block:: xml Note that there's no need to manually modify the load path by setting ``/augeas/load//incl``, nor do you have to call ``load`` explicitly. Schema ====== .. xml:group:: augeasCommands Performance =========== The Augeas tool is quite slow to initialize. For each ```` entry you have, it creates a new Augeas object internally, which can take several seconds. It's thus important to use this tool sparingly. doc/client/tools/posixusers.txt000066400000000000000000000077501303523157100172370ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-posixusers: ========== POSIXUsers ========== .. versionadded:: 1.3.0 The POSIXUsers tool handles the creation of users and groups as defined by ``POSIXUser`` and ``POSIXGroup`` entries. For a full description of those tags, see :ref:`server-plugins-generators-rules`. The POSIXUsers tool relies on the ``useradd``, ``usermod``, ``userdel``, ``groupadd``, ``groupmod``, and ``groupdel`` tools, since there is no Python library to manage users and groups. It expects those tools to be in ``/usr/sbin``. Primary group creation ====================== Each user must have a primary group, which can be specified with the ``group`` attribute of the ``POSIXUser`` tag. (If the ``group`` attribute is not specified, then a group with the same name as the user will be used.) If that group does not exist, the POSIXUsers tool will create it automatically. It does this by adding a ``POSIXGroup`` entry on the fly; this has a few repercussions: * When run in interactive mode (``-I``), Bcfg2 will prompt for installation of the group separately from the user. * The ``POSIXGroup`` entry is added to the same bundle as the ``POSIXUser`` entry, so if the group is created, the bundle is considered to have been modified and consequently Actions will be run and Services will be restarted. This should never be a concern, since the group can only be created, not modified (it has no attributes other than its name), and if the group is being created then the user will certainly be created or modified as well. * The group is created with no specified GID number. If you need to specify a particular GID number, you must explicitly define a ``POSIXGroup`` entry for the group. Managed UID/GID Ranges ====================== In many cases, there will be users on a system that you do not want to manage with Bcfg2, nor do you want them to be flagged as extra entries. For example, users from an LDAP directory. In this case, you may want to manage the local users on a machine with Bcfg2, while leaving the LDAP users to be managed by the LDAP directory. To do this, you can configure the UID and GID ranges that are to be managed by Bcfg2 by setting the following options in the ``[POSIXUsers]`` section of ``bcfg2.conf`` on the *client*: * ``uid_whitelist`` * ``uid_blacklist`` * ``gid_whitelist`` * ``gid_blacklist`` Each option takes a comma-delimited list of numeric ranges, inclusive at both bounds, one of which may be open-ended on the upper bound, e.g.:: [POSIXUsers] uid_blacklist=1000- gid_whitelist=0-500,700-999 This would tell Bcfg2 to manage all users whose uid numbers were *not* greater than or equal to 1000, and all groups whose gid numbers were 0 <= ``gid`` <= 500 or 700 <= ``gid`` <= 999. If a whitelist is provided, it will be used; otherwise, the blacklist will be used. (I.e., if you provide both, the blacklist will be ignored.) If a user or group is added to the specification with a uid or gid in an unmanaged range, it will produce an error. .. 
note:: If you specify POSIXUser or POSIXGroup tags without an explicit uid or gid, this will **not** prevent the users/groups from being created with a uid/gid in an unmanaged range. If you want that to happen, you will need to configure your ``useradd``/``groupadd`` defaults appropriately. Note also, however, that this will not cause Bcfg2 errors; it is only an error if a POSIXUser or POSIXGroup has an *explicit* uid/gid in an unmanaged range. Creating a baseline configuration ================================= The majority of users on many systems are created by the packages that are installed, but currently Bcfg2 cannot query the package database to determine these users. (In some cases, this is a limitation of the packaging system.) The often-tedious task of creating a baseline that defines all users and groups can be simplified by use of the ``tools/posixusers_baseline.py`` script, which outputs a bundle containing all users and groups on the machine it's run on. doc/client/tools/vcs.txt000066400000000000000000000021341303523157100155750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-vcs: =============== VCS Client Tool =============== .. warning: This tool is currently under development. .. note: Currently, the only supported VCS is git and svn. The VCS tool allows you to checkout particular revisions from a VCS repository on the client to a specified path. The tool requires the appropriate python libraries for the VCS used to be installed. See :ref:`server-plugins-generators-rules-vcs` for possible options. Example usage: You may want to create a `Rules/paths.xml` with the following: .. code-block:: xml Once the rule is created a client can reference the path from a bundle, this path will then be populated from the repository. To continue the above example, a file `Bundle/bcfg2.xml` might contain this: .. code-block:: xml doc/client/tools/yum.txt000066400000000000000000000260131303523157100156160ustar00rootroot00000000000000.. -*- mode: rst -*- .. _client-tools-yum: ============================ Bcfg2 RPM/YUM Client Drivers ============================ The RPM and YUM client drivers provide client support for RPMs (installed directly from URLs) and Yum repositories. Features ======== * Full RPM package identification using epoch, version, release and arch. * Support for multiple instances of packages with the Instance tag. * Better control of the RPM verification using the pkg_checks, pkg_verify and verify_flags attributes. * Support for install only packages such as the kernel packages. * Support for per instance ignoring of individual files for the RPM verification with the Ignore tag. * Multiple package Instances with full version information listed in interactive mode. * Support for installation and removal of gpg-pubkey packages. * Support for controlling what action is taken on package verification failure with the install_action, version_fail_action and verify_fail_action attributes. Installation ============ isprelink --------- ``isprelink`` is a Python module that can greatly improve the performance of the ``RPM`` driver. It should be installed on any system that has prelink installed and will be using the ``RPM`` driver. Source can be found at ftp://ftp.mcs.anl.gov/pub/bcfg/isprelink-0.1.2.tar.gz To compile and install prelink, execute:: python setup.py install in the rpmtools directory. The elfutils-libelf-devel package is required for the compilation. There may also be RPMs available in the repositories for your distro. 
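If you want to confirm that the module is visible to the Python
interpreter the Bcfg2 client uses, a quick import check is enough. This
is only a convenience sketch; the module name ``isprelink`` is assumed
here from the package name above.

.. code-block:: python

    # Quick check that the (assumed) isprelink module can be imported.
    try:
        import isprelink
        print("isprelink is available")
    except ImportError:
        print("isprelink not found; verification of prelinked files "
              "will be slower")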
Configuration and Usage ======================= Loading of RPM -------------- The RPM driver can be loaded by command line options, client configuration file options or as the default driver for RPM packages. From the command line:: bcfg2 -n -v -d -D Action,POSIX,Chkconfig,RPM This produces quite a bit of output so you may want to redirect the output to a file for review. In the ``bcfg2.conf`` file:: [client] drivers = Action,Chkconfig,POSIX,RPM Configuration File Options -------------------------- A number of paramters can be set in the client configuration for both the RPM and YUM drivers. Each driver has its own section (``[RPM]`` or ``[YUM]``), and most of the same options are accepted by each driver. An example config might look like this:: [RPM] pkg_checks = true pkg_verify = true erase_flags = allmatches installonlypackages = kernel, kernel-bigmem, kernel-enterprise, kernel-smp, kernel-modules, kernel-debug, kernel-unsupported, kernel-source, kernel-devel, kernel-default, kernel-largesmp-devel, kernel-largesmp, kernel-xen, gpg-pubkey install_action = install version_fail_action = upgrade verify_fail_action = reinstall installonlypackages ^^^^^^^^^^^^^^^^^^^ Install-only packages are packages that should only ever be installed or deleted, not upgraded. It is best practice to only ever install/delete kernel packages, the wisdom being that the package for the currently running kernel should always be installed. Doing an upgrade would delete the running kernel package. ``gpg-pubkey`` will be automatically added to the list of install-only packages. Example:: [RPM] installonlypackages = kernel, kernel-bigmem, kernel-enterprise, kernel-smp, kernel-modules, kernel-debug, kernel-unsupported, kernel-source, kernel-devel, kernel-default, kernel-largesmp-devel, kernel-largesmp, kernel-xen, gpg-pubkey This option is not honored by the ``YUM`` driver. erase_flags ^^^^^^^^^^^ erase_flags are rpm options used by 'rpm -erase' in the client ``Remove()`` method. The RPM erase is written using rpm-python and does not use the rpm command. The erase flags are specified in the client configuration file as a comma separated list and apply to all RPM erase operations. The following rpm erase options are supported. See the rpm man page for details:: noscripts notriggers repackage allmatches nodeps This option is not honored by the ``YUM`` driver. pkg_checks ^^^^^^^^^^ The RPM/YUM drivers do the following three checks/status: #. Installed #. Version #. rpm verify Setting pkg_checks = true (the default) in the client configuration file means that all three checks will be done for all packages. Setting pkg_checks = false in the client configuration file means that only the Installed check will be done for all packages. The true/false value can be any combination of upper and lower case. .. note:: #. pkg_checks must evaluate true for both the client (this option) and the package (see the Package Tag pkg_checks attribute below) for the action to take place. #. If pkg_checks = false then the Pkgmgr entries do not need the version information. See the examples towards the bottom of the page. pkg_verify ^^^^^^^^^^ The RPM/YUM drivers do the following three checks/status: #. Installed #. Version #. rpm verify Setting pkg_verify = true (the default) in the client configuration file means that all three checks will be done for all packages as long as pkg_checks = true. Setting pkg_verify = false in the client configuration file means that the rpm verify wil not be done for all packages on the client. 
The true/false value can be any combination of upper and lower case. .. note:: #. pkg_verify must evaluate true for both the client (this option) and the package instance (see the Instance Tag pkg_verify attribute below) for the action to take place. install_action ^^^^^^^^^^^^^^ ``install_action`` controls whether or not a package instance will be installed if the package instance isn't installed. If install_action = install then the package instance is installed. If install_action = none then the package instance is not installed. .. note:: #. install_action must evaluate true for both the client (this option) and the package instance (see the Instance Tag install_action attribute below) for the action to take place. version_fail_action ^^^^^^^^^^^^^^^^^^^ ``version_fail_action`` controls whether or not a package instance will be updated if the installed package instance isn't the same version as specified in the configuration. If version_fail_action = upgrade then the package instance is upgraded (or downgraded). If version_fail_action = none then the package instance is not upgraded (or downgraded). .. note:: #. verion_fail_action must evaluate true for both the client (this option) and the package instance (see the Instance Tag version_fail_action attribute below) for the action to take place. verify_fail_action ^^^^^^^^^^^^^^^^^^ ``verify_fail_action`` controls whether or not a package instance will be reinstalled if the installed package instance fails the Yum or RPM verify. If verify_fail_action = reinstall then the package instance is reinstalled. If verify_fail_action = none then the package instance is not reinstalled. .. note:: #. verify_fail_action must evaluate true for both the client (this option) and the package instance (see the Instance Tag verify_fail_action attribute below) for the action to take place. #. The driver will not attempt to reinstall a package instance if the only failure is a configuration file. Interactive Mode ---------------- Running the client in interactive mode (-I) prompts for the actions to be taken as before. Prompts are per package and may apply to multiple instances of that package. Each per package prompt will contain a list of actions per instance. In the RPM driver, actions are encoded as: * D - Delete * I - Install * R - Reinstall * U - Upgrade/Downgrade An example follows:: Install/Upgrade/delete Package aaa_base instance(s) - R(*:10.2-38.*) (y/N) Install/Upgrade/delete Package evms instance(s) - R(*:2.5.5-67.*) (y/N) Install/Upgrade/delete Package gpg-pubkey instance(s) - D(*:9c800aca-40d8063e.*) D(*:0dfb3188-41ed929b.*) D(*:7e2e3b05-44748aba.*) D(*:a1912208-446a0899.*) D(*:9c777da4-4515b5fd.*) D(*:307e3d54-44201d5d.*) (y/N) Install/Upgrade/delete Package module-init-tools instance(s) - R(*:3.2.2-62.*) (y/N) Install/Upgrade/delete Package multipath-tools instance(s) - R(*:0.4.7-29.*) (y/N) Install/Upgrade/delete Package pam instance(s) - R(*:0.99.6.3-29.1.*) (y/N) Install/Upgrade/delete Package perl-AppConfig instance(s) - U(None:1.52-4.noarch -> *:1.63-17.*) (y/N) Install/Upgrade/delete Package postfix instance(s) - R(*:2.3.2-28.*) (y/N) Install/Upgrade/delete Package sysconfig instance(s) - R(*:0.60.4-3.*) (y/N) Install/Upgrade/delete Package udev instance(s) - R(*:103-12.*) (y/N) GPG Keys -------- GPG is used by RPM to 'sign' packages. All vendor packages are signed with the vendors GPG key. Additional signatures maybe added to the rpm file at the users discretion. It is normal to have multiple GPG keys installed. 
For example, SLES10 out of the box has six GPG keys installed. To the RPM database all GPG 'packages' have the name 'gpg-pubkey', which may be nothing like the name of the file specified in the rpm -import command. For example on Centos 4 the file name is RPM-GPG-KEY-centos4. For SLES10 this means that there are six packages with the name 'gpg-pubkey' installed. RPM does not check GPG keys at package installation, while YUM does. RPM uses the rpm command for installation and does not therefore check GPG signatures at package install time. RPM uses rpm-python for verification and does by default do signature checks as part of the client Inventory process. To do the signature check the appropriate GPG keys must be installed. rpm-python is not very friendly if the required key(s) is not installed (it crashes the client). The RPM driver detects, on a per package instance basis, if the appropriate key is installed. If it is not, a warning message is printed and the signature check is disabled for that package instance, for that client run only. GPG keys can be installed and removed by the RPM driver. To install a GPG key configure it in Pkgmgr/Rules as a package and add gpg-pubkey to the clients abstract configuration. The gpg-pubkey package/instance is treated as an install only package. gpg-pubkey packages are installed by the RPM driver with the rpm -import command. gpg-pubkey packages will be removed by ``bcfg2 -r packages`` if they are not in the clients configuration. Ignoring Files during Verification ---------------------------------- The :ref:`path-ignore` Path tag is used to exempt individual files from the RPM verification. This is done by comparing the verification failure results with the ignore Path. If there is a match, that entry is not used by the client to determine if a package has failed verification. Path ignore entries can be specified at both the Package level, in which case they apply to all Instances, and/or at the Instance level, in which case they only apply to that instance. See :ref:`path-ignore` for more details. Example: .. code-block:: xml doc/composable-metadata000066400000000000000000000036251303523157100154560ustar00rootroot00000000000000This documents the redesign of the Metadata subsystem. Goals * Separate core metadata (groups, etc) functionality from augmentors * Enable metadata integration with external data sources * Make metadata features (group inclusion, categories) usable from external datasources The basic idea of this redesign is to split Metadata functions into two major components. One master MetadataPlugin instance handles client identification/authentication, profile assertion, group categories, and metadata instance construction. Multiple MetadataConnectorPlugin instances each contribute additional group memberships and a set of per-instance key/value pairs. This data is merged into the client metadata instance by the master MetadataPlugin. 
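Illustrative sketch only (the hook name follows the proposed interface
listed below; the group names and data values are made up): a
connector's contribution is simply a tuple of additional groups plus a
per-client data dictionary.

    def get_additional_metadata(client_metadata):
        groups = ["monitored"]                        # extra group memberships
        data = {"nagios_contact": "ops@example.com"}  # per-instance key/value pairs
        return (groups, data)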
Use Cases * Mapping external data into client metadata instances ** Probes ** Properties ** External network management ** Monitoring ** LDAP data API * Bcfg2.Server.Core.Core ** build_metadata(client_name) * Bcfg2.Server.Plugin.MetadataPlugin ** get_initial_metadata(client_name) ** merge_additional_metadata(metadata, source, group, data_dict) * Bcfg2.Server.Plugin.MetadataConnectorPlugin ** get_additional_metadata(client_metadata) Metadata Resolution Control Flow * B.S.P.MP.resolve_client() -> canonical client name * B.S.C.C.build_metadata() ** B.S.P.MP.get_initial_metadata() -> partial ClientMetadata inst ** [B.S.P.MCP.get_additional_metadata()] -> [([group list], {data dictionary})] ** [B.S.P.MP.merge_additional_metadata()] Implementation Plan (done) * Define new plugin classes * Split Probe code out to discrete plugin * Implement connector support in Core * switch callers to Core.build_metadata * Implement group inheritance/category safety for Connector groups Next Steps * Figure out new version of properties * ICE integration? * zultron's host properties * other external data sources doc/conf.py000066400000000000000000000276701303523157100131360ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Bcfg2 documentation build configuration file, created by # sphinx-quickstart on Sun Dec 13 12:10:30 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import re import sys import time # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../src/lib')) sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('exts')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'xmlschema'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # Path to XML schemas xmlschema_path = "../schemas" # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. #master_doc = 'contents' master_doc = 'index' # General information about the project. # py3k compatibility if sys.hexversion >= 0x03000000: project = 'Bcfg2' copyright = '2009-%s, Narayan Desai' % time.strftime('%Y') else: project = u'Bcfg2' copyright = u'2009-%s, Narayan Desai' % time.strftime('%Y') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. release = '1.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "collapsiblesidebar": "true" } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { 'index': 'indexsidebar.html' } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Bcfg2doc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). # py3k compatibility if sys.hexversion >= 0x03000000: latex_documents = [ ('index', 'Bcfg2.tex', 'Bcfg2 Documentation', 'Narayan Desai et al.', 'manual'), ] else: latex_documents = [ ('index', 'Bcfg2.tex', u'Bcfg2 Documentation', u'Narayan Desai et al.', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('man/bcfg2', 'bcfg2', 'Bcfg2 client tool', [], 1), ('man/bcfg2-admin', 'bcfg2-admin', 'Perform repository administration tasks', [], 8), ('man/bcfg2-build-reports', 'bcfg2-build-reports', 'Generate state reports for Bcfg2 clients', [], 8), ('man/bcfg2.conf', 'bcfg2.conf', 'Configuration parameters for Bcfg2', [], 5), ('man/bcfg2-crypt', 'bcfg2-crypt', 'Bcfg2 encryption and decryption utility', [], 8), ('man/bcfg2-info', 'bcfg2-info', 'Creates a local version of the Bcfg2 server core for state observation', [], 8), ('man/bcfg2-lint', 'bcfg2-lint', 'Check Bcfg2 specification for validity, common mistakes, and style', [], 8), ('man/bcfg2-lint.conf', 'bcfg2-lint.conf', 'Configuration parameters for bcfg2-lint', [], 5), ('man/bcfg2-report-collector', 'bcfg2-report-collector', 'Reports collection daemon', [], 8), ('man/bcfg2-reports', 'bcfg2-reports', 'Query reporting system for client status', [], 8), ('man/bcfg2-server', 'bcfg2-server', 'Server for client configuration specifications', [], 8), ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Bcfg2', u'Bcfg2 Documentation', u'Narayan Desai', 'Bcfg2', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' # autodoc settings autodoc_default_flags = ['members', 'show-inheritance'] autoclass_content = "both" private_re = re.compile(r'^\s*\.\.\s*private-include:\s*(.+)$') private_include = [] def skip_member_from_docstring(app, what, name, obj, skip, options): """ since sphinx 1.0 autodoc doesn't support the :private-members: directive, this function allows you to specify ``.. private-include: [,` | | | | ` | | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Probes, | Hostname | ``list`` of group names | Groups set by :ref:`server-plugins-probes` | | probegroups | | | | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Probes, | Hostname | ``dict`` of ````: | Other data set by :ref:`server-plugins-probes` | | probedata | | :class:`ProbeData | | | | | ` | | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Packages, | :attr:`Packages Collection cache key | :class:`Collection` | Kept by :ref:`server-plugins-generators-packages` in | | collections | ` | | order to expire repository metadata cached on disk | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Packages, | Hostname | :attr:`Packages Collection cache key | Used by the Packages plugin to return Collection | | clients | | ` | objects for clients. This is cross-referenced with | | | | | the ``Packages, collections`` cache | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Packages, | :attr:`Packages Collection cache key | ``set`` of package names | Cached results from looking up | | pkg_groups | `, | | ```` entries | | | hash of the selected package groups | | | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ | Packages, | :attr:`Packages Collection cache key | ``set`` of package names | Cached results from resolving complete package sets | | pkg_sets | `, | | for clients | | | hash of the initial package selection | | | +-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ These are enumerated so that they can be expired as needed by other plugins or other code points. .. automodule:: Bcfg2.Server.Cache doc/development/cfg.txt000066400000000000000000000063471303523157100154570ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-cfg: ======================= Cfg Handler Development ======================= The :ref:`server-plugins-generators-cfg` plugin offers multiple handlers to handle different entries in different ways. Writing a new Cfg handler is a relatively simple way to add significant new features to Cfg. Each new Cfg handler must be contained in its own module in ``Bcfg2.Server.Plugins.Cfg``, and the module and class name must be identical. The name should start with ``Cfg``, and should clearly indicate which of the handler types it is. A handler class may implement more than one handler type. 
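For orientation, a skeleton generator-style handler might look roughly
like the following. This is only a sketch: it assumes the generator
interface exposes the raw file content as ``self.data``, declares
handled file extensions with ``__extensions__``, and returns
client-specific content from ``get_data()``; consult the interface
documentation below for the authoritative signatures. Per the naming
rule above, this would live in
``Bcfg2/Server/Plugins/Cfg/CfgUpperGenerator.py`` so that the module
and class names match.

.. code-block:: python

    """ CfgUpperGenerator: hypothetical handler that serves files named
    ``<file>.upper`` with their content upper-cased. """

    from Bcfg2.Server.Plugins.Cfg import CfgGenerator


    class CfgUpperGenerator(CfgGenerator):
        """ Serve ``<file>.upper`` sources upper-cased. """

        # File extensions handled by this handler (attribute name
        # assumed from existing handlers)
        __extensions__ = ["upper"]

        def get_data(self, entry, metadata):
            # self.data holds the raw source file content; return the
            # data to be served to this client for ``entry``
            return self.data.upper()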
Cfg Handler Types ================= There are several different types of Cfg handlers. A new handler must inherit either from one of these classes, or from an existing handler. .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgCreator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgFilter .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgInfo .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgVerifier Cfg Handler Base Class ====================== In addition to the interfaces defined above, all Cfg handlers inherit from CfgBaseFileMatcher. .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgBaseFileMatcher Cfg Exceptions ============== Cfg handlers may produce the following exceptions: .. autoexception:: Bcfg2.Server.Plugins.Cfg.CfgVerificationError .. autoexception:: Bcfg2.Server.Plugins.Cfg.CfgCreationError In addition, Cfg handlers may produce the following base plugin exceptions: .. autoexception:: Bcfg2.Server.Plugin.exceptions.PluginExecutionError :noindex: .. autoexception:: Bcfg2.Server.Plugin.exceptions.PluginInitError :noindex: Existing Cfg Handlers ===================== Generators ---------- .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator.CfgPlaintextGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator.CfgGenshiGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.CfgCheetahGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator.CfgJinja2Generator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator.CfgEncryptedGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator.CfgEncryptedGenshiGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator.CfgEncryptedCheetahGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedJinja2Generator.CfgEncryptedJinja2Generator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.CfgAuthorizedKeysGenerator Creators -------- .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator.CfgPrivateKeyCreator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator Info Handlers ------------- .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgDefaultInfo .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgInfoXML.CfgInfoXML Verifiers --------- .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgExternalCommandVerifier.CfgExternalCommandVerifier Other Cfg Objects ================= These other objects comprise the remainder of the Cfg plugin, and are included for completeness. .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEntrySet .. autoclass:: Bcfg2.Server.Plugins.Cfg.Cfg .. automethod:: Bcfg2.Server.Plugins.Cfg.get_cfg doc/development/client-driver.txt000066400000000000000000000054511303523157100174620ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-client-driver: ============================== Writing A Client Tool Driver ============================== This page describes the step-by-step process of writing a client tool driver for a configuration element type. The included example describes an existing driver, and the process that was used to create it. #. Pick a name for the driver. In this case, we picked the name RPM. #. Create a file in ``src/lib/Bcfg2/Client/Tools`` with the same name (RPM.py) #. Create a class in this file with the same name (``class RPM``) * If it handles **Package** entries, subclass :class:`Bcfg2.Client.Tools.PkgTool` * If it handles **Service** entries, subclass :class:`Bcfg2.Client.Tools.SvcTool` * Otherwise, subclass :class:`Bcfg2.Client.Tools.Tool`. #. 
Add any required executable programs to :attr:`Bcfg2.Client.Tools.Tool.__execs__` #. Set :attr:`Bcfg2.Client.Tools.Tool.__handles__` to a list of ``(, )`` tuples. This determines which entries the Tool module can be used on. In this case, we set ``__handles__ = [('Package', 'rpm')]``. #. Add verification support by defining a method named ``Verify``. See :func:`Bcfg2.Client.Tools.Tool.Inventory` for details. This method should return True/False depending on current entry installation status. In the failure path, the current state of failing entry attributes should be set in the entry, to aid in auditing. (For example, if a file should be mode 644, and is currently mode 600, then set attribute current_mode='600' in the input entry) #. Add installation support by defining a method named ``Install`_ based core. This page documents the server core interface so that other cores can be written to take advantage of other technologies, e.g., `Tornado `_ or `Twisted `_. A core implementation needs to: * Override :func:`Bcfg2.Server.Core.Core._run` to handle server startup. * Override :func:`Bcfg2.Server.Core.Core._block` to run the blocking server loop. * Call :func:`Bcfg2.Server.Core.Core.shutdown` on orderly shutdown. A core that wants to use the network (i.e., a core that isn't used entirely for introspection, as in :ref:`bcfg2-info `, or other local tasks) should inherit from :class:`Bcfg2.Server.Core.NetworkCore`, and must also override :func:`Bcfg2.Server.Core.NetworkCore._daemonize` to handle daemonization, writing the PID file, and dropping privileges. Nearly all XML-RPC handling is delegated entirely to the core implementation. It needs to: * Call :func:`Bcfg2.Server.Core.NetworkCore.authenticate` to authenticate clients. * Handle :exc:`xmlrpclib.Fault` exceptions raised by the exposed XML-RPC methods as appropriate. * Dispatch XML-RPC method invocations to the appropriate method, including Plugin RMI. The client address pair (a tuple of remote IP address and remote hostname) must be prepended to the argument list passed to built-in methods (i.e., not to plugin RMI). Additionally, running and configuring the server is delegated to the core. It needs to honor the configuration options that influence how and where the server runs, including the server location (host and port), listening interfaces, and SSL certificate and key. Base Core ========= .. automodule:: Bcfg2.Server.Core Core Implementations ==================== Builtin Core ------------ The builtin server core consists of the core implementation (:class:`Bcfg2.Server.BuiltinCore.Core`) and the XML-RPC server implementation (:mod:`Bcfg2.Server.SSLServer`). Core ~~~~ .. automodule:: Bcfg2.Server.BuiltinCore XML-RPC Server ~~~~~~~~~~~~~~ .. automodule:: Bcfg2.Server.SSLServer Multiprocessing Core -------------------- .. automodule:: Bcfg2.Server.MultiprocessingCore CherryPy Core ------------- .. automodule:: Bcfg2.Server.CherrypyCore doc/development/documentation.txt000066400000000000000000000065051303523157100175650ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-documentation: =============== Documentation =============== There are two parts of documentation in the Bcfg2 project: * The Wiki_ * The Manual_ The wiki ======== .. _Wiki: http://bcfg2.org .. _Manual: http://docs.bcfg2.org .. _Trac: http://trac.edgewall.org/ .. _OpenID: https://openid.org/ .. _MCS: http://www.mcs.anl.gov/ .. _Argonne National Laboratory: http://www.anl.gov/ A python-based Trac_ instance is used for the Bcfg2 development website. 
The Wiki_ part of the website can be edited after you have successfully logged in. In order to login, a vaild OpenID provider is needed. Please request your access to the Wiki_ on the :ref:`help-mailinglist` or in the :ref:`help-irc`. The manual ========== .. _rst: http://en.wikipedia.org/wiki/ReStructuredText .. _Sphinx: http://sphinx.pocoo.org .. _Docutils: http://docutils.sourceforge.net The source for the Manual_ is located in the ``doc/`` directory in the git repository or in the source tarball. All files are written in rst_ (ReStructuredText) format. Sphinx_ is used to build the documentation from the restructured text sources. Building the Manual ------------------- * Install the prerequisites. Docutils_ and Sphinx_ are needed to build. * For Debian (Lenny) the tools are available in the `backports `_ repository; installation can be done with the following:: apt-get -t lenny-backports install python-sphinx * The tools for Fedora based systems are in the `Fedora Package Collection `_; installation can be done easily with Yum:: yum -y install python-sphinx python-docutils * The tools for RHEL6-based systems are in the base distribution; you can install them with Yum:: yum -y install python-sphinx python-docutils * The tools for RHEL5-based systems are in the `Extra Packages for Enterprise Linux(EPEL) `_ repository; if your system is configured for EPEL, you can install them with Yum:: yum -y install python-sphinx python-docutils * Additionally, to build the PDF version: * LaTeX * pdftex * Download the source. Please refer to :ref:`source` for more details. * Build the HTML version by running the following command in the top level of the source directory. The output will appear in ``build/sphinx/html``:: python setup.py build_sphinx * Building the PDF version :: python setup.py build_sphinx --builder=latex cd build/sphinx/latex make .. _doc-styleguide: Documentation Style Guide for Bcfg2 =================================== This is a style guide to use when creating documentation for Bcfg2. It is meant to be helpful, not a hindrance. Basics ------ **Bcfg2** When referring to project, Bcfg2 is the preferred use of case. **Monospace fonts** When referring to commands written on the command line use ``monospace`` fonts. **Repository** When used alone this refers to a Bcfg2 :term:`repository`. When there is a chance for confusion, for instance in documents that also discuss :term:`VCS`, be sure to use the longer phrase "Bcfg2 :term:`repository`". Sections -------- Unless necessary, all the documentation follows the sections header rules available at http://docs.python.org/devguide/documenting.html#sections doc/development/fam.txt000066400000000000000000000047011303523157100154530ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-fam: ========================== File Monitor Development ========================== Bcfg2 depends heavily on file activity monitoring (FAM) to reload data from disk when it changes. A number of FAM backends are supported (documented thoroughly below), but you may wish to develop additional backends. For instance, the current best FAM backend on Linux is INotify, but if you are running a non-Linux system that lacks INotify support you may wish to write a backend for your OS (e.g., a kqueue backend for BSD-based Bcfg2 servers). This page documents the FAM API and the existing FAM backends. .. 
_development-fam-event-codes: Event Codes =========== Five event codes are generally understood: +----------+-----------------------------------------------------------+ | Event | Description | +==========+===========================================================+ | exists | Produced when a monitor is added to a file or directory | | | that exists, and produced for all files or directories | | | inside a directory that is monitored (non-recursively). | +----------+-----------------------------------------------------------+ | endExist | Produced immediately after ``exists``. No plugins should | | | process this event meaningfully, so FAM backends do not | | | need to produce it. | +----------+-----------------------------------------------------------+ | created | Produced when a file is created inside a monitored | | | directory. | +----------+-----------------------------------------------------------+ | changed | Produced when a monitored file, or a file inside a | | | monitored directory, is changed. | +----------+-----------------------------------------------------------+ | deleted | Produced when a monitored file, or a file inside a | | | monitored directory, is deleted. | +----------+-----------------------------------------------------------+ Basics ====== .. automodule:: Bcfg2.Server.FileMonitor Existing FAM Backends ===================== Pseudo ------ .. automodule:: Bcfg2.Server.FileMonitor.Pseudo Gamin ----- .. automodule:: Bcfg2.Server.FileMonitor.Gamin Inotify ------- .. automodule:: Bcfg2.Server.FileMonitor.Inotify doc/development/index.txt000066400000000000000000000011131303523157100160110ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-index: ================= Bcfg2 Development ================= There are many ways to get involved in Bcfg2 development. Here we will outline some things that can help you get familiar with the various areas of the Bcfg2 code. The easiest way to submit a patch is to submit a pull request on Github. You can fork and clone the source tree at https://github.com/bcfg2/Bcfg2 Users wishing to contribute on a regular basis can apply for direct git access. Mail the :ref:`help-mailinglist` for details. .. toctree:: :maxdepth: 1 :glob: * doc/development/lint.txt000066400000000000000000000110101303523157100156450ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-lint: =============================== bcfg2-lint Plugin Development =============================== ``bcfg2-lint``, like most parts of Bcfg2, has a pluggable backend that lets you easily write your own plugins to verify various parts of your Bcfg2 specification. Plugins are included in a module of the same name as the plugin class in :mod:`Bcfg2.Server.Lint`, e.g., :mod:`Bcfg2.Server.Lint.Validate`. .. note:: It is no longer possible to include lint plugins directly in a Bcfg2 server plugin, e.g., :class:`Bcfg2.Server.Plugins.Metadata.MetadataLint`. Plugin Types ============ There are two types of ``bcfg2-lint`` plugins: Serverless plugins ------------------ Serverless plugins are run before ``bcfg2-lint`` starts up a local Bcfg2 server, so the amount of introspection they can do is fairly limited. They can directly examine the Bcfg2 specification, of course, but they can't examine the entries handled by a given plugin or anything that requires a running server. If a serverless plugin raises a lint error, however, the server will not be started and no `Server plugins`_ will be run. 
This makes them useful to check for the sorts of errors that might prevent the Bcfg2 server from starting properly. Serverless plugins must subclass :class:`Bcfg2.Server.Lint.ServerlessPlugin`. :mod:`Bcfg2.Server.Lint.Validate` is an example of a serverless plugin. Server plugins -------------- Server plugins are run after a local Bcfg2 server has been started, and have full access to all of the parsed data and so on. Because of this, they tend to be easier to use than `Serverless plugins`_, and thus are more common. Server plugins are only run if all `Serverless plugins`_ run successfully (i.e., raise no errors). Server plugins must subclass :class:`Bcfg2.Server.Lint.ServerPlugin`. :mod:`Bcfg2.Server.Lint.Genshi` is an example of a server plugin. Error Handling ============== The job of a ``bcfg2-lint`` plugin is to find errors. Each error that a plugin may produce must have a name, a short string that briefly describes the error and will be used to configure error levels in ``bcfg2.conf``. It must also have a default reporting level. Possible reporting levels are "error", "warning", or "silent". All of the errors that may be produced by a plugin must be returned as a dict by :func:`Bcfg2.Server.Lint.Plugin.Errors`. For instance, consider :func:`Bcfg2.Server.Lint.InfoXML.InfoXML.Errors`: .. code-block:: python @classmethod def Errors(cls): return {"no-infoxml": "warning", "deprecated-info-file": "warning", "paranoid-false": "warning", "required-infoxml-attrs-missing": "error"} This means that the :class:`Bcfg2.Server.Lint.InfoXML.InfoXML` lint plugin can produce five lint errors, although four of them are just warnings by default. The errors returned by each plugin's ``Errors()`` method will be passed to :func:`Bcfg2.Server.Lint.ErrorHandler.RegisterErrors`, which will use that information and the information in the config file to determine how to display (or not display) each error to the end user. Errors are produced in a plugin with :func:`Bcfg2.Server.Lint.Plugin.LintError`, which takes two arguments: the name of the error, which must correspond to a key in the dict returned by :func:`Bcfg2.Server.Lint.Plugin.Errors`, and a freeform string that will be displayed to the end user. Note that the error name and its display are thus only tied together when the error is produced; that is, a single error (by name) can have two completely different outputs. Basics ====== .. automodule:: Bcfg2.Server.Lint Existing ``bcfg2-lint`` Plugins =============================== AWSTags ------- .. automodule:: Bcfg2.Server.Lint.AWSTags Bundler ------- .. automodule:: Bcfg2.Server.Lint.Bundler Comments -------- .. automodule:: Bcfg2.Server.Lint.Comments Genshi ------ .. automodule:: Bcfg2.Server.Lint.Genshi GroupNames ---------- .. automodule:: Bcfg2.Server.Lint.GroupNames GroupPatterns ------------- .. automodule:: Bcfg2.Server.Lint.GroupPatterns InfoXML ------- .. automodule:: Bcfg2.Server.Lint.InfoXML MergeFiles ---------- .. automodule:: Bcfg2.Server.Lint.MergeFiles Metadata -------- .. automodule:: Bcfg2.Server.Lint.Metadata Pkgmgr ------ .. automodule:: Bcfg2.Server.Lint.Pkgmgr RequiredAttrs ------------- .. automodule:: Bcfg2.Server.Lint.RequiredAttrs TemplateHelper -------------- .. automodule:: Bcfg2.Server.Lint.TemplateHelper Validate -------- .. automodule:: Bcfg2.Server.Lint.Validate doc/development/option_parsing.txt000066400000000000000000000201031303523157100177350ustar00rootroot00000000000000.. -*- mode: rst -*- .. 
_development-option-parsing: ==================== Bcfg2 Option Parsing ==================== Bcfg2 uses an option parsing mechanism based on the Python :mod:`argparse` module. It does several very useful things that ``argparse`` does not: * Collects options from various places, which lets us easily specify per-plugin options, for example; * Automatically loads components (such as plugins); * Synthesizes option values from the command line, config files, and environment variables; * Can dynamically create commands with many subcommands (e.g., bcfg2-info and bcfg2-admin); and * Supports keeping documentation inline with the option declaration, which will make it easier to generate man pages. Collecting Options ================== One of the more important features of the option parser is its ability to automatically collect options from loaded components (e.g., Bcfg2 server plugins). Given the highly pluggable architecture of Bcfg2, this helps ensure two things: #. We do not have to specify all options in all places, or even in most places. Options are specified alongside the class(es) that use them. #. All options needed for a given script to run are guaranteed to be loaded, without the need to specify all components that script uses manually. For instance, assume a few plugins: * The ``Foo`` plugin takes one option, ``--foo`` * The ``Bar`` plugin takes two options, ``--bar`` and ``--force`` The plugins are used by the ``bcfg2-quux`` command, which itself takes two options: ``--plugins`` (which selects the plugins) and ``--test``. The options would be selected at runtime, so for instance these would be valid: .. code-block:: bash bcfg2-quux --plugins Foo --foo --test bcfg2-quux --plugins Foo,Bar --foo --bar --force bcfg2-quux --plugins Bar --force But this would not: bcfg2-quux --plugins Foo --bar The help message would reflect the options that are available to the default set of plugins. (For this reason, allowing component lists to be set in the config file is very useful; that way, usage messages reflect the components in the config file.) Components (in this example, the plugins) can be classes or modules. There is no required interface for an option component. They may *optionally* have: * An ``options`` attribute that is a list of :class:`Bcfg2.Options.Options.Option` objects or option groups. * A boolean ``parse_first`` attribute; if set to True, the options for the component are parsed before all other options. This is useful for, e.g., Django database settings, which must be parsed before plugins that use Django can be loaded. * A function or static method, ``options_parsed_hook``, that is called when all options have been parsed. (This will be called again if :func:`Bcfg2.Options.Parser.Parser.reparse` is called.) * A function or static method, ``component_parsed_hook``, that is called when early option parsing for a given component has completed. This is *only* called for components with ``parse_first`` set to True. It is passed a single argument: a :class:`argparse.Namespace` object containing the complete set of early options. Options are collected through two primary mechanisms: #. The :class:`Bcfg2.Options.Actions.ComponentAction` class. When a ComponentAction subclass is used as the action of an option, then options contained in the classes (or modules) given in the option value will be added to the parser. #. Modules that are not loaded via a :class:`Bcfg2.Options.Actions.ComponentAction` option may load options at runtime. 
Since it is preferred to add components instead of just options, loading options at runtime is generally best accomplished by creating a container object whose only purpose is to hold options. For instance: .. code-block:: python def foo(): # do stuff class _OptionContainer(object): options = [ Bcfg2.Options.BooleanOption("--foo", help="Enable foo")] @staticmethod def options_parsed_hook(): if Bcfg2.Options.setup.foo: foo() Bcfg2.Options.get_parser().add_component(_OptionContainer) The Bcfg2.Options module ======================== .. currentmodule:: Bcfg2.Options .. autodata:: setup Options ------- The base :class:`Bcfg2.Options.Option` object represents an option. Unlike options in :mod:`argparse`, an Option object does not need to be associated with an option parser; it exists on its own. .. autoclass:: Option .. autoclass:: PathOption .. autoclass:: BooleanOption .. autoclass:: PositionalArgument The Parser ---------- .. autoclass:: Parser .. autofunction:: get_parser .. autoexception:: OptionParserException Option Groups ------------- Options can be grouped in various meaningful ways. This uses a variety of :mod:`argparse` functionality behind the scenes. In all cases, options can be added to groups in-line by simply specifying them in the object group constructor: .. code-block:: python options = [ Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.Option(...), Bcfg2.Options.Option(...), required=True), ....] Nesting object groups is supported in theory, but barely tested. .. autoclass:: OptionGroup .. autoclass:: ExclusiveOptionGroup .. autoclass:: Subparser .. autoclass:: WildcardSectionGroup Subcommands ----------- This library makes it easier to work with programs that have a large number of subcommands (e.g., :ref:`bcfg2-info ` and :ref:`bcfg2-admin `). The normal implementation pattern is this: #. Define all of your subcommands as children of :class:`Bcfg2.Options.Subcommand`. #. Create a :class:`Bcfg2.Options.CommandRegistry` object that will be used to register all of the commands. Registering a command collect its options and adds it as a :class:`Bcfg2.Options.Subparser` option group to the main option parser. #. Register your commands with the :func:`Bcfg2.Options.CommandRegistry.register_commands` method of your ``CommandRegistry`` object. #. Add options from the :attr:`Bcfg2.Options.CommandRegistry.command_options` attribute to the option parser. #. Parse options, and run. :mod:`Bcfg2.Server.Admin` provides a fairly simple implementation, where the CLI class subclasses the command registry: .. code-block:: python class CLI(Bcfg2.Options.CommandRegistry): def __init__(self): Bcfg2.Options.CommandRegistry.__init__(self) self.register_commands(globals().values(), parent=AdminCmd) parser = Bcfg2.Options.get_parser( description="Manage a running Bcfg2 server", components=[self]) parser.add_options(self.subcommand_options) parser.parse() In this case, commands are collected from amongst all global variables (the most likely scenario), and they must be children of :class:`Bcfg2.Server.Admin.AdminCmd`, which itself subclasses :class:`Bcfg2.Options.Subcommand`. Commands are defined by subclassing :class:`Bcfg2.Options.Subcommand`. At a minimum, the :func:`Bcfg2.Options.Subcommand.run` method must be overridden, and a docstring written. .. autoclass:: Subcommand .. autoclass:: CommandRegistry Actions ------- Several custom argparse `actions `_ provide some of the option collection magic of :mod:`Bcfg2.Options`. .. autoclass:: ConfigFileAction .. autoclass:: ComponentAction .. 
autoclass:: PluginsAction Option Types ------------ :mod:`Bcfg2.Options` provides a number of useful types for use as the `type `_ keyword argument to the :class:`Bcfg2.Options.Option` constructor. .. autofunction:: Bcfg2.Options.Types.path .. autofunction:: Bcfg2.Options.Types.comma_list .. autofunction:: Bcfg2.Options.Types.colon_list .. autofunction:: Bcfg2.Options.Types.octal .. autofunction:: Bcfg2.Options.Types.username .. autofunction:: Bcfg2.Options.Types.groupname .. autofunction:: Bcfg2.Options.Types.timeout .. autofunction:: Bcfg2.Options.Types.size Common Options -------------- .. autoclass:: Common doc/development/packages.txt000066400000000000000000000030151303523157100164630ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-packages: ======================= Developing for Packages ======================= The :ref:`server-plugins-generators-packages` plugin offers multiple backends to support different types of software repositories. New backends can be written to handle new types of software repositories. Each new Packages backend must be contained in its own module in ``Bcfg2.Server.Plugins.Packages``. Each module must implement two classes: A :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` subclass called ``Collection``, and a :class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass called ``Source``. E.g., the :mod:`Bcfg2.Server.Plugins.Packages.Yum` backend has :class:`Bcfg2.Server.Plugins.Packages.Yum.YumCollection` and :class:`Bcfg2.Server.Plugins.Packages.Yum.YumSource` objects. These interfaces are explained in detail below. The Collection Object ===================== .. automodule:: Bcfg2.Server.Plugins.Packages.Collection The Source Object ================= .. automodule:: Bcfg2.Server.Plugins.Packages.Source The Packages Module =================== .. automodule:: Bcfg2.Server.Plugins.Packages Packages Source Description =========================== .. automodule:: Bcfg2.Server.Plugins.Packages.PackagesSources Existing Packages Backends ========================== Yum --- .. automodule:: Bcfg2.Server.Plugins.Packages.Yum APT --- .. automodule:: Bcfg2.Server.Plugins.Packages.Apt Pacman ------ .. automodule:: Bcfg2.Server.Plugins.Packages.Pac doc/development/plugins.txt000066400000000000000000000165471303523157100164040ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _development-plugins: Bcfg2 Plugin development ======================== While the Bcfg2 server provides a good interface for representing general system configurations, its plugin interface offers the ability to implement configuration interfaces and representation tailored to problems encountered by a particular site. This chapter describes what plugins are good for, what they can do, and how to implement them. Several plugins themselves have pluggable backends, and for narrow cases you may want to develop a backend for an existing plugin rather than an entirely new plugin. See the following pages for more information: .. toctree:: :maxdepth: 1 cfg packages Bcfg2 Plugins ------------- Bcfg2 plugins are loadable python modules that the Bcfg2 server loads at initialization time. These plugins can contribute to the functions already offered by the Bcfg2 server or can extend its functionality. In general, plugins will provide some portion of the configuration for clients, with a data representation that is tuned for a set of common tasks. 
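For illustration, here is a minimal sketch of what a small plugin module might look like. The ``MyPlugin`` name and the logging behavior are invented for this example, and the choice of interface (here, ``ClientRunHooks``) is only one of many; the real base classes and interfaces a plugin can implement are described under "Server Plugin Types" below, so treat this as a sketch rather than a canonical template.

.. code-block:: python

    import Bcfg2.Server.Plugin


    class MyPlugin(Bcfg2.Server.Plugin.Plugin,
                   Bcfg2.Server.Plugin.ClientRunHooks):
        """ Hypothetical plugin that reacts to client runs. """
        name = 'MyPlugin'
        __author__ = 'docs@example.com'

        def end_client_run(self, metadata):
            # ClientRunHooks lets a plugin run code at various points
            # in a client run; here we just log which client finished.
            self.logger.info("%s: client %s finished a run" %
                             (self.name, metadata.hostname))
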
Much of the core functionality of Bcfg2 is implemented by several plugins, however, they are not special in any way; new plugins could easily supplant one or all of them. .. automodule:: Bcfg2.Server.Plugin :no-members: Server Plugin Types ------------------- A plugin must implement at least one of the interfaces described below. Each interface is available as a class in :mod:`Bcfg2.Server.Plugin`. In most cases, a plugin must also inherit from :class:`Bcfg2.Server.Plugin.base.Plugin`, which is the base Plugin object (described below). Some of the interfaces listed below are themselves Plugin objects, so your custom plugin would only need to inherit from the plugin type. Plugin ^^^^^^ .. autoclass:: Bcfg2.Server.Plugin.base.Plugin :members: name, __author__, experimental, deprecated, conflicts, sort_order, __rmi__, init_repo, shutdown :inherited-members: :show-inheritance: With the exceptions of :class:`Bcfg2.Server.Plugin.interfaces.Statistics` and :class:`Bcfg2.Server.Plugin.interfaces.ThreadedStatistics`, the plugin interfaces listed below do **not** inherit from Plugin; they simply provide interfaces that a given plugin may or must implement. Interfaces ^^^^^^^^^^ .. class:: Bcfg2.Server.Plugin.interfaces .. automodule:: Bcfg2.Server.Plugin.interfaces Exposing XML-RPC Functions -------------------------- Plugins can expose XML-RPC functions that can then be called with :ref:`bcfg2-admin xcmd `. Note that there is absolutely no access control beyond the initial authentication, so take care to not expose any data or behavior via XML-RPC that you would not want all of your clients to be able to see or use. To expose a function, simply add its name to the ``__rmi__`` class attribute. (RMI stands for "Remote Method Invocation.") Consider this example from the :ref:`server-plugins-generators-packages` plugin: .. code-block:: python class Packages(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.StructureValidator, Bcfg2.Server.Plugin.Generator, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.ClientRunHooks): name = 'Packages' conflicts = ['Pkgmgr'] __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload'] def Refresh(self): self._load_config(force_update=True) return True def Reload(self): self._load_config() return True This exposes two functions, ``Refresh`` and ``Reload``, in addition to any default methods that are already exposed. To call one of these functions, you could run:: bcfg2-admin xcmd Packages.Refresh Invalidating Caches ------------------- .. versionadded:: 1.3.0 In Bcfg2 1.3.0, some limited :ref:`server-caching` was introduced. If you are writing a :class:`Bcfg2.Server.Plugin.interfaces.Connector` plugin that implements :func:`Bcfg2.Server.Plugin.interfaces.Connector.get_additional_groups` or :func:`Bcfg2.Server.Plugin.interfaces.Connector.get_additional_data`, then you need to be able to invalidate the server metadata cache in order to be compatible with the ``cautious`` or ``aggressive`` caching modes. The two attributes you need to know about are: * :attr:`Bcfg2.Server.Core.metadata_cache_mode`: A string description of the caching mode. See :ref:`server-caching` for a description of each mode. * :attr:`Bcfg2.Server.Core.metadata_cache`: A dict-like :class:`Bcfg2.Server.Cache.Cache` object that stores the cached data. :class:`Bcfg2.Server.Plugin.base.Plugin` objects have access to the :class:`Bcfg2.Server.Core` object as ``self.core``. 
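For illustration, a Connector plugin might expire cached data for a single client whenever the data it reports for that client changes. This is only a sketch: the ``refresh`` method and the ``self.data`` store are hypothetical, but the ``self.core.metadata_cache_mode`` and ``self.core.metadata_cache`` attributes are the ones described above, and the ``expire`` method used here is described in more detail below.

.. code-block:: python

    import Bcfg2.Server.Plugin


    class ExternalData(Bcfg2.Server.Plugin.Plugin,
                       Bcfg2.Server.Plugin.Connector):
        """ Hypothetical Connector plugin that reports data gathered
        from an external source. """
        name = 'ExternalData'

        def get_additional_data(self, metadata):
            # self.data is a hypothetical dict of per-client data
            return self.data.get(metadata.hostname, {})

        def refresh(self, hostname, newdata):
            """ Hypothetical hook called when the external source
            reports new data for a client. """
            if self.data.get(hostname) != newdata:
                self.data[hostname] = newdata
                # Expire cached metadata for just this client so that
                # the cautious and aggressive cache modes stay correct.
                if self.core.metadata_cache_mode in ['cautious',
                                                     'aggressive']:
                    self.core.metadata_cache.expire(hostname)
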
In general, you'll be interested in the :func:`Bcfg2.Server.Cache.Cache.expire` method; if called with no arguments, it expires all cached data; if called with one string argument, it expires cached data for the named client. It's important, therefore, that your Connector plugin can either track when changes are made to the data or group membership it reports, and expire cached data appropriately when in ``cautious`` or ``aggressive`` mode; or prudently flag an incompatibility with those two modes. For examples, see: * :func:`Bcfg2.Server.Plugins.Probes.ReceiveData` takes a copy of the groups that have been assigned to a client by :ref:`server-plugins-probes`, and if that data changes when new probe data is received, it invalidates the cache for that client. * :func:`Bcfg2.Server.Plugins.GroupPatterns.Index` expires the entire cache whenever a FAM event is received for the :ref:`server-plugins-grouping-grouppatterns` config file. * :func:`Bcfg2.Server.Plugins.PuppetENC.end_client_run` expires the entire cache at the end of every client run and produces a message at the warning level that the :ref:`server-plugins-connectors-puppetenc` plugin is incompatible with aggressive caching. Tracking Execution Time ----------------------- .. versionadded:: 1.3.0 Statistics can and should track execution time statistics using :mod:`Bcfg2.Server.Statistics`. This module tracks execution time for the server core and for plugins, and exposes that data via ``bcfg2-admin perf``. This data can be invaluable for locating bottlenecks or other performance issues. The simplest way to track statistics is to use the :func:`Bcfg2.Server.Statistics.track_statistics` decorator to decorate functions that you would like to track execution times for: .. code-block:: python from Bcfg2.Server.Statistics import track_statistics @track_statistics() def do_something(self, ...): ... This will track the execution time of ``do_something``. More granular usage is possible by using :func:`time.time` to manually determine the execution time of a given event and calling :func:`Bcfg2.Server.Statistics.Statistics.add_value` with an appropriate statistic name. Bcfg2.Server.Statistics ^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: Bcfg2.Server.Statistics Plugin Helper Classes --------------------- .. automodule:: Bcfg2.Server.Plugin.helpers :inherited-members: .. Debuggable is in base to avoid circular imports, but it's a helper .. and should be listed here in the docs .. autoclass:: Bcfg2.Server.Plugin.base.Debuggable :inherited-members: Plugin Exceptions ----------------- .. automodule:: Bcfg2.Server.Plugin.exceptions See Also -------- * :ref:`development-compat` * :ref:`development-utils` doc/development/setup.txt000066400000000000000000000065051303523157100160540ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _development-setup: Environment setup for development ================================= Checking Out a Copy of the Code ------------------------------- * Check out a copy of the code:: git clone https://github.com/Bcfg2/bcfg2.git .. note:: The URL above is read-only. If you are planning on submitting patches upstream, please see :ref:`development-submitting-patches`. 
* Add :file:`bcfg2/src/sbin` to your :envvar:`PATH` environment variable * Add :file:`bcfg2/src/lib` to your :envvar:`PYTHONPATH` environment variable Using a Virtual Environment for Development ------------------------------------------- Bcfg2 is a pure Python program, and Python makes available certain tools that simplify creating isolated environments. Such environments are useful for running code under development, running code that needs to be installed without actually installing it in system locations, or running parallel, independent installations of the same packages. One popular tool for doing this is `virtualenv `_. The following commands will bootstrap an isolated environment where the Bcfg2 server can run. They assume you are starting from an empty directory, on a Posix-like system that has Python and the ``virtualenv`` package installed (e.g., on Debian it is available as ``python-virtualenv``): .. code-block:: sh # Work in a scratch directory mkdir test_env cd test_env # This creates the environment virtualenv . # "Activate" the environment. From this point forward, Python # and its libraries will first be searched for in test_env and # its subdirectories. When you begin a new session that should # use this environment, re-execute this command. . bin/activate # The pip command is useful for installing python code from a # variety of locations, including directly from git repositories easy_install pip # Install Bcfg2 from git. The -e puts the source in an editable # git clone under the "src" dir. pip install -e git://git.mcs.anl.gov/bcfg2.git#egg=Bcfg2 # Install a newer version of the Cheetah library, for example pip install --upgrade cheetah # If you want to run IPython from within the virtual # environment, it will need to be installed locally, even if it # is already available on the system, or else it won't find # the packages installed in the virtual environment. pip install --upgrade ipython # Note, if you install IPython, deactivate and reactivate the # virtualenv before attempting to use it. deactivate . bin/activate .. note:: One caveat about this environment is that it assumes you have already installed Bcfg2's dependencies on the system itself. Pip is capable of building packages such as ``lxml`` that include native code, but you will need to be sure its build-time prerequisites are available. Consider using the above commands to create an isolated Bcfg2 environment in the same directory as your Bcfg2 :term:`repository`. Copy your :file:`/etc/bcfg2.conf` file into a local :file:`etc` directory, tweak the paths as needed and you can run an independent Bcfg2 server as a non-root user. This is useful for confirming that a new release of Bcfg2 and all its tools work against your current :term:`repository` before upgrading.
doc/development/specification_overview.png000066400000000000000000000170151303523157100214250ustar00rootroot00000000000000 [binary PNG image data omitted]
doc/development/submitting-patches.txt000066400000000000000000000077311303523157100205270ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _development-submitting-patches: ================== Submitting Patches ================== The purpose of this document is to assist those who may be less familiar with git in submitting patches upstream. While git is powerful, it can be somewhat confusing to those who don't use it regularly (and even those who do). .. note:: We prefer more in-depth commit messages than those given below, which are purely for brevity in this guide. See http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html for more about creating proper git commit messages. .. _Github: https://github.com/ `Github`_ ========= These steps outline one way of submitting patches via `Github`_. First, you will want to `fork `_ the upstream Bcfg2 repository. Create a local branch --------------------- Once you have forked the upstream repository, you should clone a local copy (where <username> is your github username). :: git clone git@github.com:<username>/bcfg2.git Create a local feature/bugfix branch off the appropriate upstream branch. For example, let's say we want to submit a bugfix for :program:`bcfg2-info` against the 1.2.x series. We can create a ``fix-bcfg2-info`` branch which is a copy of the ``maint-1.2`` branch. :: git branch fix-bcfg2-info maint-1.2 git checkout fix-bcfg2-info Commit changes to your local branch ----------------------------------- Next make whatever changes need to be made and commit them to the ``fix-bcfg2-info`` branch. :: git add src/sbin/bcfg2-info git commit -m "Fix bcfg2-info bug" Now you need to push your ``fix-bcfg2-info`` branch to github. :: git push origin fix-bcfg2-info Submit pull request ------------------- Next, submit a pull request against the proper branch (in this case, https://github.com/username/bcfg2/pull/new/fix-bcfg2-info -- again, username is your github username).
At the top of the pull request, you can edit the upstream branch you are targeting so that you create the pull request against the proper upstream branch (in this case, ``maint-1.2``). All that's left to do is to write up a description of your pull request and click **Send pull request**. Since your local branch is specific to this fix, you can add additional commits if needed and push them. They will automatically be added to the pull request. Remove local branch ------------------- Once we have merged your pull request, you can safely delete your local feature/bugfix branch. To do so, you must first checkout a different branch. :: git checkout master # switch to a different branch git branch -d fix-bcfg2-info # delete your local copy of fix-bcfg2-info git push origin :fix-bcfg2-info # delete fix-bcfg2-info from github Mailing List ============ The following lists the steps needed to use git's facilities for emailing patches to the mailing list. Commit changes to your local clone ---------------------------------- For example, let's say we want to fix a bug in :program:`bcfg2-info` for the 1.2.x series. :: git clone https://github.com/Bcfg2/bcfg2.git git checkout maint-1.2 # make changes git add src/sbin/bcfg2-info git commit -m "Fix bcfg2-info bug" Setup git for gmail (optional) ------------------------------ If you would like to use the GMail SMTP server, you can add the following to your ~/.gitconfig file as per the :manpage:`git-send-email(1)` manpage. :: [sendemail] smtpencryption = tls smtpserver = smtp.gmail.com smtpuser = yourname@gmail.com smtpserverport = 587 Format patches -------------- Use git to create patches formatted for email with the following. :: git format-patch --cover-letter -M origin/maint-1.2 -o outgoing/ Send emails to the mailing list ------------------------------- Edit ``outgoing/0000-*`` and then send your emails to the mailing list (bcfg-dev@lists.mcs.anl.gov):: git send-email outgoing/* doc/development/testing.txt000066400000000000000000000054141303523157100163670ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-testing: Testing ======= Testing Prereleases ------------------- Before each release, several prereleases will be tagged. It is helpful to have users test these releases (when feasible) because it is hard to replicate the full range of potential reconfiguration situations; between different operating systems, system management tools, and configuration specification variation, there can be large differences between sites. For more details please visit `Tracking Development Releases of Bcfg2 `_ . Upgrade Testing --------------- This section describes upgrade procedures to completely test the client and server. These procedures can be used for either pre-release testing, or for confidence building in a new release. Server Testing ^^^^^^^^^^^^^^ 1. Ensure that the server produces the same configurations for clients * Before the upgrade, generate all client configurations using the buildall subcommand of bcfg2-info. This subcommand takes a directory argument; it will generate one client configuration in each file, naming each according to the client name. .. code-block:: sh mgt1:~/bcfg# bcfg2-info Filesystem check 1 of 25 ... > buildall /path/to/cf-old Generated config for fs2.bgl.mcs.anl.gov in 1.97310400009 seconds Generated config for fs13.bgl.mcs.anl.gov in 1.47958016396 seconds ... Take notice of any messages produced during configuration generation. These generally reflect minor issues in the configuration specification.
Ideally, they should be fixed. * Upgrade the server software * Generate all client configurations in a second location using the new software. Any tracebacks reflect bugs, and should be filed in the ticketing system. Any new messages should be carefully examined. * Compare each file in the old directory to those in the new directory using ``bcfg2-admin compare -r /old/directory /new/directory`` .. code-block:: sh mgt1:~/bcfg# bcfg2-admin compare -r cf-old/ cf-new/ Entry: fs2.bgl.mcs.anl.gov.xml Entry: fs2.bgl.mcs.anl.gov.xml good Entry: fs13.bgl.mcs.anl.gov.xml Entry: fs13.bgl.mcs.anl.gov.xml good Entry: login1.bgl.mcs.anl.gov.xml Path /bin/whatami contents differ Path /bin/whatami differs (in bundle softenv) Entry: login1.bgl.mcs.anl.gov.xml bad This can be used to compare configurations for single clients, or different clients. 2. Compare old and new group diagrams (using ``bcfg2-admin viz``) Client Testing ^^^^^^^^^^^^^^ Run the client in dry-run and non-dry-run mode; ensure that multiple runs produce consistent results. doc/development/tips.txt000066400000000000000000000025321303523157100156670ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-tips: Tips for Bcfg2 Development -------------------------- #. Focus on either the client or server code. This focuses the development process down to the precise pieces of code that matter for the task at hand. * If you are developing a client driver, then write up a small configuration specification that includes the needed characteristics. * If you are working on the server, run ``bcfg2-info`` and use to assess the code. #. Use the python interpreter. One of python's most appealing features is interactive use of the interpreter. * If you are developing for the client-side, run ``python -i /usr/sbin/bcfg2`` with the appropriate bcfg2 options. This will cause the python interpreter to continue running, leaving all variables intact. This can be used to examine data state in a convenient fashion. * If you are developing for the server side, use ``bcfg2-info`` and the "debug" option. This will leave you at a python interpreter prompt, with the server core loaded in the variable "bcore". #. Use ``pylint`` obsessively. It raises a lot of style-related warnings which can be ignored, but most all of the errors are legitimate. #. If you are doing anything with Regular Expressions, `Kodos`_ and `re-try`_ are your friends. .. _Kodos: http://kodos.sourceforge.net .. _re-try: http://re-try.appspot.com doc/development/unit-testing.txt000066400000000000000000000323051303523157100173430ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _development-unit-testing: ================== Bcfg2 unit testing ================== .. _Python Mock Module: http://www.voidspace.org.uk/python/mock .. _Python Nose: http://readthedocs.org/docs/nose/en/latest/ You will first need to install the `Python Mock Module`_ and `Python Nose`_ modules. You can then run the existing tests with the following: .. code-block: sh cd testsuite nosetests You should see output something like the following:: .................................................. 
---------------------------------------------------------------------- Ran 50 tests in 0.121s OK Unit tests are also run by Travis-CI, a free continuous integration service, at http://travis-ci.org/#!/Bcfg2/bcfg2/ Testing in a virtualenv ======================= Travis-CI runs the unit tests in a virtual environment, so to emulate that testing environment as closely as possible you can also use a virtual environment. To do so, you must have `virtualenv `_ installed. There are two ways to test: either with just the bare essential packages installed, or with optional packages installed as well. (Optional packages are things like Genshi; you can run Bcfg2 with them or without them.) For completeness, the tests should be run in both manners. (On Python 3, almost none of the optional packages are available, so it can only be run with just the required packages.) To install the optional packages, set: .. code-block:: bash export WITH_OPTIONAL_DEPS=yes This flag tells the install script to install optional dependencies as well as requirements. This assumes that you will create a virtual environment in ``~/venvs/``, and that the Bcfg2 source tree is cloned into ``~/bcfg2``. First, create a new virtual environment and activate it: .. code-block:: bash cd ~/venvs virtualenv travis source travis/bin/activate Get the test suite from bcfg2: .. code-block:: bash cp -R ~/bcfg2/* ~/venvs/travis/ Next, you must install prerequisite packages that are required to build some of the required Python packages, and some optional packages that are much easier to install from binary (rather than from source). If you are running on Ubuntu (the platform Travis-CI runs on) and have sudo, you can simply run: .. code-block:: bash testsuite/before_install.sh If not, you will need to examine ``testsuite/before_install.sh`` and install the packages manually. The equivalent for Fedora, for instance, would be: .. code-block:: bash sudo yum -y update sudo yum -y install swig pylint libxml2 if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then sudo yum -y install libselinux-python pylibacl python-inotify \ PyYAML fi You could install these requirements using pip, but you'll likely need to install a great many development packages required to compile them. Next, install required Python packages: .. code-block:: bash testsuite/install.sh Install Bcfg2 itself to the virtualenv: .. code-block:: bash pip install -e . Now you can run tests: .. code-block:: bash nosetests testsuite Writing Unit Tests ================== Bcfg2 makes extremely heavy use of object inheritance, which can make it challenging at times to write reusable tests. For instance, consider writing tests for the base :class:`Bcfg2.Server.Plugin.base.Plugin` class, which all Bcfg2 :ref:`server-plugins-index` inherit from via the :mod:`Plugin interfaces `, yielding several levels of often-multiple inheritance. To make this easier, our unit tests adhere to several design considerations. Inherit Tests ------------- Our test objects should have inheritance trees that mirror the inheritance trees of their tested objects. For instance, the :class:`Bcfg2.Server.Plugins.Metadata.Metadata` class definition is: .. code-block:: python class Metadata(Bcfg2.Server.Plugin.Metadata, Bcfg2.Server.Plugin.Statistics, Bcfg2.Server.Plugin.DatabaseBacked): Consequently, the ``TestMetadata`` class definition is: .. code-block:: python class TestMetadata(TestPlugin.TestMetadata, TestPlugin.TestStatistics, TestPlugin.TestDatabaseBacked): .. 
note:: The test object names are abbreviated because of the system of relative imports in the ``testsuite`` tree, described below. This gives us a large number of tests basically "for free": all core :class:`Bcfg2.Server.Plugin.interfaces.Metadata`, :class:`Bcfg2.Server.Plugin.interfaces.Statistics`, and :class:`Bcfg2.Server.Plugin.helpers.DatabaseBacked` functionality is automatically tested on the ``Metadata`` class, which gives the test writer a lot of free functionality and also an easy list of which tests must be overridden to provide tests appropriate for the ``Metadata`` class implementation. Additionally, a test class should have a class variable that describes the class that is being tested, and tests in that class should use that class variable to instantate the tested object. For instance, the test for :class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked` looks like this: .. code-block:: python class TestDirectoryBacked(Bcfg2TestCase): test_obj = DirectoryBacked ... def test_child_interface(self): """ ensure that the child object has the correct interface """ self.assertTrue(hasattr(self.test_obj.__child__, "HandleEvent")) Then test objects that inherit from ``TestDirectoryBacked`` can override that object, and the ``test_child_interface`` test (e.g.) will still work. For example: .. code-block:: python class TestPropDirectoryBacked(TestDirectoryBacked): test_obj = PropDirectoryBacked Finally, each test class must also provide a ``get_obj`` method that takes no required arguments and produces an instance of ``test_obj``. All test methods must use ``self.get_obj()`` to instantiate an object to be tested. An object that does not inherit from any other tested Bcfg2 objects should inherit from :class:`testsuite.common.Bcfg2TestCase`, described below. .. _development-unit-testing-relative-imports: Relative Imports ---------------- In order to reuse test code and allow for test inheritance, each test module should add all parent module paths to its ``sys.path``. For instance, assuming a test in ``testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py``, the following paths should be added to ``sys.path``:: testsuite testsuite/Testsrc testsuite/Testsrc/Testlib testsuite/Testsrc/Testlib/TestServer testsuite/Testsrc/Testlib/TestServer/TestPlugins This must be done because Python 2.4, one of our target platforms, does not support relative imports. An easy way to do this is to add the following snippet to the top of each test file: .. code-block:: python import os import sys # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) In addition, each new directory created in ``testsuite`` must contain an empty ``__init__.py``. This will allow you, within ``TestMetadata.py``, to import common test code and the parent objects the ``TestMetadata`` class will inherit from: .. code-block:: python from common import inPy3k, call, builtins, u, can_skip, \ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \ patchIf, datastore from TestPlugin import TestXMLFileBacked, TestMetadata as _TestMetadata, \ TestStatistics, TestDatabaseBacked Avoid Patching Where Possible ----------------------------- The `Python Mock Module`_ provides a ``patch`` decorator that can be used to replace tested objects with ``Mock`` objects. 
This is wonderful and necessary, but due to differences in the way various versions of Python and Python Mock handle object scope, it's not always reliable when combined with our system of test object inheritance. Consequently, you should follow these rules when considering whether to use ``patch``: * If you need to mock an object that is not part of Bcfg2 (e.g., a builtin or an object in another Python library), use ``patch``. * If you need to patch an object being tested in order to instantiate it, use ``patch``, but see below. * If you need to patch a function (not a method) that is part of Bcfg2, use ``patch``. * If you need to mock an object that is part of the object being tested, do not use ``patch``. As an example of the last rule, assume you are writing tests for :class:`Bcfg2.Server.Plugin.helpers.FileBacked`. :func:`Bcfg2.Server.Plugin.helpers.FileBacked.HandleEvent` calls :func:`Bcfg2.Server.Plugin.helpers.FileBacked.Index`, so we need to mock the ``Index`` function. This is the **wrong** way to do that: .. code-block:: python class TestFileBacked(Bcfg2TestCase): @patch("%s.open" % builtins) @patch("Bcfg2.Server.Plugin.helpers.FileBacked.Index") def test_HandleEvent(self, mock_Index, mock_open): ... Tests that inherit from ``TestFileBacked`` will not reliably patch the correct ``Index`` function. Instead, assign the object to be mocked directly: .. code-block:: python class TestFileBacked(Bcfg2TestCase): @patch("%s.open" % builtins) def test_HandleEvent(self, mock_open): fb = self.get_obj() fb.Index = Mock() .. note:: ``@patch`` decorations are evaluated at compile-time, so a workaround like this does **not** work: .. code-block:: python class TestFileBacked(Bcfg2TestCase): @patch("%s.open" % builtins) @patch("%s.%s.Index" % (self.test_obj.__module__, self.test_obj.__name)) def test_HandleEvent(self, mock_Index, mock_open): ... But see below about patching objects before instantiation. In some cases, you will need to patch an object in order to instantiate it. For instance, consider :class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked`, which attempts to set a file access monitor watch when it is instantiated. This won't work during unit testing, so we have to patch :func:`Bcfg2.Server.Plugin.helpers.DirectoryBacked.add_directory_monitor` in order to successfully instantiate a ``DirectoryBacked`` object. In order to do that, we need to patch the object being tested, which is a variable, but we need to evaluate the patch at run-time, not at compile time, in order to deal with inheritance. This can be done with a ``@patch`` decorator on an inner function, e.g.: .. code-block:: python class TestDirectoryBacked(Bcfg2TestCase): test_obj = DirectoryBacked def test__init(self): @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_add_monitor): db = self.test_obj(datastore, Mock()) mock_add_monitor.assert_called_with('') inner() ``inner()`` is patched when ``test__init()`` is called, and so ``@patch()`` is called with the module and the name of the object being tested as defined by the test object (i.e., not as defined by the parent object). If this is not done, then the patch will be applied at compile-time and ``add_directory_monitor`` will be patched on the ``DirectoryBacked`` class instead of on the class to be tested. Some of our older unit tests do not follow these rules religiously, so as more tests are written that inherit from larger portions of the ``testsuite`` tree they may need to be refactored. 
Naming ------ In order to make the system of inheritance we implement possible, we must follow these naming conventions fairly religiously. * Test classes are given the name of the object to be tested with ``Test`` prepended. E.g., the test for the :class:`Bcfg2.Server.Plugins.Metadata.Metadata` is named ``TestMetadata``. * Test classes that test miscellaneous functions in a module are named ``TestFunctions``. * Test modules are given the name of the module to be tested with ``Test`` prepended. Tests for ``__init__.py`` are named ``Test_init.py`` (one underscore). * Tests for methods or functions are given the name of the method or function to be tested with ``test_`` prepended. E.g., the test for :class:`Bcfg2.Server.Plugin.helpers.StructFile.Match` is called ``test_Match``; the test for :class:`Bcfg2.Server.Plugin.helpers.StructFile._match` is called ``test__match``. * Tests for magic methods -- those that start and end with double underscores -- are named ``test__``, where name is the name of the magic method without underscores. E.g., a test for ``__init__`` is called ``test__init``, and a test for ``__getitem__`` is called ``test__getitem``. If this causes a collision with a non-magic function (e.g., if a class also has a function called ``_getitem()``, the test for which would also be called ``test__getitem``, seriously consider refactoring the code for the class. Common Test Code ---------------- .. automodule:: testsuite.common doc/development/utils.txt000066400000000000000000000006301303523157100160450ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-utils: ================ Common Utilities ================ Some helper functions, classes, etc., are useful to both the client and server. Some of these are used to maintain :ref:`development-compat`, and should go in ``Bcfg2.Compat``. Those that aren't strictly for Python compatibility go in ``Bcfg2.Utils``, which is documented below. .. automodule:: Bcfg2.Utils doc/development/versioning.txt000066400000000000000000000010701303523157100170670ustar00rootroot00000000000000.. -*- mode: rst -*- .. _development-versioning: Versioning Bcfg2 ---------------- #. These are the conventions Bcfg2 has adopted for `versioning`_: * The version number will be broken down into Major.Minor.MicroBuild. * Major and Minor are ever increasing integers. * Micro is a single digit integer. This is because of limits in some of the package systems (specifically Mac OS X). * Build is either missing or refers to release candidates: e.g. "pre3". .. _versioning: https://secure.wikimedia.org/wikipedia/en/wiki/Software_versioning doc/exts/000077500000000000000000000000001303523157100126065ustar00rootroot00000000000000doc/exts/xmlschema.py000066400000000000000000001007651303523157100151520ustar00rootroot00000000000000""" Sphinx extension to generate documention from XML schemas. Known to be woefully imcomplete, probably buggy, terrible error handling, but it *works* for the subset of XML schema we use in Bcfg2. Provides the following directives: * ``.. xml:schema:: ``: Document an XML schema * ``.. xml:type:: ``: Document a complexType or simpleType * ``.. xml:group:: ``: Document an element group * ``.. xml:attributegroup:: ``: Document an attributeGroup * ``.. 
xml:element:: ``: Document an XML element Each directive supports the following options: * ``:namespace: ``: Specify the namespace of the given entity * ``:nochildren:``: Do not generate documentation for child entities * ``:noattributegroups:``: Do not generate documentation about attribute groups * ``:nodoc:``: Do not include the documentation included in the entity annotation * ``:notext:``: Do not generate documentation about the text content of the entity * ``:onlyattrs: ,``: Only generate documentation about the comma-separated list of attributes given * ``:requiredattrs: ,attr>``: Claim that the attributes named in the given comma-separated list are required, even if they are not flagged as such in the schema. * ``:linktotype: [,]``: If used as a flag, link to documentation on all child types and elements. If a list is given, only link to those types given. (The default is to generate full inline docs for those types.) * ``:noautodep: [,]``: Do not automatically generate docs for any dependent entities. * ``:inlinetypes: ,``: Override a default ``:linktotype:`` setting for the given types. Provides the following roles to link to the objects documented above: * ``:xml:schema:````: Link to an XML schema * ``:xml:type:````: Link to a complexType or simpleType * ``:xml:group:````: Link to an element group * ``:xml:attributegroup:````: Link to an attributeGroup * ``:xml:element:````: Link to an element * ``:xml:attribute:`:```: Link to the attribute in the given context. The context is the name of the containing object, e.g., the parent attributeGroup, element, or complexType. * ``:xml:datatype:````: Link to a built-in XML data type. Note that the entity being linked to does not need to have been explicitly documented with a directive; e.g., if you document a schema that contains a complexType, you can link to that type without having used the ``xml:type::`` directive. Note also that it's far more reliable to link to a complexType than an element, since element name collisions are fairly common. You should avoid type name collisions whenever possible to maximize usability of this extension. There are two configuration items that may be added to conf.py: * ``xmlschema_path`` gives the base path to all XML schemas. * ``xmlschema_datatype_url`` gives a string pattern that will be used to generate links to built-in XML types. It must contain a single ``%s``, which will be replaced by the name of the type. """ import os import operator import lxml.etree from docutils import nodes from sphinx import addnodes, roles from docutils.statemachine import ViewList from docutils.parsers.rst import directives from sphinx.util.nodes import make_refnode, split_explicit_title, \ nested_parse_with_titles from sphinx.util.compat import Directive from sphinx.domains import ObjType, Domain try: from new import classobj except ImportError: classobj = type XS = "http://www.w3.org/2001/XMLSchema" XS_NS = "{%s}" % XS NSMAP = dict(xs=XS) def comma_split(opt): return opt.split(",") def flag_or_split(opt): try: return opt.split(",") except AttributeError: return True class _XMLDirective(Directive): """ Superclass for the other XML schema directives. 
""" required_arguments = 1 option_spec = dict(namespace=directives.unchanged, nochildren=directives.flag, noattributegroups=directives.flag, nodoc=directives.flag, notext=directives.flag, onlyattrs=comma_split, requiredattrs=comma_split, linktotype=flag_or_split, noautodep=flag_or_split, inlinetypes=comma_split) types = [] def run(self): name = self.arguments[0] env = self.state.document.settings.env reporter = self.state.memo.reporter ns_name = self.options.get('namespace') try: ns_uri = env.xmlschema_namespaces[ns_name] except KeyError: # URI given as namespace ns_uri = ns_name etype = None for etype in self.types: try: entity = env.xmlschema_entities[ns_uri][etype][name] break except KeyError: pass else: reporter.error("No XML %s %s found" % (" or ".join(self.types), name)) return [] documentor = XMLDocumentor(entity, env, self.state, name=name, ns_uri=ns_uri, include=self.process_include(), options=self.process_options()) return documentor.document() def process_include(self): return dict(children='nochildren' not in self.options, attributegroups='noattributegroups' not in self.options, doc='nodoc' not in self.options, text='notext' not in self.options) def process_options(self): return dict(onlyattrs=self.options.get('onlyattrs'), requiredattrs=self.options.get('requiredattrs', []), linktotype=self.options.get('linktotype', []), noautodep=self.options.get('noautodep', False), inlinetypes=self.options.get('inlinetypes', [])) def XMLDirective(types): class cls(_XMLDirective): pass cls.__name__ = 'XML%sDirective' % types[0] cls.types = types return cls class XMLDocumentor(object): def __init__(self, entity, environment, state, name=None, ns_uri=None, parent=None, include=None, options=None): self.entity = entity self.env = environment self.entities = self.env.xmlschema_entities self.namespaces = self.env.xmlschema_namespaces self.namespaces_by_uri = self.env.xmlschema_namespaces_by_uri self.state = state self.include = include self.options = options self.app = self.env.app self.reporter = self.state.memo.reporter if name is None: self.ns_uri = ns_uri self.fqname = self.entity.get("name") self.ns_name, self.name = self.split_ns(self.fqname) if self.ns_uri is None and self.ns_name is not None: self.ns_uri = self.namespaces[self.ns_name] else: self.ns_uri = ns_uri self.ns_name = self.namespaces_by_uri[self.ns_uri] self.name = name if self.ns_name: self.fqname = "%s:%s" % (self.ns_name, self.name) else: self.fqname = name self.tname = nodes.strong(self.fqname, self.fqname) self.tag = self.entity.tag[len(XS_NS):] self.type = tag2type(self.tag) self.parent = parent if self.parent is None: self.dependencies = [] self.documented = [] else: self.dependencies = self.parent.dependencies self.documented = self.parent.documented def document(self): eid = (self.tag, self.fqname) if eid in self.documented: return [build_paragraph(get_xref(self.tag, eid[1]))] else: self.documented.append(eid) rv = [self.target_node(self.tag, self.ns_name, self.name)] data = addnodes.desc(objtype=self.tag) targetid = get_target_id(self.tag, self.ns_name, self.name) header = addnodes.desc_signature('', '', first=True, ids=[targetid]) if self.include['doc']: header.extend([nodes.emphasis(self.tag, self.tag), text(" "), self.tname]) data.append(header) contents = nodes.definition() if self.include['doc']: contents.append(self.get_doc(self.entity)) contents.extend(getattr(self, "document_%s" % self.tag)()) data.append(contents) rv.append(data) if self.parent is None: # avoid adding duplicate dependencies added = [(self.type, 
self.name)] for typ, name, entity in self.dependencies: if not name: name = entity.get('name') if (typ, name) in added: continue ns_name, name = self.split_ns(name) ns_uri = self.namespaces[ns_name] if not entity: try: entity = self.entities[ns_uri][typ][name] except KeyError: self.app.warn("Dependency %s not found in schemas" % get_target_id(typ, ns_name, name)) continue doc = self.get_documentor(entity, name=name, ns_uri=ns_uri) rv.extend(doc.document()) added.append((typ, name)) return rv def document_schema(self): try: element = self.entity.xpath("xs:element", namespaces=NSMAP)[0] ns, name = self.split_ns(element.get("name")) doc = self.get_documentor(element, name=name, ns_uri=self.namespaces[ns]) return doc.document() except IndexError: # no top-level element or group -- just a list of # (abstract) complexTypes? rv = [] for ctype in self.entity.xpath("xs:complexType", namespaces=NSMAP): ns, name = self.split_ns(ctype.get("name")) doc = self.get_documentor(ctype, name=name, ns_uri=self.namespaces[ns]) rv.extend(doc.document()) return rv def document_group(self): rv = nodes.definition_list() try: (children, groups) = \ self.get_child_elements(self.entity, nodeclass=nodes.paragraph) except TypeError: return [build_paragraph(nodes.strong("Any", "Any"), " arbitrary element allowed")] append_node(rv, nodes.term, text("Elements:")) append_node(rv, nodes.definition, *children) if len(groups): append_node(rv, nodes.term, text("Element groups:")) append_node(rv, nodes.definition, *groups) return rv def document_element(self): fqtype = self.entity.get("type") if fqtype: (etype_ns, etype) = self.split_ns(fqtype) ns_uri = self.get_namespace_uri(etype_ns) values = self.get_values_from_type() if values != "Any": return [build_paragraph( self.tname, " takes only text content, which may be the ", "following values: ", values)] elif etype in self.entities[ns_uri]["complexType"]: if ((self.options['linktotype'] is True or self.name in self.options['linktotype'] or etype in self.options['linktotype'] or fqtype in self.options['linktotype']) and self.name not in self.options['inlinetypes'] and etype not in self.options['inlinetypes']): self.add_dep('complexType', fqtype, None) return [build_paragraph("Type: ", get_xref("type", fqtype))] typespec = self.entities[ns_uri]["complexType"][etype] doc = self.get_documentor(typespec, name=self.entity.get("name")) rv = [self.target_node("complexType", etype_ns, etype)] if self.include['doc'] and not self.get_doc(self.entity): rv.append(self.get_doc(typespec)) rv.extend(doc.document_complexType()) return rv else: self.reporter.error("Unknown element type %s" % fqtype) return [] else: rv = [] typespec = self.entity.xpath("xs:complexType", namespaces=NSMAP)[0] if self.include['doc'] and not self.get_doc(self.entity): rv.append(self.get_doc(typespec)) if typespec is not None: rv = [self.target_node("complexType", self.ns_name, self.name)] doc = self.get_documentor(typespec) rv.extend(doc.document_complexType()) return rv def document_complexType(self): rv = nodes.definition_list() try: content = self.entity.xpath("xs:simpleContent", namespaces=NSMAP)[0] base = content.xpath("xs:extension|xs:restriction", namespaces=NSMAP)[0] attr_container = base except IndexError: base = None attr_container = self.entity ##### ATTRIBUTES ##### table, tbody = self.get_attr_table() attrs = self.get_attrs(attr_container) if attrs: tbody.extend(attrs) foreign_attr_groups = nodes.bullet_list() for agroup in attr_container.xpath("xs:attributeGroup", namespaces=NSMAP): # if the attribute 
group is in another namespace, just # link to it ns, name = self.split_ns(agroup.get('ref')) if ns != self.ns_name: append_node( foreign_attr_groups, nodes.list_item, build_paragraph(get_xref(tag2type("attributeGroup"), ":".join([ns, name])))) else: tbody.extend(self.get_attrs( self.entities['attributeGroup'][name])) if len(tbody): append_node(rv, nodes.term, text("Attributes:")) append_node(rv, nodes.definition, table) if self.include['attributegroups'] and len(foreign_attr_groups): append_node(rv, nodes.term, text("Attribute groups:")) append_node(rv, nodes.definition, foreign_attr_groups) ##### ELEMENTS ##### if self.include['children']: # todo: distinguish between elements that may occur and # elements that must occur try: (children, groups) = self.get_child_elements(self.entity) except TypeError: children = None groups = None rv.append(build_paragraph(nodes.strong("Any", "Any"), " arbitrary child elements allowed")) if children: append_node(rv, nodes.term, text("Child elements:")) append_node(rv, nodes.definition, build_node(nodes.bullet_list, *children)) if groups: append_node(rv, nodes.term, text("Element groups:")) append_node(rv, nodes.definition, *groups) ##### TEXT CONTENT ##### if self.include['text']: if self.entity.get("mixed", "false").lower() == "true": append_node(rv, nodes.term, text("Text content:")) append_node(rv, nodes.definition, build_paragraph(self.get_values_from_simpletype())) elif base is not None: append_node(rv, nodes.term, text("Text content:")) append_node( rv, nodes.definition, build_paragraph(self.get_values_from_simpletype(content))) return [rv] def document_attributeGroup(self): attrs = self.get_attrs(self.entity) if attrs: table, tbody = self.get_attr_table() tbody.extend(attrs) return [table] else: return [] def get_attr_table(self): atable = nodes.table() atgroup = build_node(nodes.tgroup('', cols=5), nodes.colspec(colwidth=10), nodes.colspec(colwidth=50), nodes.colspec(colwidth=20), nodes.colspec(colwidth=10), nodes.colspec(colwidth=10), nodes.thead('', build_table_row("Name", "Description", "Values", "Required", "Default"))) atable.append(atgroup) atable_body = nodes.tbody() atgroup.append(atable_body) return (atable, atable_body) def get_child_elements(self, el, nodeclass=None): """ returns a tuple of (child element nodes, element group nodes). HOWEVER, if _any_ child is allowed, returns True. 
""" children = [] groups = [] if nodeclass is None: nodeclass = nodes.list_item if el.xpath("xs:any", namespaces=NSMAP): return True for child in el.xpath("xs:element", namespaces=NSMAP): node = nodeclass() if child.get('ref'): node.append(build_paragraph(get_xref('element', child.get('ref')))) else: # child element given inline doc = self.get_documentor(child, name=child.get('name')) node.extend(doc.document()) children.append(node) for group in el.xpath("xs:group", namespaces=NSMAP): if group.get('ref'): name = group.get('ref') node = nodeclass() node.append(build_paragraph(get_xref('group', name))) self.add_dep('group', name, None) groups.append(node) else: rv = self.get_child_elements(group, nodeclass=nodeclass) try: children.extend(rv[0]) groups.extend(rv[1]) except TypeError: return rv for container in el.xpath("xs:all|xs:choice|xs:sequence", namespaces=NSMAP): rv = self.get_child_elements(container, nodeclass=nodeclass) try: children.extend(rv[0]) groups.extend(rv[1]) except TypeError: return rv return (children, groups) def get_documentor(self, entity, name=None, ns_uri=None): if name is None: name = self.name if ns_uri is None: ns_uri = self.ns_uri return XMLDocumentor(entity, self.env, self.state, name=name, ns_uri=ns_uri, parent=self, options=self.options, include=self.include) def get_attrs(self, el, context=None): cnode = el while context is None and cnode is not None: context = cnode.get('name') cnode = cnode.getparent() rows = [] for attr in el.xpath("xs:attribute[@name]", namespaces=NSMAP): name = attr.get("name") if self.ns_name: fqname = "%s:%s" % (self.ns_name, name) else: fqname = name if (self.options['onlyattrs'] and name not in self.options['onlyattrs'] and fqname not in self.options['onlyattrs']): continue tag = attr.tag[len(XS_NS):] row = [build_paragraph(self.target_node(tag, self.ns_name, context, name), nodes.literal(fqname, fqname))] row.append(self.get_doc(attr)) if attr.get("type") is not None: row.append(build_paragraph( self.get_values_from_type(entity=attr))) else: try: atype = attr.xpath("xs:simpleType", namespaces=NSMAP)[0] row.append(self.get_values_from_simpletype(atype)) except IndexError: # todo: warn about no type found pass reqd = 0 if (name in self.options['requiredattrs'] or attr.get("use", "optional") == "required"): row.append("Yes") reqd = 1 else: row.append("No") default = attr.get("default") if default is None: row.append("None") else: row.append(nodes.literal(default, default)) # we record name and required separately to make sorting # easier rows.append((name, reqd, build_table_row(*row))) rows.sort(key=operator.itemgetter(0)) rows.sort(key=operator.itemgetter(1), reverse=True) if not self.options['onlyattrs'] or '*' in self.options['onlyattrs']: try: anyattr = el.xpath("xs:anyAttribute", namespaces=NSMAP)[0] rows.append((None, None, build_table_row('*', self.get_doc(anyattr), "Any", "No", "None"))) except IndexError: pass return [r[2] for r in rows] def get_values_from_type(self, entity=None, typeattr='type'): if entity is None: entity = self.entity ns_name, name = self.split_ns(entity.get(typeattr)) ns_uri = self.get_namespace_uri(ns_name, entity=entity) if ns_uri == XS: return self.get_builtin_type(name) elif name in self.entities[ns_uri]['simpleType']: return self.get_values_from_simpletype( self.entities[ns_uri]['simpleType'][name]) else: return "Any" def get_builtin_type(self, vtype): if vtype == "boolean": return get_value_list(["true", "false"]) else: return get_datatype_ref(vtype, vtype, self.app.config.xmlschema_datatype_url) def 
get_doc(self, el): try: return self.parse(el.xpath("xs:annotation/xs:documentation", namespaces=NSMAP)[0].text) except IndexError: return build_paragraph('') def parse(self, text): node = nodes.paragraph() vl = ViewList() for line in text.splitlines(): vl.append(line, '') nested_parse_with_titles(self.state, vl, node) try: return node[0] except IndexError: return build_paragraph(text) def split_ns(self, name): try: (ns, name) = name.split(":") except ValueError: ns = self.ns_name return (ns, name) def get_values_from_simpletype(self, entity=None): if entity is None: entity = self.entity # todo: xs:union, xs:list try: restriction = entity.xpath("xs:restriction|xs:extension", namespaces=NSMAP)[0] except IndexError: return "Any" doc = self.get_doc(restriction) if len(doc) == 1 and len(doc[0]) == 0: # if get_doc returns a paragraph node with an empty Text # node enum = [e.get("value") for e in restriction.xpath("xs:enumeration", namespaces=NSMAP)] if len(enum): return get_value_list(enum) else: return self.get_values_from_type(entity=restriction, typeattr='base') else: return doc def add_dep(self, typ, name, entity): try: if name in self.options['noautodep']: return except TypeError: if self.options['noautodep']: return self.dependencies.append((typ, name, entity)) def target_node(self, tag, ns, *extra): targetid = get_target_id(tag, ns, *extra) fqname = targetid[len(tag) + 1:] rv = nodes.target('', '', ids=[targetid]) self.add_domain_data(tag2type(tag), fqname, (self.env.docname, targetid)) return rv def add_domain_data(self, typ, key, data): if key not in self.env.domaindata['xml'][typ]: self.env.domaindata['xml'][typ][key] = data def get_namespace_uri(self, ns_name, entity=None): if entity is None: entity = self.entity xs_ns = get_xs_ns(entity) if ns_name == xs_ns: return XS else: return self.namespaces[ns_name] def tag2type(tag): if tag in ['complexType', 'simpleType']: return 'type' elif tag == 'attributeGroup': return 'attributegroup' return tag def text(txt): return nodes.Text(txt, txt) def append_node(parent, cls_or_node, *contents): parent.append(build_node(cls_or_node, *contents)) def build_node(cls_or_node, *contents): if isinstance(cls_or_node, (type, classobj)): rv = cls_or_node() else: rv = cls_or_node rv.extend(contents) return rv def get_xref(typ, target, title=None): if title is None: title = target ref = addnodes.pending_xref(title, reftype=typ, refdomain="xml", reftarget=target) ref.append(nodes.literal(title, title)) return ref def build_table_row(*vals): rv = nodes.row('') for val in vals: if isinstance(val, nodes.Node): node = val else: node = nodes.paragraph(val, val) rv.append(nodes.entry(node, node)) return rv def build_paragraph(*args): """ convenience method to build a paragraph node """ rv = nodes.paragraph() for content in args: if isinstance(content, nodes.Node): rv.append(content) else: rv.append(text(content)) return rv def get_target_id(etype, ns_name, *extra): if ns_name: return ":".join([etype, ns_name] + list(extra)) else: return ":".join([etype] + list(extra)) def get_value_list(vals): rv = nodes.paragraph() if vals: rv.append(nodes.literal(vals[0], vals[0])) for i in range(1, len(vals)): rv.append(text(" | ")) rv.append(nodes.literal(vals[i], vals[i])) return rv def get_xs_ns(el): return get_namespace_name(el, XS) def get_namespace_name(el, ns_uri): for name, ns in el.nsmap.items(): if ns == ns_uri: return name return None def get_datatype_ref(title, target, baseurl): return build_node(nodes.reference('', '', refuri=baseurl % target), nodes.literal(title, 
title)) class XMLDatatypeRole(object): def __init__(self, baseurl): self.baseurl = baseurl def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]): has_explicit_title, title, target = split_explicit_title(text) return [get_datatype_ref(title, target, self.baseurl)], [] class XMLXRefRole(roles.XRefRole): def __init__(self, typ, **kwargs): roles.XRefRole.__init__(self, **kwargs) self.type = typ def process_link(self, env, refnode, has_explicit_title, title, target): if (self.type == 'attribute' and not has_explicit_title and ':' in title): title = title.split(':')[-1] return roles.XRefRole.process_link(self, env, refnode, has_explicit_title, title, target) class XMLDomain(Domain): name = "xml" label = "XML" types = dict(schema=['schema'], type=['complexType', 'simpleType'], group=['group'], attributegroup=['attributeGroup'], element=['element'], attribute=None) object_types = dict([(t, ObjType("XML %s" % t.title(), t)) for t in types.keys()]) directives = dict([(t, XMLDirective(h)) for t, h in types.items() if h is not None]) roles = dict([(t, XMLXRefRole(t)) for t in types.keys()]) dangling_warnings = dict([(t, "unknown XML %s: %%(target)s" % t) for t in types.keys()]) initial_data = dict([(t, dict()) for t in types.keys()]) data_version = 3 def clear_doc(self, docname): to_del = [] for dtype in self.types.keys(): for key, (doc, _) in self.data[dtype].items(): if doc == docname: to_del.append((dtype, key)) for dtype, key in to_del: del self.data[dtype][key] def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): if typ in ['complexType', 'simpleType']: typ = 'type' if target in self.data[typ]: docname, labelid = self.data[typ][target] else: return None return make_refnode(builder, fromdocname, docname, labelid, contnode) def get_objects(self): for dtype in self.types.keys(): for name, (docname, tgtid) in self.data[dtype].items(): yield (name, name, dtype, docname, tgtid, self.object_types[dtype].attrs['searchprio']) def setup(app): app.add_config_value('xmlschema_path', '.', False) app.add_config_value('xmlschema_datatype_url', 'http://www.w3.org/TR/xmlschema-2/#%s', False) app.add_domain(XMLDomain) app.connect('builder-inited', load_xml_schemas) app.connect('builder-inited', add_xml_datatype_role) def add_xml_datatype_role(app): app.add_role_to_domain('xml', 'datatype', XMLDatatypeRole(app.config.xmlschema_datatype_url)) def load_xml_schemas(app): entities = dict() entities[None] = dict(schema=dict(), group=dict(), attributeGroup=dict(), element=dict(), simpleType=dict(), complexType=dict()) namespaces = dict() namespaces_by_uri = dict() schemapath = os.path.abspath(os.path.join(app.builder.env.srcdir, app.config.xmlschema_path)) for root, _, files in os.walk(schemapath): for fname in files: if not fname.endswith(".xsd"): continue path = os.path.join(root, fname) relpath = path[len(schemapath):].strip("/") schema = lxml.etree.parse(path).getroot() ns = schema.get("targetNamespace") ns_name = get_namespace_name(schema, ns) if ns_name not in namespaces: namespaces[ns_name] = ns if ns not in namespaces_by_uri: namespaces_by_uri[ns] = ns_name if ns not in entities: entities[ns] = dict(schema=dict(), group=dict(), attributeGroup=dict(), element=dict(), simpleType=dict(), complexType=dict()) # schemas don't require namespaces to be identified # uniquely, but we let the user identify them either with # or without the namespace entities[None]['schema'][relpath] = schema entities[ns]['schema'][relpath] = schema for entity in 
schema.xpath("//xs:*[@name]", namespaces=NSMAP): tag = entity.tag[len(XS_NS):] if tag in entities[ns]: entities[ns][tag][entity.get("name")] = entity app.builder.env.xmlschema_namespaces = namespaces app.builder.env.xmlschema_namespaces_by_uri = namespaces_by_uri app.builder.env.xmlschema_entities = entities doc/getting_started/000077500000000000000000000000001303523157100150125ustar00rootroot00000000000000doc/getting_started/index.txt000066400000000000000000000215231303523157100166650ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _getting_started-index: =============== Getting started =============== The steps below should get you from just thinking about a configuration management system to an operational installation of Bcfg2. If you get stuck, be sure to check the :ref:`help-mailinglist` or to drop in on our :ref:`help-irc`. See the `Platform-specific Quickstart Notes`_ at the end of this page if you happen to be using one of the more common operating systems. Get and Install Bcfg2 Server ============================ We recommend running the server on a Linux machine for ease of deployment due to the availability of packages for the dependencies. First, you need to download and install Bcfg2. The section :ref:`installation-index` in this manual describes the steps to take. To start, you will need to install the server on one machine and the client on one or more machines. Yes, your server can also be a client (and should be by the time your environment is fully managed). .. _Bcfg2 download page: http://trac.mcs.anl.gov/projects/bcfg2/wiki/Download Set up Repository ================= The next step after installing the Bcfg2 packages is to configure the server. You can easily set up a personalized default configuration by running, on the server, :: bcfg2-admin init You will be presented with a series of questions that will build a Bcfg2 configuration file in ``/etc/bcfg2.conf``, set up a skeleton repository (in ``/var/lib/bcfg2`` by default), help you create ssl certificates, and do any other similar tasks needed to get you started. Once this process is done, you can start the Bcfg2 server:: /etc/init.d/bcfg2-server start You can try it out by running the Bcfg2 client on the same machine, acting like it is your first client. .. note:: The following command will tell the client to run in no-op mode, meaning it will only check the client against the repository and report any differences it sees. It won't make any changes (partially because you haven't populated the repository with any yet). However, nobody is perfect. You can make a typo, our software can have bugs, monkeys can break in and hit enter before you are done. Don't run this command on a production system if you don't know what it does and aren't prepared for the consequences. We don't know of anybody having problems with it before, but it is better to be safe than sorry. And now for the command:: bcfg2 -q -v -n That can be translated as "bcfg2 quick verbose no-op". The output should be something similar to:: Loaded tool drivers: Chkconfig POSIX YUM Phase: initial Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 242 Phase: final Correct entries: 0 Incorrect entries: 0 Total managed entries: 0 Unmanaged entries: 242 Perfect! We have started out with an empty configuration, and none of our configuration elements are correct. It doesn't get much cleaner than that. But what about those unmanaged entries? 
Those are the extra configuration elements (probably all packages and
services at the moment) that still aren't managed, but have been detected
by the client tools. Your goal now is to migrate each of those, plus any
it can't see, up to the "Correct entries" line.

Populate Repository
===================

Finally, you need to populate your repository. Unfortunately, from here
on out we can't write up a simple recipe for you to follow to get this
done. It is very dependent on your local configuration, your
configuration management goals, the politics surrounding your particular
machines, and many other similar details. We can, however, give you
guidance.

After the above steps, you should have a toplevel repository structure
that looks like::

    bcfg-server:~ # ls /var/lib/bcfg2
    Bundler/  Cfg/  Metadata/  Pkgmgr/  Rules/  SSHbase/  etc/

The place to start is the Metadata directory, which contains two files:
``clients.xml`` and ``groups.xml``. Your current ``clients.xml`` will
look pretty close to:

.. code-block:: xml

    <Clients version="3.0">
      <Client profile="basic" pingable="Y" pingtime="0" name="bcfg-server.example.com"/>
    </Clients>

The ``clients.xml`` file is just a series of ``<Client>`` tags, each of
which describes one host you manage. Right now we only manage one host,
the server machine we just created. This machine is bound to the
``basic`` profile, is pingable, has a pingtime of ``0``, and has the name
``bcfg-server.example.com``. The two "ping" parameters don't matter to us
at the moment, but the other two do. The name parameter is the fully
qualified domain name of your host, and the profile parameter maps that
host into the ``groups.xml`` file.

Our simple ``groups.xml`` file looks like:

.. code-block:: xml

    <Groups version="3.0">
      <Group profile="true" name="basic">
        <Group name="suse"/>
      </Group>
      <Group name="suse"/>
    </Groups>

There are two types of groups in Bcfg2: profile groups
(``profile='true'``) and non-profile groups (``profile='false'``).
Profile groups can act as top-level groups to which clients can bind,
while non-profile groups only exist as members of other groups. In our
simple starter case, we have a profile group named ``basic``, and that is
the group that our first client bound to. Our first client is a SuSE
machine, so it contains the ``suse`` group. Of course, ``bcfg2-admin``
isn't smart enough to fill out the rest of your config, so the ``suse``
group further down is empty.

Let's say the first thing we want to set up on our machine is the message
of the day. To do this, we simply need to create a Bundle and add that
Bundle to an appropriate group. In this simple example, we start out by
adding

.. code-block:: xml

    <Bundle name="motd"/>

to the ``basic`` group. Next, we create a motd.xml file in the Bundler
directory:

.. code-block:: xml

    <Bundle name="motd">
      <Path name="/etc/motd"/>
    </Bundle>

Now when we run the client, we get slightly different output::

    Loaded tool drivers:
     Chkconfig    POSIX    YUM
    Incomplete information for entry Path:/etc/motd; cannot verify
    Phase: initial
    Correct entries:        0
    Incorrect entries:      1
    Total managed entries:  1
    Unmanaged entries:      242

    In dryrun mode: suppressing entry installation for:
     Path:/etc/motd

    Phase: final
    Correct entries:        0
    Incorrect entries:      1
    Total managed entries:  1
    Unmanaged entries:      242

We now have one managed (but still incorrect) entry, bringing our total
number of managed entries up to one. To manage it we need to copy
``/etc/motd`` to ``/var/lib/bcfg2/Cfg/etc/motd/``. Note the layout of
that path: all plain-text config files live in the Cfg directory. The
directory structure under that directory directly mimics your real
filesystem layout, making it easy to find and add new files. The last
directory is the name of the file itself, so in this case the full path
to the motd file would be ``/var/lib/bcfg2/Cfg/etc/motd/motd``.
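On the server, that copy step boils down to two commands (a minimal
sketch, assuming the default ``/var/lib/bcfg2`` repository location and a
root shell)::

    mkdir -p /var/lib/bcfg2/Cfg/etc/motd
    cp /etc/motd /var/lib/bcfg2/Cfg/etc/motd/motd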
Copy your real ``/etc/motd`` file to that location, run the client again,
and you will find that we now have a correct entry::

    Loaded tool drivers:
     Chkconfig    POSIX    YUM

    Phase: initial
    Correct entries:        1
    Incorrect entries:      0
    Total managed entries:  1
    Unmanaged entries:      242

    Phase: final
    Correct entries:        1
    Incorrect entries:      0
    Total managed entries:  1
    Unmanaged entries:      242

Done! Now we just have 242 (or more) entries to take care of!

:ref:`server-plugins-structures-bundler` is a relatively easy directory
to populate. You can find many samples of Bundles in the
:ref:`Bundler Example Repository `, many of which can be used without
editing.

.. _getting_started-index-next-steps:

Next Steps
==========

Several other utilities can help from this point on:

:ref:`bcfg2-info ` is a utility that instantiates a copy of the Bcfg2
server core (minus the networking code) for examination. From this, you
can directly query:

* Client Metadata
* Which entries are provided by particular plugins
* Client Configurations

Run ``bcfg2-info``, and type ``help`` at the prompt when it comes up.

``bcfg2-admin`` can perform a variety of repository maintenance tasks.
Run ``bcfg2-admin help`` for details.

Once you have the server set up, you may be interested in
:ref:`bootstrapping ` additional clients.

Platform-specific Quickstart Notes
==================================

.. toctree::
   :maxdepth: 1

   CentOS
   Ubuntu
   Gentoo
   Mac OS X
doc/getting_started/macosx/000077500000000000000000000000001303523157100163045ustar00rootroot00000000000000doc/getting_started/macosx/notes.txt000066400000000000000000000060341303523157100202000ustar00rootroot00000000000000.. -*- mode: rst -*-

.. _getting_started-macosx-notes:

=============================
Setting up Bcfg2 From Scratch
=============================

Ala `Managing /etc/motd with Bcfg2 Starting From an Empty VM `_, I'll be
setting up a fresh OS X 10.6 machine to be managed by Bcfg2.

Get OS X 10.6 Running
=====================

Use your favorite provisioning method to get your operating system
running and fully patched. For this hands-on, I'm running OS X 10.6.8
(Build 10K540) with the supplied python 2.6.1. I've also turned on Remote
Login (i.e. ssh) so I can use my client to write this document going
through the steps; having ssh on is not a requirement for this howto.

Get bcfg2-server Working
========================

Get bcfg2 package
-----------------

You might be able to get a package already built for you, but it is not
hard to build it from the source. You'll need git (via
`git-osx-installer `_ or `homebrew `_; the former is easier, the latter
more developer-friendly) and Apple's `XCode `_.

The first step is to clone the bcfg2 repository into a working directory:

.. code-block:: bash

    cd ~/Developer
    git clone https://github.com/Bcfg2/bcfg2.git
    cd bcfg2

At this point you will probably want to checkout a release tag
(`git tag -l` to see a list of them). This test is using v1.2.0pre4. Once
you've done that you can build the server.

.. code-block:: bash

    git checkout v1.2.0pre4
    cd osx
    make server

The server package contains both the client and the server. The package
is located at ``./osx/bcfg2-VERSION.pkg``. Copy it to the machine you
want to set up from scratch and install it.

THIS NEEDS TO BE VERIFIED

Some of the differences between bcfg2 on Mac OS X and Debian are that the
libraries are stored at
`/Library/Frameworks/Python.framework/Versions/Current/share/bcfg2/` and
`/Library/Python/site-packages/Bcfg2/` instead of `/usr/lib/pymodules/`
and `/usr/share/pyshare/Bcfg2`.
Also, instead of cron and init.d, `/Library/LaunchDaemons/gov.anl.mcs.bcfg2-daily.plist` controls peridic runs and starts and stops. The runtime files are stored in `/usr/local/bin` under Mac OS X instead of /usr/sbin/ for Debian. VERIFY:: 10.6_client :~ user$ sudo /usr/local/bin/bcfg2-admin init Failed to import lxml dependency. Shutting down server. Try: sudo easy_install lxml. If you don't have gcc-4.2 installed, you'll need to install it on a machine that does. Then move `/Library/Python/2.6/sites-packages/lxml-2.3-py2.6-macosx-10.6-universal.egg` to the client and add the line "./lxml-2.3-py2.6-macosx-10.6-universal.egg" to `/Library/Python/2.6/site-packages/easy-install.pth`. Getting a new error:: $ sudo /usr/local/bin/bcfg2-admin init Interactively initialize a new repository. bcfg2-admin init $ So what is lxml easy_install fully installing? Need to make a package (Lettuce to the rescue!) doc/glossary.txt000066400000000000000000000026231303523157100142320ustar00rootroot00000000000000.. -*- mode: rst -*- .. _glossary: ======== Glossary ======== .. glossary:: :sorted: client A system that runs the :command:`bcfg2` command. Typically, this is to receive a configuration from a Bcfg2 server. generator A type of plugin which provides file contents. For example :ref:`server-plugins-generators-cfg` or :ref:`server-plugins-generators-sshbase`. Genshi A Python-based templating engine. `Genshi Homepage`_. group A "tag" assigned to a client through a probe or other plugin. irc channel #bcfg2 on freenode probe A script that executes on a client machine and sets client metadata such as group membership. profile A special type of group that a client is explicitly assigned to. repository A collection of folders and files that together define the configurations that Bcfg2 applies to clients. The repository is located at :file:`/var/lib/bcfg2` by default. This is not to be confused with a :term:`VCS` repository, which is an excellent place to pull your Bcfg2 repository from to manage changes. When used alone, :term:`repository` refers to a Bcfg2 repository. VCS Stands for `Version Control System `_. .. _Genshi Homepage: http://genshi.edgewall.org/ doc/help/000077500000000000000000000000001303523157100125535ustar00rootroot00000000000000doc/help/faq/000077500000000000000000000000001303523157100133225ustar00rootroot00000000000000doc/help/faq/client.txt000066400000000000000000000013271303523157100153440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _faq-client: FAQ: Client =========== **No ca is specified. Cannot authenticate the server with SSL.** This means that the client does not have a copy of the CA for the server, so it can't verify that it is talking to the right server. To fix this issue, copy ``/etc/bcfg2.crt`` from the server to ``/etc/bcfg2.ca`` on the client. Then add the following on the client.:: [communication] ca = /etc/bcfg2.ca .. FIXME: What is the solution for this? .. **Server failure** .. .. On Fedora 14 and above it can happen that no connection is possible. .. .. # bcfg2 -vqne .. Server failure: Protocol Error: 401 Unauthorized .. Failed to download probes from bcfg2 .. Server Failure doc/help/faq/general.txt000066400000000000000000000037061303523157100155060ustar00rootroot00000000000000.. -*- mode: rst -*- .. _faq-general: FAQ: General ============ **What does Bcfg2 stand for?** Initially, Bcfg stood for the bundle configuration system. Bcfg2 is the second major revision. At this point, the acronym is meaningless, but the name has stuck. 
Luckily, Bcfg2 googles better than Bcfg does. No, seriously. Try it. All
I know is that I have no interest in a billion cubic feet of gas.

**What architectures does Bcfg2 support?**

Bcfg2 should run on any POSIX-compatible operating system; however,
direct support for an operating system's package and service formats is
limited by the currently available :ref:`client-tools` (although new
client tools are pretty easy to add). The following is an incomplete but
more exact list of platforms on which Bcfg2 works.

* GNU/Linux deb based distros
* GNU/Linux rpm based distros
* Solaris pkg based
* Gentoo portage based
* OSX (POSIX/launchd support)

**What pre-requisites are needed to run Bcfg2?**

Please visit the :ref:`installation-prerequisites` section in the manual.

**Why won't bcfg2-server start?**

If your server doesn't seem to be starting and you see no error messages
in your server logs, try running it in the foreground to see why.

**Why am I getting a traceback?**

If you get a traceback, please let us know. You can file a `ticket `_,
send the traceback to the :ref:`help-mailinglist`, or hop on the
:ref:`help-irc` and let us know.

**Where are the server log messages?**

The bcfg2-server process logs to syslog facility LOG_DAEMON. The server
produces a series of messages upon a variety of events and errors.

**Is there a way to check if all repository XML files conform to schemas?**

Bcfg2 comes with XML schemas describing all of the XML formats used in
the server repository. A validation command ``bcfg2-lint`` is included
with the source distribution and all packages. ``bcfg2-lint`` can also
perform lots of other checks for common mistakes.
doc/help/faq/index.txt000066400000000000000000000004131303523157100151700ustar00rootroot00000000000000.. -*- mode: rst -*-

.. _faq-index:

===
FAQ
===

The Frequently Asked Questions (FAQ) answers the most common questions
about Bcfg2. At the moment the FAQ is split into a general and a
client-specific section.

.. toctree::
   :maxdepth: 2

   general
   client
doc/help/index.txt000066400000000000000000000023021303523157100144200ustar00rootroot00000000000000.. -*- mode: rst -*-

.. _help-index:

=======================
Getting Help with Bcfg2
=======================

Having trouble? We'd like to help! There are several ways to get in touch
with the Bcfg2 community.

* Try the :ref:`FAQ ` -- it's got answers to many common questions.
* Check the :ref:`help-troubleshooting` page to see if you can narrow
  down the cause of your issue.
* Looking for specific information? Try the :ref:`genindex`,
  :ref:`modindex`, or the :ref:`detailed table of contents `.
* Search for information in the :ref:`help-mailinglist`.
* Visit our :ref:`help-irc`, or search the `IRC logs`_. Note that the IRC
  channel tends to be much busier than the mailing list; use whichever
  seems most appropriate for your query, but don't let the lack of
  mailing list activity make you think the project isn't active.

.. _IRC logs: http://colabti.org/irclogger/irclogger_logs/bcfg2
.. _Bcfg2 mailing list archives: http://trac.mcs.anl.gov/projects/bcfg2/wiki/MailingList
.. _Trac ticket tracker: http://bcfg2.org

Report A Bug
============

Report bugs with Bcfg2 on the `Trac ticket tracker`_.

.. toctree::
   :hidden:

   mailinglist
   irc
   faq/index
   troubleshooting
doc/help/irc.txt000066400000000000000000000032771303523157100141000ustar00rootroot00000000000000.. -*- mode: rst -*-

.. _Freenode: http://chat.freenode.net
.. _#bcfg2: irc://chat.freenode.net/bcfg2

..
_help-irc: =========== IRC Channel =========== The Bcfg2 IRC channel is `#bcfg2`_ on `Freenode`_. It is home to both support and development discussions. If you have a question, suggestion, or just want to know about Bcfg2, please drop in and say hi. Archives are available at: http://colabti.org/irclogger/irclogger_logs/bcfg2 .. raw:: html

Administrative Note
===================

If the IRC logging stops working for a while, coordinate on #bcfg2 and
then bug **feb** on #irclogger (freenode), and stick around on that
channel until you get an answer (**feb** is great, but busy and in a
non-US time zone). Actually, as long as **ilogger2** is logged in you
should be okay (**feb** looks at the logs).

If you have private logs for the period of time **ilogger2** was off the
channel, you can convert them to the `format shown here`_ and **feb**
will incorporate them into the online logs. Be sure to strip out any
private messages in your logs first :-)

.. _format shown here: http://colabti.org/irclogger/irclogger_log/bcfg2?date=2008-03-21,Fri;raw=on
doc/help/mailinglist.txt000066400000000000000000000013201303523157100156240ustar00rootroot00000000000000.. -*- mode: rst -*-

.. _help-mailinglist:

============
Mailing List
============

To subscribe to the mailing list for Bcfg2, please visit the
`bcfg-dev mailman page`_. `Searchable archives`_ are available from
Gmane. You can also read the mailing list from any NNTP client via Gmane.

.. _bcfg-dev mailman page: https://lists.mcs.anl.gov/mailman/listinfo/bcfg-dev
.. _Searchable archives: http://dir.gmane.org/gmane.comp.sysutils.bcfg2.devel

.. raw:: html
    doc/help/troubleshooting.txt000066400000000000000000000340101303523157100165410ustar00rootroot00000000000000.. -*- mode: rst -*- .. _help-troubleshooting: =============== Troubleshooting =============== From time to time, Bcfg2 produces results that the user finds surprising. This can happen either due to bugs or user error. This page describes several techniques to gain visibility into the bcfg2 client and server and understand what is going on. Figure out if error is client or server side ============================================ * Cache a copy of the client configuration using ``bcfg2 -qnc /tmp/config.xml`` * Look in the file and search for the entry of interest * If it looks correct, then there is a client issue * If not, it is time to inspect things on the server This file contains all aspects of client configuration. It is structured as a series of bundles and base entries. .. note:: Most often the entry is not correct and the issue lies in the specification. Review server log messages ========================== The bcfg2-server process logs to syslog facility LOG_DAEMON. The server produces a series of messages upon a variety of events and errors. The server also supports two XML-RPC methods that can be used to turn up the debug level in a live server: * ``toggle_debug``: Turn debug on or off, depending on the current setting. * ``set_debug``: Turn debug explicitly on or off. These can be called with :ref:`bcfg2-admin xcmd `, e.g.:: bcfg2-admin xcmd toggle_debug bcfg2-admin xcmd set_debug true Each plugin also supports these two methods, which can be used to set the debug level individually on a given plugin, e.g.:: bcfg2-admin xcmd Packages.set_debug true bcfg2-admin xcmd Probes.toggle_debug Finally, the File Activity Monitor has its own analogue to these two methods, for setting the debug level of the FAM:: bcfg2-admin xcmd Inotify.toggle_debug bcfg2-admin xcmd Inotify.set_debug false Check if all repository XML files conform to schemas ==================================================== Bcfg2 comes with XML schemas describing all of the XML formats used in the server repository. A validation command ``bcfg2-lint`` is included with the source distribution and all packages. If the bcfg2 server is not reflecting recent changes, try restarting the bcfg2-server process ============================================================================================= If this fixes the problem, it is either a bug in the underlying file monitoring system (inotify or gamin) or a bug in Bcfg2's file monitoring code. In either case, file a `ticket `_ in the tracking system. In the ticket, include: * filesystem monitoring system (inotify or gamin) * kernel version (if on linux) * if any messages of the form "Handled N events in M seconds" appeared between the modification event and the client configuration generation request appeared in the server log * which plugin handled the file in the repostiory (Cfg, Rules, Packages, SSHbase, Metadata, etc.) * if a touch of the file after the modification causes the problem to go away bcfg2-info ========== Bcfg2 server operations can be simulated using the ``bcfg2-info`` command. 
The command is interactive, and has commands to allow several useful operations * clients - Current client metadata (profile and group) settings * groups - Current group metadata values * mappings - Configuration entries provided by plugins * buildfile [--altsrc=] - Build a config file for a client * buildbundle - Render a templated bundle for a client * showentries - Build the abstract configuration (list of entries) for a client * build - Build the complete configuration for a client Type `help` in bcfg2-info for more information. Error Messages ============== The tables below describe error messages produced by Bcfg2 and steps that can be taken to remedy them. Client Errors ------------- +------------------------------+----------------------------+--------------+ | Error | Meaning | Repair | +==============================+============================+==============+ | Incomplete information for | The described entry is not | [c1]_ | | entry : | fully specified by the | | | cannot verify | server, so no verification | | | | can be performed. | | +------------------------------+----------------------------+--------------+ | Incomplete information for | The described entry is not | [c1]_ | | entry : | fully specified by the | | | cannot install | server, so no verification | | | | can be performed. | | +------------------------------+----------------------------+--------------+ | The following entries are | The client cannot figure | [c2]_ | | not handled by any tool: | out how to handle this | | | : | entry. | | +------------------------------+----------------------------+--------------+ | No ca is specified. Cannot | The client is unable to | [c3]_ | | authenticate the server with | verify the server. | | | SSL. | | | +------------------------------+----------------------------+--------------+ | GID normalization failed for | The client is unable to | [c4]_ | | FILENAME. Does group GROUP | convert the group GROUP to | | | exist? | a usable GID. | | +------------------------------+----------------------------+--------------+ | UID normalization failed for | The client is unable to | [c5]_ | | FILENAME. Does owner OWNER | convert the owner OWNER to | | | exist? | a usable UID. | | +------------------------------+----------------------------+--------------+ | SSL CA error | The CA certificate | [c6]_ | | | specified in bcfg2.conf is | | | | incorrect | | +------------------------------+----------------------------+--------------+ .. [c1] This entry is not being bound. Ensure that a version of this entry applies to this client. .. [c2] It is possible that the type attribute for this generator entry is undefined. You may need to add the appropriate type attribute in the literal entry specification. .. [c3] Copy the Bcfg2 server's CA certificate to the client and specify it using the **ca** option in the [communication] section of ``bcfg2.conf`` .. [c4] If the group doesn't exist, you need to specify the correct one in an :ref:`info.xml ` file or set the default group appropriately. .. [c5] If the owner doesn't exist, you need to specify the correct one in an :ref:`info.xml ` file or set the default owner appropriately. .. [c6] Check that the CA specified in bcfg2.conf is appropriate for the server you are attempting to access. 
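For the CA-related client errors above, the remedy usually amounts to
copying the server's certificate down to the client and pointing
``bcfg2.conf`` at it. A minimal sketch (the hostname is the one from the
Getting Started example; adjust paths to your site)::

    # on the client: fetch the server certificate and trust it as the CA
    scp bcfg-server.example.com:/etc/bcfg2.crt /etc/bcfg2.ca

    # then add to /etc/bcfg2.conf on the client:
    #   [communication]
    #   ca = /etc/bcfg2.ca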
Server Errors ------------- +------------------------------+---------------------+--------------+ | Error | Meaning | Repair | +==============================+=====================+==============+ | Failed to bind entry: | The server was | [s1]_ | | | unable to find a | | | | suitable version of | | | | entry for client. | | +------------------------------+---------------------+--------------+ | Failed to bind to socket | The server was | [s2]_ | | | unable to bind to | | | | the tcp server | | | | socket. | | +------------------------------+---------------------+--------------+ | Failed to load | The server was | [s3]_ | | ssl key | unable to read and | | | | process the ssl key.| | +------------------------------+---------------------+--------------+ | Failed to read file | The server failed | [s4]_ | | | to read the | | | | specified file | | +------------------------------+---------------------+--------------+ | Failed to parse file | The server failed | [s5]_ | | | to parse the | | | | specified XML file | | +------------------------------+---------------------+--------------+ | Client metadata resolution | The server cannot | [s6]_ | | error for | resolve the client | | | | hostname or the | | | | client is | | | | associated with a | | | | non-profile group. | | +------------------------------+---------------------+--------------+ | Failed to decode | The encoding being | [s7]_ | | Please verify you are using | used is unable to | | | the proper encoding | decode the | | | | character present | | | | in this file. | | +------------------------------+---------------------+--------------+ | Got unknown entries | The Packages plugin | [s8]_ | | [list of unknown entries] | has no knowledge of | | | | the listed entries | | +------------------------------+---------------------+--------------+ | Failed to import lxml | The server cannot | [s9]_ | | dependency. Shutting | import lxml | | | down server. | | | +------------------------------+---------------------+--------------+ | You need to specify base64 | The server cannot | [s10]_ | | encoding for | send the file as | | | | ascii text | | +------------------------------+---------------------+--------------+ | ERROR: Error reading file | The server cannot | [s11]_ | | '/path/to/schema': failed to | find the schema | | | load external entity | file | | | "/path/to/schema" | | | +------------------------------+---------------------+--------------+ | Packages: No matching | None of the sources | [s12]_ | | sources for client | defined in the | | | ; improper group | Package plugin's | | | memberships? | ``sources.xml`` | | | | apply to the client | | +------------------------------+---------------------+--------------+ .. [s1] This entry is not being bound. Ensure that a version of this entry applies to this client. .. [s2] Ensure that another instance of the daemon (or any other process) is not listening on the same port. .. [s3] Ensure that the key is readable by the user running the daemon and that it is well-formed. .. [s4] Ensure that this file still exists; a frequent cause is the deletion of a temp file. .. [s5] Ensure that the file is properly formed XML. .. [s6] Fix hostname resolution for the client or ensure that the profile group is properly setup. .. [s7] Ensure the correct encoding is specified in the [components] section of ``bcfg2.conf``. .. [s8] For packages listed other than **gpg-pubkey**, this error means that the Packages plugin is unable to find the package in any of the sources listed in ``Packages/sources.xml``. 
The issue often arises when the client is not in one of the groups necessary for the Source listed. In the case of gpg-pubkey, you can safely ignore the message as the Packages plugin has no knowledge of these packages (however, note that this package is most often specified as a BoundPackage entry). .. [s9] Ensure that you have installed all the necessary :ref:`installation-prerequisites`. .. [s10] You likely need to specify a base64 encoding using an :ref:`server-info` file for this entry. .. [s11] Verify that you have the proper prefix set in bcfg2.conf. .. [s12] Ensure that the client is a member of all the appropriate groups you may have defined in your :ref:`server-plugins-generators-packages` configuration. FAQs ==== Why won't bcfg2-server start? ----------------------------- If your server doesn't seem to be starting and you see no error messages in your server logs, try running it in the foreground to see why. Why am I getting a traceback? ----------------------------- If you get a traceback, please let us know by reporting it on `Trac ticket tracker`_, via the mailing list, or on IRC. Your best bet to get a quick response will be to jump on IRC during the daytime (CST). .. _Trac ticket tracker: http://bcfg2.org What is the most common cause of "The following entries are not handled by any tool"? ------------------------------------------------------------------------------------- Often it corresponds to entries that aren't bound by the server (for which you'll get error messages on the server). You should try inspecting the logs on the server to see what may be the cause. doc/index.txt000066400000000000000000000031461303523157100134770ustar00rootroot00000000000000.. -*- mode: rst -*- .. _index: ============================= Bcfg2 documentation |release| ============================= What is Bcfg2? ============== Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the `Mathematics and Computer Science Division`_ of `Argonne National Laboratory`_. .. _Mathematics and Computer Science Division: http://www.mcs.anl.gov/ .. _Argonne National Laboratory: http://www.anl.gov/ It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to Bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, Bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. .. toctree:: :maxdepth: 2 contents doc/installation/000077500000000000000000000000001303523157100143245ustar00rootroot00000000000000doc/installation/building-packages.txt000066400000000000000000000164421303523157100204450ustar00rootroot00000000000000.. 
-*- mode: rst -*- .. vim: ft=rst .. _installation-building-packages: ============================= Building packages from source ============================= Building RPMs ============= Building from a tarball ----------------------- * Create a directory structure for rpmbuild:: rpmdev-setuptree * Copy the tarball to ``~/rpmbuild/SOURCES/`` * Extract another copy of it somewhere else (eg: ``/tmp``) and retrieve the ``misc/bcfg2.spec`` file * Run the following:: rpmbuild -ba bcfg2.spec * The resulting RPMs will be in ``~/rpmbuild/RPMS/`` and SRPMs in ``~/rpmbuild/SRPMS/``. Building Debian packages ======================== The Bcfg2 project provides a ``debian`` subdirectory with the project's source that enables users to create their own Debian/Ubuntu compatible packages (`.deb` files). Build deps ---------- If the distribution you are building on already has packaged bcfg2 (even an older version), the following command will likely install the necessary build dependencies:: apt-get build-dep bcfg2 bcfg2-server Install source code ------------------- Depending on which version of bcfg2 you want build, you can obtain the source code from the Download_ page or from the project's git repository. To create a local anonymous working copy of the latest version of the bcfg2 source code, use a command like the following:: git clone git://git.mcs.anl.gov/bcfg2.git Update the changelog -------------------- The next step is to update the ``debian/changelog`` file with an appropriate package version string. Debian packages contain a version that is extracted from the latest entry in this file. An appropriate version will help you distinguish your locally built package from one provided by your distribution. It also helps the packaging system know when a newer version of the package is available to install. It is possible to skip this step, but the packages you build will have the same version as the source distribution and will be easy to confuse with other similarly named (but maybe not equivalent) packages. The basic format of the package version string to use is this:: ~+-0.1+ .. note:: The '+', and '-' characters have significance in determining when one package is newer than another. The following format is believed to do the right thing in all common situations. The components of the package version string are explained below: .. glossary:: This is the version of the Bcfg source code you are working from. It will likely be something like `0.9.6` or `1.0`. If you are using a published pre-release of Bcfg2, it will have a name like `pre1` or `rc1`. Use that string here, otherwise drop this component from the package version string. + If you are building from a local working copy of the git repository, it is useful to include the revision in the package version. If you are building from a downloaded copy of the source, drop this component (including the preceding plus-sign (`+`) from the package version string. + This is a locally relevant name like your last name or your domain name, plus the digit `1`. For example, if your family name is ''Smith'', you could use `smith1`. If you work for ''Example Inc'', you could use `example1`. Here are some examples: * If you are building packages for revision 6c681bd from git, and the latest published version is 1.2.0rc1, the version string should be `1.2.0rc1+6c681bd-0.1+example1`. * If you are building packages for the published 1.0 rc1 version, the version string should be `1.0rc1-0.1+example1`. 
* If you are building packages for the published 1.0 version, the version string should be `1.0-0.1+example1`. If you are working on a git working copy of 1.0 pre5 and have the ``devscripts`` package installed, the following command is a convenient way to create a well formatted changelog entry:: REV=$(git log --oneline | head -n 1 | cut -d' ' -f1) debchange --force-bad-version --preserve --newversion "1.0~pre5+${REV}-0.1+example1" git revision $REV Building the package -------------------- With the preliminaries out of the way, building the package is simple.:: cd .. # Change into the top level of the source directory fakeroot dpkg-buildpackage -uc -us The freshly built packages will be deposited in the parent of the current directory (``..``). Examine the output of ``dpkg-buildpackage`` for details. External build systems ---------------------- This section describes how to build bcfg2 and deps via external build systems (Currently only a PPA). Some other possibilities are: * #651 Look into project-builder to make more native-system bcfg2 packages available * http://en.opensuse.org/Build_Service/Deb_builds Launchpad PPA ^^^^^^^^^^^^^ https://launchpad.net/~bcfg2 To upload to the PPA you need to be on the active member list of `Bcfg2 in Launchpad`_. Note that **after each successful upload**, you should wait until the PPA is built, and then **install it locally** using ``sudo aptitude update; sudo aptitude install (packagename)`` so the next build doesn't fail on your local machine. If you don't want to wait for a PPA binary build to complete, you can "apt-get source (packagename)" and do a local build before the PPA build is done. setup gpg-agent """"""""""""""" Setting up gpg-agent and pinentry prevents you from having to type your passphrase repeatedly.:: sudo aptitude install gnupg-agent pinentry-gtk2 pinentry-curses # replace 0xAA95C349 with your GPG Key ID export GPGKEY=0xAA95C349 killall -q gpg-agent eval $(gpg-agent --daemon) setup debuild """"""""""""" Tell dpkg-buildpackage who you are, for example:: export DEBEMAIL="dclark@pobox.com" export DEBFULLNAME="Daniel Joseph Barnhart Clark" upload bcfg2 to ppa """"""""""""""""""" A ``dists`` file contains a space-separated list of all distributions you want to build PPA packages for. .. code-block:: sh #!/bin/sh . ./dists # Replace 0xAA95C349 with your GnuPG Key ID export GPGKEY=0xAA95C349 sudo apt-get build-dep bcfg2 bcfg2-server sudo aptitude install git VERSION=1.3.2-1 if [ ! -d testing ]; then mkdir testing fi DATE=$(date +%F-%H%M) ppa="testing" # "testing" or "ppa" (for stable) # download source cd testing git clone git://git.mcs.anl.gov/bcfg2 cd bcfg2 GITID=$(git log --oneline | head -n 1 | cut -d' ' -f1) cp debian/changelog ../changelog.orig for dist in $DISTS do cp ../changelog.orig debian/changelog (cd debian && dch --distribution ${dist} \ --force-bad-version \ --preserve \ --force-distribution \ --newversion "${VERSION}~${ppa}~${dist}${DATE}+${GITID}" \ "bcfg2 backport for ${dist} release ${VERSION} git commit ${GITID}") debuild --no-tgz-check -rfakeroot -I -S -k${GPGKEY} done for dist in $DISTS do dput ppa:bcfg2/${dist}testing ../bcfg2_${VERSION}~${ppa}~${dist}${DATE}+${GITID}_source.changes done .. _Download: http://bcfg2.org/download/ .. _Bcfg2 in Launchpad: https://launchpad.net/~bcfg2 doc/installation/distributions.txt000066400000000000000000000075351303523157100200010ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. 
_distributions: =========================== Distribution-specific notes =========================== The installation of Bcfg2 on a specific distribution depends on the package management tool and the availability of the package in the distribution's repository. Alpine Linux ============ Packages for `Alpine Linux`_ are available in the `testing`_ repository. Just use `apk` to perform the installation :: apk add bcfg2 bcfg2-server -U -X http://dl-3.alpinelinux.org/alpine/edge/testing/ --allow-untrusted .. _Alpine Linux: http://www.alpinelinux.org/ .. _testing: http://git.alpinelinux.org/cgit/aports/tree/testing/bcfg2 Arch Linux ========== Packages for `Arch Linux`_ are available in the Arch User Repository (AUR_). The bcfg2 package includes bcfg2-server. .. _Arch Linux: http://www.archlinux.org/ .. _AUR: http://aur.archlinux.org/packages/bcfg2 Debian ====== Packages of Bcfg2 are available for all current versions of Debian. The fastest way to get Bcfg2 onto your Debian system is to use ``apt-get`` or ``aptitude``. :: sudo aptitude install bcfg2 bcfg2-server If you want to use unofficial packages from Bcfg2 see the instructions at `CustomDebianRepository`_. .. _CustomDebianRepository: http://trac.mcs.anl.gov/projects/bcfg2/wiki/PrecompiledPackages#UnofficialDebianRepository Fedora ====== The fastest way to get Bcfg2 packages onto your Fedora_ system is to use `yum` or PackageKit. Yum will pull in all dependencies of Bcfg2 automatically. :: su -c 'yum install bcfg2-server bcfg2' Be aware that the latest release of Bcfg2 may only be available for the Development release of Fedora (Rawhide). With the activation of the Rawhide repository of Fedora you will be able to install it. :: su -c 'yum install --enablerepo=rawhide bcfg2-server bcfg2' This way is not recommended on production systems. Only for testing. Gentoo ====== Bcfg2 can be installed via portage. OS X ==== Bcfg2 can be installed either via MacPorts or by creating a native OS X package. MacPorts -------- Once macports is installed:: port install bcfg2 Using native OS X python ------------------------ First, make sure you have Xcode installed as you need ``packagemaker`` which comes bundled in the Developer tools. Clone the git source:: git clone git://git.mcs.anl.gov/bcfg2.git Change to the osx directory and type make. Your new package should be located at ``bcfg2-$VERSION.pkg`` (where ``$VERSION`` is that which is specified in ``setup.py``). RHEL / Centos / Scientific Linux ================================ While you can go about building all these things from source, this section will try and meet the dependencies using packages from EPEL_ [#f1]_. The *el5* and the *el6* package should be compatible with `CentOS`_ 5.x/6.x and `Scientific Linux`_. EPEL_ for 5.x:: [root@centos ~]# rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm EPEL_ for 6.x:: [root@centos ~]# rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-5.noarch.rpm Install the bcfg2-server and bcfg2 RPMs:: [root@centos ~]# yum install bcfg2-server bcfg2 .. note:: The latest package for *el5* is only available in the testing repository. .. [#f1] For more details check the EPEL_ `instructions `_ .. _CentOS: http://www.centos.org/ .. _Scientific Linux: http://www.scientificlinux.org/ .. _EPEL: http://fedoraproject.org/wiki/EPEL Ubuntu ====== We highly recommend following the instructions at `ubuntu-installation`_ in order to install a recent version of Bcfg2 on your system. 
However, if you would like to install the older package, you can use the following command:: sudo aptitude install bcfg2 bcfg2-server .. _ubuntu-installation: http://trac.mcs.anl.gov/projects/bcfg2/wiki/PrecompiledPackages#UbuntuLaunchpadBcfg2PPA doc/installation/index.txt000066400000000000000000000012341303523157100161740ustar00rootroot00000000000000.. -*- mode: rst -*- .. _installation-index: ============ Installation ============ Before installing, you will need to choose a machine to be the Bcfg2 server. We recommend a Linux-based machine for this purpose, but the server will work on any supported operating system. Note that you may eventually want to run a web server on this machine for reporting and serving up package repositories. The server package only needs to be installed on your designated Bcfg2 server machine. The clients package needs to be installed on any machine you plan to manage by Bcfg2. .. toctree:: :maxdepth: 2 prerequisites source building-packages distributions doc/installation/prerequisites.txt000066400000000000000000000106671303523157100200030ustar00rootroot00000000000000.. -*- mode: rst -*- .. _installation-prerequisites: Prerequisites ============= Bcfg2 has several server side prerequisites and a minimal set of client side requirements. This page describes the prerequisite software situation on all supported platforms. The table describes what software is needed on the client and server side. Bcfg2 Client ------------ +----------------------------+------------------------+--------------------------------+ | Software | Version | Requires | +============================+========================+================================+ | libxml2 (if lxml is used) | Any | | +----------------------------+------------------------+--------------------------------+ | libxslt (if lxml is used) | Any | libxml2 | +----------------------------+------------------------+--------------------------------+ | python | 2.4 and greater [#f1]_ | | +----------------------------+------------------------+--------------------------------+ | lxml or elementtree [#f2]_ | Any | lxml: libxml2, libxslt, python | +----------------------------+------------------------+--------------------------------+ | python-apt [#f3]_ | 0.7.91 and greater | python | +----------------------------+------------------------+--------------------------------+ | debsums (if APT tool | Any | | | driver is used) | | | +----------------------------+------------------------+--------------------------------+ | python-setuptools | Any | | +----------------------------+------------------------+--------------------------------+ .. [#f1] python 2.5 and later works with elementtree. .. [#f2] elementtree is included in python 2.5 and later. .. [#f3] python-apt is only required on platforms that use apt, such as Debian and Ubuntu. 
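A quick way to confirm the client's XML library prerequisite is to try
the imports directly (a sketch; either lxml or the elementtree module
bundled with Python 2.5 and later satisfies the requirement)::

    python -c 'import lxml.etree'               # lxml present?
    python -c 'import xml.etree.cElementTree'   # bundled elementtree (Python 2.5+)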
Bcfg2 Server ------------ +-------------------------------+----------+--------------------------------+ | Software | Version | Requires | +===============================+==========+================================+ | libxml2 | 2.6.24+ | | +-------------------------------+----------+--------------------------------+ | libxslt | Any | libxml2 | +-------------------------------+----------+--------------------------------+ | python | 2.2-2.7 | | +-------------------------------+----------+--------------------------------+ | lxml | 0.9+ | lxml: libxml2, libxslt, python | +-------------------------------+----------+--------------------------------+ | gamin or inotify | Any | | +-------------------------------+----------+--------------------------------+ | python-gamin or pyinotify | Any | gamin or inotify, python | +-------------------------------+----------+--------------------------------+ | python-ssl (note | Any | python, backported ssl module | +-------------------------------+----------+--------------------------------+ | python-setuptools | Any | | +-------------------------------+----------+--------------------------------+ | python-genshi | Any | | +-------------------------------+----------+--------------------------------+ Bcfg2 Reporting --------------- A webserver capabable of running wsgi applications is required for web reporting, such as Apache + mod_wsgi or nginx. +-------------------------------+----------+--------------------------------+ | Software | Version | Requires | +===============================+==========+================================+ | django | 1.3.0+ | | +-------------------------------+----------+--------------------------------+ | south | 0.7.5+ | | +-------------------------------+----------+--------------------------------+ doc/installation/source.txt000066400000000000000000000023151303523157100163660ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _7F7D197E: http://pgp.mit.edu:11371/pks/lookup?op=get&search=0x75BF2C177F7D197E .. _A88FFF4B: http://pgp.mit.edu:11371/pks/lookup?op=get&search=0x80B8492FA88FFF4B .. _Download: http://bcfg2.org/download/ .. _source: Installation from source ======================== Download -------- Tarball ^^^^^^^ The Bcfg2 source tarball can be grabbed from the `Download`_ page. All tarballs are signed with GPG keys `7F7D197E`_ or `A88FFF4B`_. You can verify your download by importing the keys and running :: gpg --recv-keys 0x75bf2c177f7d197e 0x80B8492FA88FFF4B gpg --verify bcfg2-.tar.gz.gpg bcfg2-.tar.gz Git checkout ^^^^^^^^^^^^ You can also get the latest (possibly broken) code via git :: git clone git://git.mcs.anl.gov/bcfg2.git Install ------- If you are working with the release tarball of Bcfg2 you need to untar it before you can go on with the installation :: tar -xzf bcfg2-.tar.gz Now you can build Bcfg2 with. If you are working from a git clone no need to be specified. :: cd bcfg2- python setup.py install --prefix=/install/prefix This will install both the client and server on that machine. 
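Putting the download, verification, and install steps together, here is a
worked example for a hypothetical 1.3.2 release tarball (substitute the
version you actually downloaded and your preferred prefix)::

    gpg --recv-keys 0x75bf2c177f7d197e 0x80B8492FA88FFF4B
    gpg --verify bcfg2-1.3.2.tar.gz.gpg bcfg2-1.3.2.tar.gz
    tar -xzf bcfg2-1.3.2.tar.gz
    cd bcfg2-1.3.2
    python setup.py install --prefix=/usr/local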
doc/intersphinx/000077500000000000000000000000001303523157100141765ustar00rootroot00000000000000doc/intersphinx/cherrypy/000077500000000000000000000000001303523157100160435ustar00rootroot00000000000000doc/intersphinx/cherrypy/objects.inv000066400000000000000000001426161303523157100202240ustar00rootroot00000000000000# Sphinx inventory version 1 # Project: CherryPy # Version: 3.2.0 cherrypy.wsgiserver.ssl_pyopenssl mod refman/wsgiserver/ssl_pyopenssl.html cherrypy mod refman/cherrypy.html cherrypy.lib.encoding mod refman/lib/encoding.html cherrypy.wsgiserver mod refman/wsgiserver/init.html cherrypy.lib.cptools mod refman/lib/cptools.html cherrypy._cpconfig mod refman/_cpconfig.html cherrypy.lib.jsontools mod refman/lib/jsontools.html cherrypy.lib.xmlrpc mod refman/lib/xmlrpc.html cherrypy._cpreqbody mod refman/_cpreqbody.html cherrypy._cptools mod refman/_cptools.html cherrypy.lib.profiler mod refman/lib/profiler.html cherrypy.lib.caching mod refman/lib/caching.html cherrypy._cprequest mod refman/_cprequest.html cherrypy.process.servers mod refman/process/servers.html cherrypy.process.plugins mod refman/process/plugins/index.html cherrypy.lib.sessions mod refman/lib/sessions.html cherrypy.process.wspbus mod refman/process/wspbus.html cherrypy.lib.httpauth mod refman/lib/httpauth.html cherrypy.lib.auth_digest mod refman/lib/auth_digest.html cherrypy.lib.auth mod refman/lib/auth.html cherrypy.wsgiserver.ssl_builtin mod refman/wsgiserver/ssl_builtin.html cherrypy._cpdispatch mod refman/_cpdispatch.html cherrypy._cpchecker mod refman/_cpchecker.html cherrypy.lib.reprconf mod refman/lib/reprconf.html cherrypy._cpserver mod refman/_cpserver.html cherrypy.lib.auth_basic mod refman/lib/auth_basic.html cherrypy._cpwsgi mod refman/_cpwsgi.html cherrypy._cptree mod refman/_cptree.html cherrypy.lib.static mod refman/lib/static.html cherrypy._cplogging mod refman/_cplogging.html cherrypy.lib.covercp mod refman/lib/covercp.html cherrypy.lib.httputil mod refman/lib/httputil.html cherrypy._cperror mod refman/_cperror.html cherrypy.url function refman/cherrypy.html cherrypy._cplogging.LogManager.error method refman/_cplogging.html cherrypy._cptree.Application.release_serving method refman/_cptree.html cherrypy.lib.httputil.header_elements function refman/lib/httputil.html cherrypy.process.plugins.DropPrivileges.umask attribute refman/process/plugins/dropprivileges.html cherrypy._cpreqbody.Entity.fullvalue method refman/_cpreqbody.html cherrypy.lib.sessions.FileSession.setup classmethod refman/lib/sessions.html cherrypy.process.plugins.Monitor.start method refman/process/plugins/index.html cherrypy.lib.cptools.SessionAuth.do_check method refman/lib/cptools.html cherrypy._cpreqbody.Entity.attempt_charsets attribute refman/_cpreqbody.html cherrypy.lib.sessions.MemcachedSession.acquire_lock method refman/lib/sessions.html cherrypy.wsgiserver.HTTPRequest.ready attribute refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.has_key method refman/lib/sessions.html cherrypy.lib.httputil.AcceptElement.qvalue attribute refman/lib/httputil.html cherrypy._cprequest.Request.method attribute refman/_cprequest.html cherrypy.wsgiserver.WSGIGateway_u0.get_environ method refman/wsgiserver/init.html cherrypy.HTTPError.set_response method refman/cherrypy.html cherrypy._cpserver.Server.base method refman/_cpserver.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter.get_environ method refman/wsgiserver/ssl_builtin.html cherrypy.lib.httputil.protocol_from_http function refman/lib/httputil.html 
cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.wrap method refman/wsgiserver/ssl_pyopenssl.html cherrypy.process.servers.FlupFCGIServer class refman/process/servers.html cherrypy.lib.sessions.PostgresqlSession.values method refman/lib/sessions.html cherrypy.process.plugins.Autoreloader class refman/process/plugins/index.html cherrypy.lib.reprconf.Config.reset method refman/lib/reprconf.html cherrypy.lib.sessions.RamSession.acquire_lock method refman/lib/sessions.html cherrypy._cpserver.Server.ssl_context attribute refman/_cpserver.html cherrypy.process.servers.client_host function refman/process/servers.html cherrypy.lib.sessions.MemcachedSession.items method refman/lib/sessions.html cherrypy.lib.sessions.MemcachedSession.pop method refman/lib/sessions.html cherrypy.lib.sessions.FileSession.clean_up method refman/lib/sessions.html cherrypy.lib.sessions.MemcachedSession.setdefault method refman/lib/sessions.html cherrypy._cprequest.Response class refman/_cprequest.html cherrypy.wsgiserver.HTTPRequest.outheaders attribute refman/wsgiserver/init.html cherrypy.process.plugins.Monitor class refman/process/plugins/index.html cherrypy.lib.sessions.Session.timeout attribute refman/lib/sessions.html cherrypy._cpreqbody.Entity.headers attribute refman/_cpreqbody.html cherrypy._cpchecker.Checker.check_compatibility method refman/_cpchecker.html cherrypy.wsgiserver.HTTPServer.interrupt attribute refman/wsgiserver/init.html cherrypy.lib.sessions.FileSession.generate_id method refman/lib/sessions.html cherrypy._cpconfig.Config.reset method refman/_cpconfig.html cherrypy._cprequest.Request.error_response attribute refman/_cprequest.html cherrypy.lib.sessions.MemcachedSession.values method refman/lib/sessions.html cherrypy.lib.sessions.FileSession.acquire_lock method refman/lib/sessions.html cherrypy.lib.jsontools.json_out function refman/lib/jsontools.html cherrypy.lib.sessions.MemcachedSession.clean_up method refman/lib/sessions.html cherrypy.lib.reprconf.unrepr function refman/lib/reprconf.html cherrypy._cprequest.Request.run method refman/_cprequest.html cherrypy.lib.cptools.SessionAuth.do_logout method refman/lib/cptools.html cherrypy.wsgiserver.FatalSSLAlert class refman/wsgiserver/init.html cherrypy.lib.static.serve_download function refman/lib/static.html cherrypy.lib.sessions.close function refman/lib/sessions.html cherrypy._cprequest.Request.header_list attribute refman/_cprequest.html cherrypy.wsgiserver.HTTPRequest.chunked_write attribute refman/wsgiserver/init.html cherrypy._cpreqbody.Part.process method refman/_cpreqbody.html cherrypy.lib.auth_digest.digest_auth function refman/lib/auth_digest.html cherrypy._cplogging.LogManager.error_file attribute refman/_cplogging.html cherrypy.process.wspbus.Bus.stop method refman/process/wspbus.html cherrypy.lib.jsontools.json_processor function refman/lib/jsontools.html cherrypy.wsgiserver.HTTPServer.max_request_header_size attribute refman/wsgiserver/init.html cherrypy.process.wspbus.ChannelFailures class refman/process/wspbus.html cherrypy.lib.encoding.ResponseEncoder.encode_stream method refman/lib/encoding.html cherrypy.lib.caching.MemoryCache.maxsize attribute refman/lib/caching.html cherrypy._cpconfig.Config.iterkeys attribute refman/_cpconfig.html cherrypy.wsgiserver.HTTPConnection.close method refman/wsgiserver/init.html cherrypy.lib.httpauth.parseAuthorization function refman/lib/httpauth.html cherrypy._cptree.Application.request_class attribute refman/_cptree.html cherrypy.wsgiserver.HTTPServer.nodelay attribute 
refman/wsgiserver/init.html cherrypy.wsgiserver.HTTPServer.timeout attribute refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.load method refman/lib/sessions.html cherrypy._cpconfig.Config.iteritems attribute refman/_cpconfig.html cherrypy._cprequest.Request.dispatch attribute refman/_cprequest.html cherrypy.lib.caching.Cache class refman/lib/caching.html cherrypy.process.wspbus.ChannelFailures.get_instances method refman/process/wspbus.html cherrypy.lib.profiler.Profiler class refman/lib/profiler.html cherrypy._cprequest.Response.body attribute refman/_cprequest.html cherrypy._cprequest.Request.config attribute refman/_cprequest.html cherrypy.wsgiserver.HTTPRequest.inheaders attribute refman/wsgiserver/init.html cherrypy._cplogging.LogManager.error_log attribute refman/_cplogging.html cherrypy.process.servers.check_port function refman/process/servers.html cherrypy.wsgiserver.HTTPServer.bind method refman/wsgiserver/init.html cherrypy._cpdispatch.RoutesDispatcher.find_handler method refman/_cpdispatch.html cherrypy.lib.sessions.FileSession.regenerate method refman/lib/sessions.html cherrypy._cprequest.Request class refman/_cprequest.html cherrypy.process.plugins.ThreadManager class refman/process/plugins/index.html cherrypy.lib.sessions.Session.originalid attribute refman/lib/sessions.html cherrypy._cprequest.Request.login attribute refman/_cprequest.html cherrypy.process.plugins.BackgroundTask class refman/process/plugins/index.html cherrypy._cpreqbody.SizedReader.read method refman/_cpreqbody.html cherrypy._cpreqbody.Entity class refman/_cpreqbody.html cherrypy._cpserver.Server.shutdown_timeout attribute refman/_cpserver.html cherrypy._cplogging.LogManager.wsgi attribute refman/_cplogging.html cherrypy.wsgiserver.HTTPConnection class refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.get method refman/lib/sessions.html cherrypy.process.servers.FlupSCGIServer class refman/process/servers.html cherrypy.process.servers.ServerAdapter.start method refman/process/servers.html cherrypy._cpreqbody.Part.read_lines_to_boundary method refman/_cpreqbody.html cherrypy.process.plugins.Monitor.stop method refman/process/plugins/index.html cherrypy._cprequest.Request.app attribute refman/_cprequest.html cherrypy._cprequest.Response.collapse_body method refman/_cprequest.html cherrypy._cpwsgi.AppResponse class refman/_cpwsgi.html cherrypy.wsgiserver.HTTPServer.version attribute refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession class refman/lib/sessions.html cherrypy.process.wspbus.Bus.exit method refman/process/wspbus.html cherrypy.HTTPError.reason attribute refman/cherrypy.html cherrypy._cpserver.Server.nodelay attribute refman/_cpserver.html cherrypy._cprequest.Request.methods_with_bodies attribute refman/_cprequest.html cherrypy.wsgiserver.HTTPRequest.parse_request method refman/wsgiserver/init.html cherrypy.lib.caching.MemoryCache class refman/lib/caching.html cherrypy.lib.encoding.gzip function refman/lib/encoding.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.get_context method refman/wsgiserver/ssl_pyopenssl.html cherrypy.lib.reprconf.Config.update method refman/lib/reprconf.html cherrypy._cprequest.Hook.priority attribute refman/_cprequest.html cherrypy._cptools.CachingTool class refman/_cptools.html cherrypy._cpserver.Server.socket_port attribute refman/_cpserver.html cherrypy.lib.profiler.ProfileAggregator class refman/lib/profiler.html cherrypy.lib.httputil.HeaderElement.parse staticmethod refman/lib/httputil.html 
cherrypy.lib.auth_digest.HttpDigestAuthorization.is_nonce_stale method refman/lib/auth_digest.html cherrypy.lib.sessions.FileSession.save method refman/lib/sessions.html cherrypy.lib.auth_digest.HttpDigestAuthorization.HA2 method refman/lib/auth_digest.html cherrypy._cprequest.Response.timeout attribute refman/_cprequest.html cherrypy.lib.sessions.Session.regenerate method refman/lib/sessions.html cherrypy.lib.static.serve_file function refman/lib/static.html cherrypy.lib.sessions.RamSession.generate_id method refman/lib/sessions.html cherrypy.process.plugins.Daemonizer class refman/process/plugins/daemonizer.html cherrypy.lib.sessions.Session.update method refman/lib/sessions.html cherrypy.lib.sessions.PostgresqlSession class refman/lib/sessions.html cherrypy.lib.caching.MemoryCache.delay attribute refman/lib/caching.html cherrypy._cpreqbody.Entity.default_content_type attribute refman/_cpreqbody.html cherrypy._cprequest.Request.local attribute refman/_cprequest.html cherrypy.HTTPError.code attribute refman/cherrypy.html cherrypy._cpreqbody.Entity.filename attribute refman/_cpreqbody.html cherrypy.process.wspbus.Bus.log method refman/process/wspbus.html cherrypy.HTTPRedirect.set_response method refman/cherrypy.html cherrypy.wsgiserver.WSGIGateway class refman/wsgiserver/init.html cherrypy._cpreqbody.Part.attempt_charsets attribute refman/_cpreqbody.html cherrypy._cprequest.Request.error_page attribute refman/_cprequest.html cherrypy._cpconfig.Config.clear attribute refman/_cpconfig.html cherrypy.lib.caching.Cache.get method refman/lib/caching.html cherrypy._cpreqbody.Entity.length attribute refman/_cpreqbody.html cherrypy.wsgiserver.HTTPRequest.server attribute refman/wsgiserver/init.html cherrypy.lib.caching.expires function refman/lib/caching.html cherrypy.wsgiserver.WorkerThread.server attribute refman/wsgiserver/init.html cherrypy._cpchecker.Checker.on attribute refman/_cpchecker.html cherrypy._cpserver.Server.instance attribute refman/_cpserver.html cherrypy._cpwsgi.CPWSGIApp.response_class attribute refman/_cpwsgi.html cherrypy.lib.profiler.make_app class refman/lib/profiler.html cherrypy._cprequest.Request.stage attribute refman/_cprequest.html cherrypy.process.plugins.Autoreloader.files attribute refman/process/plugins/index.html cherrypy.wsgiserver.WorkerThread.conn attribute refman/wsgiserver/init.html cherrypy._cpconfig.merge function refman/_cpconfig.html cherrypy._cptools.Toolbox class refman/_cptools.html cherrypy._cprequest.Response.time attribute refman/_cprequest.html cherrypy._cpreqbody.RequestBody.type attribute refman/_cpreqbody.html cherrypy._cperror.NotFound class refman/_cperror.html cherrypy.lib.caching.tee_output function refman/lib/caching.html cherrypy.lib.cptools.session_auth function refman/lib/cptools.html cherrypy._cpreqbody.Part.part_class attribute refman/_cpreqbody.html cherrypy.lib.auth_digest.HttpDigestAuthorization.validate_nonce method refman/lib/auth_digest.html cherrypy.process.plugins.DropPrivileges.gid attribute refman/process/plugins/dropprivileges.html cherrypy.wsgiserver.HTTPRequest.write method refman/wsgiserver/init.html cherrypy.lib.caching.get function refman/lib/caching.html cherrypy._cpreqbody.Part.fullvalue method refman/_cpreqbody.html cherrypy.wsgiserver.HTTPServer.gateway attribute refman/wsgiserver/init.html cherrypy._cprequest.Request.params attribute refman/_cprequest.html cherrypy.lib.httputil.valid_status function refman/lib/httputil.html cherrypy._cprequest.Response.check_timeout method refman/_cprequest.html 
cherrypy.lib.sessions.PostgresqlSession.pop method refman/lib/sessions.html cherrypy._cplogging.LogManager.time method refman/_cplogging.html cherrypy.lib.sessions.MemcachedSession.clear method refman/lib/sessions.html cherrypy.lib.sessions.RamSession.id attribute refman/lib/sessions.html cherrypy._cpwsgi.VirtualHost.use_x_forwarded_host attribute refman/_cpwsgi.html cherrypy.lib.sessions.RamSession.release_lock method refman/lib/sessions.html cherrypy.process.servers.ServerAdapter.wait method refman/process/servers.html cherrypy.lib.sessions.RamSession.pop method refman/lib/sessions.html cherrypy._cpreqbody.RequestBody class refman/_cpreqbody.html cherrypy.lib.sessions.MemcachedSession.setup classmethod refman/lib/sessions.html cherrypy._cpreqbody.Part.default_proc method refman/_cpreqbody.html cherrypy.lib.httputil.CaseInsensitiveDict class refman/lib/httputil.html cherrypy.process.plugins.ThreadManager.acquire_thread method refman/process/plugins/index.html cherrypy.lib.sessions.FileSession.load method refman/lib/sessions.html cherrypy._cpdispatch.VirtualHost class refman/_cpdispatch.html cherrypy.process.plugins.Autoreloader.stop method refman/process/plugins/index.html cherrypy.lib.sessions.FileSession.id attribute refman/lib/sessions.html cherrypy._cpreqbody.SizedReader.readline method refman/_cpreqbody.html cherrypy.wsgiserver.WSGIGateway_u0 class refman/wsgiserver/init.html cherrypy.wsgiserver.KnownLengthRFile class refman/wsgiserver/init.html cherrypy.lib.sessions.PostgresqlSession.acquire_lock method refman/lib/sessions.html cherrypy.process.plugins.DropPrivileges.uid attribute refman/process/plugins/dropprivileges.html cherrypy.lib.encoding.compress function refman/lib/encoding.html cherrypy._cprequest.Response.stream attribute refman/_cprequest.html cherrypy._cpconfig.Config.popitem attribute refman/_cpconfig.html cherrypy._cprequest.Request.script_name attribute refman/_cprequest.html cherrypy._cplogging.LogManager.logger_root attribute refman/_cplogging.html cherrypy.lib.sessions.Session.has_key method refman/lib/sessions.html cherrypy._cprequest.Request.headers attribute refman/_cprequest.html cherrypy._cpreqbody.RequestBody.part_class attribute refman/_cpreqbody.html cherrypy._cpwsgi.VirtualHost.default attribute refman/_cpwsgi.html cherrypy._cplogging.WSGIErrorHandler.emit method refman/_cplogging.html cherrypy.lib.sessions.Session class refman/lib/sessions.html cherrypy.lib.auth_basic.checkpassword_dict function refman/lib/auth_basic.html cherrypy._cpreqbody.Entity.processors attribute refman/_cpreqbody.html cherrypy.lib.httpauth.basicAuth function refman/lib/httpauth.html cherrypy.lib.xmlrpc.on_error function refman/lib/xmlrpc.html cherrypy.wsgiserver.HTTPServer.tick method refman/wsgiserver/init.html cherrypy._cpserver.Server.protocol_version attribute refman/_cpserver.html cherrypy.lib.sessions.MemcachedSession.regenerate method refman/lib/sessions.html cherrypy._cprequest.Request.body attribute refman/_cprequest.html cherrypy.process.servers.FlupFCGIServer.start method refman/process/servers.html cherrypy._cpwsgi.CPWSGIApp.head attribute refman/_cpwsgi.html cherrypy.lib.sessions.PostgresqlSession.release_lock method refman/lib/sessions.html cherrypy.process.servers.FlupSCGIServer.start method refman/process/servers.html cherrypy._cpconfig.Config.setdefault attribute refman/_cpconfig.html cherrypy._cpreqbody.RequestBody.default_proc method refman/_cpreqbody.html cherrypy._cprequest.Request.is_index attribute refman/_cprequest.html cherrypy.lib.sessions.FileSession.get 
method refman/lib/sessions.html cherrypy._cprequest.Request.close method refman/_cprequest.html cherrypy._cptools.SessionTool.regenerate method refman/_cptools.html cherrypy._cpserver.Server class refman/_cpserver.html cherrypy.process.plugins.DropPrivileges class refman/process/plugins/dropprivileges.html cherrypy.wsgiserver.HTTPRequest.respond method refman/wsgiserver/init.html cherrypy.process.wspbus.Bus.start_with_callback method refman/process/wspbus.html cherrypy.lib.caching.MemoryCache.maxobjects attribute refman/lib/caching.html cherrypy._cpserver.Server.socket_host attribute refman/_cpserver.html cherrypy.process.servers.wait_for_free_port function refman/process/servers.html cherrypy.lib.auth_basic.basic_auth function refman/lib/auth_basic.html cherrypy._cprequest.HookMap.attach method refman/_cprequest.html cherrypy._cpreqbody.RequestBody.make_file method refman/_cpreqbody.html cherrypy.lib.sessions.init function refman/lib/sessions.html cherrypy.lib.auth_digest.synthesize_nonce function refman/lib/auth_digest.html cherrypy.lib.httputil.Host class refman/lib/httputil.html cherrypy._cpdispatch.Dispatcher class refman/_cpdispatch.html cherrypy._cptree.Application.response_class attribute refman/_cptree.html cherrypy.process.plugins.Autoreloader.sysfiles method refman/process/plugins/index.html cherrypy._cptree.Application.get_serving method refman/_cptree.html cherrypy.lib.sessions.RamSession.delete method refman/lib/sessions.html cherrypy._cpreqbody.Entity.parts attribute refman/_cpreqbody.html cherrypy.lib.httputil.HeaderElement class refman/lib/httputil.html cherrypy._cpchecker.Checker.check_config_namespaces method refman/_cpchecker.html cherrypy._cptools.SessionTool class refman/_cptools.html cherrypy.lib.cptools.accept function refman/lib/cptools.html cherrypy._cprequest.Request.protocol attribute refman/_cprequest.html cherrypy.lib.sessions.Session.setdefault method refman/lib/sessions.html cherrypy._cprequest.Request.show_tracebacks attribute refman/_cprequest.html cherrypy.lib.sessions.FileSession.clear method refman/lib/sessions.html cherrypy.lib.sessions.RamSession.clean_up method refman/lib/sessions.html cherrypy.lib.sessions.FileSession.values method refman/lib/sessions.html cherrypy.lib.sessions.PostgresqlSession.get method refman/lib/sessions.html cherrypy.lib.jsontools.json_handler function refman/lib/jsontools.html cherrypy.lib.cptools.validate_since function refman/lib/cptools.html cherrypy.lib.httputil.urljoin function refman/lib/httputil.html cherrypy._cprequest.Request.remote attribute refman/_cprequest.html cherrypy.process.wspbus.Bus.restart method refman/process/wspbus.html cherrypy.process.wspbus.Bus.publish method refman/process/wspbus.html cherrypy._cprequest.Request.closed attribute refman/_cprequest.html cherrypy._cprequest.Request.process_query_string method refman/_cprequest.html cherrypy._cptools.XMLRPCController class refman/_cptools.html cherrypy.process.wspbus.Bus.block method refman/process/wspbus.html cherrypy.wsgiserver.ssl_pyopenssl.SSLConnection class refman/wsgiserver/ssl_pyopenssl.html cherrypy.wsgiserver.WSGIGateway.get_environ method refman/wsgiserver/init.html cherrypy._cprequest.Request.hooks attribute refman/_cprequest.html cherrypy._cplogging.LogManager.screen attribute refman/_cplogging.html cherrypy.lib.cptools.log_traceback function refman/lib/cptools.html cherrypy.HTTPError.status attribute refman/cherrypy.html cherrypy._cpdispatch.XMLRPCDispatcher class refman/_cpdispatch.html cherrypy.lib.sessions.RamSession.clear method 
refman/lib/sessions.html cherrypy.lib.sessions.Session.get method refman/lib/sessions.html cherrypy.lib.sessions.Session.clean_freq attribute refman/lib/sessions.html cherrypy._cpwsgi.InternalRedirector class refman/_cpwsgi.html cherrypy._cptree.Application.script_name attribute refman/_cptree.html cherrypy.wsgiserver.HTTPRequest.read_request_headers method refman/wsgiserver/init.html cherrypy.lib.sessions.Session.missing attribute refman/lib/sessions.html cherrypy._cptools.HandlerWrapperTool class refman/_cptools.html cherrypy.process.plugins.ThreadManager.graceful method refman/process/plugins/index.html cherrypy._cptree.Tree.mount method refman/_cptree.html cherrypy._cprequest.Request.rfile attribute refman/_cprequest.html cherrypy.wsgiserver.WSGIGateway_10.get_environ method refman/wsgiserver/init.html cherrypy.lib.covercp.CoverStats class refman/lib/covercp.html cherrypy._cpchecker.Checker.check_localhost method refman/_cpchecker.html cherrypy._cprequest.Hook.failsafe attribute refman/_cprequest.html cherrypy.lib.sessions.expire function refman/lib/sessions.html cherrypy.process.plugins.Monitor.thread attribute refman/process/plugins/index.html cherrypy.lib.xmlrpc.process_body function refman/lib/xmlrpc.html cherrypy._cperror.HTTPError.reason attribute refman/_cperror.html cherrypy._cpdispatch.Dispatcher.find_handler method refman/_cpdispatch.html cherrypy.process.plugins.Monitor.frequency attribute refman/process/plugins/index.html cherrypy.lib.cptools.MonitoredHeaderMap class refman/lib/cptools.html cherrypy.quickstart function refman/cherrypy.html cherrypy.process.plugins.Autoreloader.unsubscribe method refman/process/plugins/index.html cherrypy.lib.httpauth.calculateNonce function refman/lib/httpauth.html cherrypy._cpreqbody.process_urlencoded function refman/_cpreqbody.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.private_key attribute refman/wsgiserver/ssl_pyopenssl.html cherrypy.process.plugins.PerpetualTimer class refman/process/plugins/index.html cherrypy._cpreqbody.Part.maxrambytes attribute refman/_cpreqbody.html cherrypy._cprequest.Response.timed_out attribute refman/_cprequest.html cherrypy._cpreqbody.Entity.name attribute refman/_cpreqbody.html cherrypy._cpconfig.Config.pop attribute refman/_cpconfig.html cherrypy.lib.caching.MemoryCache.maxobj_size attribute refman/lib/caching.html cherrypy.lib.sessions.PostgresqlSession.generate_id method refman/lib/sessions.html cherrypy._cptree.Tree.apps attribute refman/_cptree.html cherrypy._cptree.Application.config attribute refman/_cptree.html cherrypy.lib.sessions.PostgresqlSession.save method refman/lib/sessions.html cherrypy._cpchecker.Checker.check_app_config_brackets method refman/_cpchecker.html cherrypy._cpconfig.Config.copy attribute refman/_cpconfig.html cherrypy.HTTPError class refman/cherrypy.html cherrypy.lib.cptools.log_hooks function refman/lib/cptools.html cherrypy.lib.encoding.ResponseEncoder class refman/lib/encoding.html cherrypy._cprequest.Response.cookie attribute refman/_cprequest.html cherrypy.process.wspbus.Bus.start method refman/process/wspbus.html cherrypy.lib.profiler.new_func_strip_path function refman/lib/profiler.html cherrypy.wsgiserver.SizeCheckWrapper class refman/wsgiserver/init.html cherrypy.wsgiserver.HTTPServer.ConnectionClass attribute refman/wsgiserver/init.html cherrypy.wsgiserver.WSGIGateway.start_response method refman/wsgiserver/init.html cherrypy.lib.auth_digest.www_authenticate function refman/lib/auth_digest.html cherrypy.lib.cptools.trailing_slash function 
refman/lib/cptools.html cherrypy.lib.sessions.FileSession.pop method refman/lib/sessions.html cherrypy.lib.sessions.RamSession.values method refman/lib/sessions.html cherrypy.lib.cptools.log_request_headers function refman/lib/cptools.html cherrypy.lib.sessions.Session.items method refman/lib/sessions.html cherrypy.lib.sessions.Session.values method refman/lib/sessions.html cherrypy.lib.auth_digest.get_ha1_file_htdigest function refman/lib/auth_digest.html cherrypy.lib.sessions.Session.save method refman/lib/sessions.html cherrypy.lib.httputil.HeaderElement.from_str classmethod refman/lib/httputil.html cherrypy.lib.sessions.FileSession.update method refman/lib/sessions.html cherrypy.wsgiserver.HTTPRequest.parse_request_uri method refman/wsgiserver/init.html cherrypy.process.plugins.Autoreloader.start method refman/process/plugins/index.html cherrypy._cpreqbody.Entity.make_file method refman/_cpreqbody.html cherrypy._cpserver.Server.socket_queue_size attribute refman/_cpserver.html cherrypy.lib.sessions.Session.delete method refman/lib/sessions.html cherrypy.process.plugins.ThreadManager.stop method refman/process/plugins/index.html cherrypy.lib.sessions.Session.pop method refman/lib/sessions.html cherrypy.process.servers.FlupFCGIServer.stop method refman/process/servers.html cherrypy.lib.encoding.decompress function refman/lib/encoding.html cherrypy._cperror.HTTPRedirect.encoding attribute refman/_cperror.html cherrypy.wsgiserver.HTTPServer.protocol attribute refman/wsgiserver/init.html cherrypy._cpreqbody.Part.default_content_type attribute refman/_cpreqbody.html cherrypy.lib.sessions.FileSession.keys method refman/lib/sessions.html cherrypy._cpreqbody.process_multipart_form_data function refman/_cpreqbody.html cherrypy.lib.httputil.HeaderMap.encode method refman/lib/httputil.html cherrypy._cpreqbody.Part class refman/_cpreqbody.html cherrypy.lib.auth_digest.HttpDigestAuthorization class refman/lib/auth_digest.html cherrypy.process.servers.FlupSCGIServer.stop method refman/process/servers.html cherrypy.lib.sessions.Session.clear method refman/lib/sessions.html cherrypy.lib.auth.digest_auth function refman/lib/auth.html cherrypy.lib.caching.MemoryCache.antistampede_timeout attribute refman/lib/caching.html cherrypy._cpreqbody.Entity.content_type attribute refman/_cpreqbody.html cherrypy.lib.xmlrpc.respond function refman/lib/xmlrpc.html cherrypy.process.wspbus.Bus.graceful method refman/process/wspbus.html cherrypy.lib.sessions.RamSession.items method refman/lib/sessions.html cherrypy._cpreqbody.Entity.type attribute refman/_cpreqbody.html cherrypy.lib.reprconf.attributes function refman/lib/reprconf.html cherrypy.lib.cptools.redirect function refman/lib/cptools.html cherrypy.wsgiserver.HTTPServer.bind_addr attribute refman/wsgiserver/init.html cherrypy._cpconfig.Config.update method refman/_cpconfig.html cherrypy.lib.cptools.ignore_headers function refman/lib/cptools.html cherrypy._cptools.HandlerTool.handler method refman/_cptools.html cherrypy._cpreqbody.SizedReader.readlines method refman/_cpreqbody.html cherrypy._cprequest.Request.handler attribute refman/_cprequest.html cherrypy._cpwsgi.VirtualHost class refman/_cpwsgi.html cherrypy.lib.cptools.response_headers function refman/lib/cptools.html cherrypy._cperror.HTTPError class refman/_cperror.html cherrypy.process.plugins.Autoreloader.run method refman/process/plugins/index.html cherrypy._cptools.Tool class refman/_cptools.html cherrypy.wsgiserver.HTTPRequest.send_headers method refman/wsgiserver/init.html 
cherrypy.lib.cptools.flatten function refman/lib/cptools.html cherrypy._cpconfig.Config.has_key attribute refman/_cpconfig.html cherrypy._cplogging.WSGIErrorHandler class refman/_cplogging.html cherrypy.lib.auth.basic_auth function refman/lib/auth.html cherrypy._cperror.TimeoutError class refman/_cperror.html cherrypy.lib.httputil.AcceptElement class refman/lib/httputil.html cherrypy.wsgiserver.HTTPServer.maxthreads attribute refman/wsgiserver/init.html cherrypy.lib.sessions.set_response_cookie function refman/lib/sessions.html cherrypy.lib.auth_digest.get_ha1_dict function refman/lib/auth_digest.html cherrypy.wsgiserver.HTTPServer.minthreads attribute refman/wsgiserver/init.html cherrypy._cpwsgi.CPWSGIApp.namespace_handler method refman/_cpwsgi.html cherrypy.lib.reprconf.NamespaceSet class refman/lib/reprconf.html cherrypy._cpwsgi.AppResponse.close method refman/_cpwsgi.html cherrypy.lib.httputil.HeaderMap.values method refman/lib/httputil.html cherrypy.lib.auth.check_auth function refman/lib/auth.html cherrypy._cpreqbody.Part.type attribute refman/_cpreqbody.html cherrypy.wsgiserver.HTTPServer class refman/wsgiserver/init.html cherrypy.lib.static.staticfile function refman/lib/static.html cherrypy._cpdispatch.PageHandler class refman/_cpdispatch.html cherrypy._cpreqbody.Entity.default_proc method refman/_cpreqbody.html cherrypy.wsgiserver.HTTPServer.stop method refman/wsgiserver/init.html cherrypy._cpreqbody.RequestBody.bufsize attribute refman/_cpreqbody.html cherrypy._cpreqbody.Part.read_into_file method refman/_cpreqbody.html cherrypy.lib.sessions.MemcachedSession.release_lock method refman/lib/sessions.html cherrypy.lib.httputil.get_ranges function refman/lib/httputil.html cherrypy.lib.reprconf.as_dict function refman/lib/reprconf.html cherrypy.lib.httputil.HeaderMap.output method refman/lib/httputil.html cherrypy.process.wspbus.Bus.unsubscribe method refman/process/wspbus.html cherrypy._cprequest.Request.request_line attribute refman/_cprequest.html cherrypy.lib.profiler.serve function refman/lib/profiler.html cherrypy._cptree.Application.log attribute refman/_cptree.html cherrypy.process.servers.ServerAdapter class refman/process/servers.html cherrypy._cplogging.LogManager.reopen_files method refman/_cplogging.html cherrypy._cpreqbody.RequestBody.process method refman/_cpreqbody.html cherrypy._cprequest.Hook.callback attribute refman/_cprequest.html cherrypy.process.plugins.SignalHandler.unsubscribe method refman/process/plugins/signalhandler.html cherrypy._cpserver.Server.max_request_header_size attribute refman/_cpserver.html cherrypy._cprequest.Request.process_request_body attribute refman/_cprequest.html cherrypy._cpreqbody.Entity.charset attribute refman/_cpreqbody.html cherrypy.wsgiserver.ThreadPool.start method refman/wsgiserver/init.html cherrypy._cpchecker.Checker.check_skipped_app_config method refman/_cpchecker.html cherrypy._cptree.Tree.graft method refman/_cptree.html cherrypy._cplogging.LogManager class refman/_cplogging.html cherrypy._cprequest.Request.query_string attribute refman/_cprequest.html cherrypy._cprequest.Request.path_info attribute refman/_cprequest.html cherrypy._cprequest.Request.throw_errors attribute refman/_cprequest.html cherrypy.InternalRedirect class refman/cherrypy.html cherrypy.lib.sessions.RamSession.keys method refman/lib/sessions.html cherrypy._cpdispatch.Dispatcher.dispatch_method_name attribute refman/_cpdispatch.html cherrypy.HTTPRedirect.urls attribute refman/cherrypy.html cherrypy.wsgiserver.CP_fileobject.sendall method 
refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.update method refman/lib/sessions.html cherrypy._cpdispatch.MethodDispatcher class refman/_cpdispatch.html cherrypy.CherryPyException class refman/cherrypy.html cherrypy.TimeoutError class refman/cherrypy.html cherrypy.lib.sessions.RamSession.update method refman/lib/sessions.html cherrypy.lib.sessions.PostgresqlSession.regenerate method refman/lib/sessions.html cherrypy.lib.caching.AntiStampedeCache class refman/lib/caching.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.certificate attribute refman/wsgiserver/ssl_pyopenssl.html cherrypy.lib.caching.MemoryCache.clear method refman/lib/caching.html cherrypy.lib.sessions.PostgresqlSession.setup classmethod refman/lib/sessions.html cherrypy.lib.sessions.FileSession.items method refman/lib/sessions.html cherrypy._cplogging.LogManager.access_log attribute refman/_cplogging.html cherrypy.lib.httpauth.md5SessionKey function refman/lib/httpauth.html cherrypy.lib.cptools.SessionAuth.do_login method refman/lib/cptools.html cherrypy.lib.sessions.PostgresqlSession.update method refman/lib/sessions.html cherrypy.lib.static.serve_fileobj function refman/lib/static.html cherrypy.process.servers.ServerAdapter.stop method refman/process/servers.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.context attribute refman/wsgiserver/ssl_pyopenssl.html cherrypy.process.plugins.ThreadManager.threads attribute refman/process/plugins/index.html cherrypy.wsgiserver.CherryPyWSGIServer class refman/wsgiserver/init.html cherrypy.wsgiserver.HTTPServer.shutdown_timeout attribute refman/wsgiserver/init.html cherrypy._cprequest.Request.toolmaps attribute refman/_cprequest.html cherrypy.lib.auth_digest.HttpDigestAuthorization.request_digest method refman/lib/auth_digest.html cherrypy._cprequest.Hook class refman/_cprequest.html cherrypy._cpconfig.Config class refman/_cpconfig.html cherrypy.lib.sessions.PostgresqlSession.setdefault method refman/lib/sessions.html cherrypy.lib.sessions.FileSession.setdefault method refman/lib/sessions.html cherrypy.lib.profiler.Profiler.statfiles method refman/lib/profiler.html cherrypy.wsgiserver.CP_fileobject class refman/wsgiserver/init.html cherrypy._cprequest.Request.base attribute refman/_cprequest.html cherrypy.lib.jsontools.json_in function refman/lib/jsontools.html cherrypy._cpwsgi.CPWSGIApp.pipeline attribute refman/_cpwsgi.html cherrypy._cpreqbody.RequestBody.maxbytes attribute refman/_cpreqbody.html cherrypy.lib.httpauth.doAuth function refman/lib/httpauth.html cherrypy._cpserver.Server.start method refman/_cpserver.html cherrypy._cpserver.Server.thread_pool attribute refman/_cpserver.html cherrypy.process.wspbus.ChannelFailures.handle_exception method refman/process/wspbus.html cherrypy._cperror.HTTPRedirect.status attribute refman/_cperror.html cherrypy._cpchecker.Checker.check_app_config_entries_dont_start_with_script_name method refman/_cpchecker.html cherrypy.process.plugins.PIDFile class refman/process/plugins/pidfile.html cherrypy.lib.sessions.FileSession.delete method refman/lib/sessions.html cherrypy.lib.auth_digest.get_ha1_dict_plain function refman/lib/auth_digest.html cherrypy.process.wspbus.Bus.wait method refman/process/wspbus.html cherrypy.lib.sessions.PostgresqlSession.clear method refman/lib/sessions.html cherrypy._cpreqbody.SizedReader class refman/_cpreqbody.html cherrypy._cperror.InternalRedirect class refman/_cperror.html cherrypy._cpdispatch.RoutesDispatcher class refman/_cpdispatch.html cherrypy._cpreqbody.Entity.fp attribute 
refman/_cpreqbody.html cherrypy.wsgiserver.HTTPRequest.conn attribute refman/wsgiserver/init.html cherrypy.process.plugins.SimplePlugin.bus attribute refman/process/plugins/index.html cherrypy._cprequest.Response.headers attribute refman/_cprequest.html cherrypy.lib.cptools.SessionAuth.anonymous method refman/lib/cptools.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter.private_key attribute refman/wsgiserver/ssl_builtin.html cherrypy.lib.httputil.decode_TEXT function refman/lib/httputil.html cherrypy._cplogging.WSGIErrorHandler.flush method refman/_cplogging.html cherrypy.process.wspbus.Bus class refman/process/wspbus.html cherrypy._cpconfig.Config.itervalues attribute refman/_cpconfig.html cherrypy.lib.sessions.Session.keys method refman/lib/sessions.html cherrypy.wsgiserver.Gateway class refman/wsgiserver/init.html cherrypy.lib.static.staticdir function refman/lib/static.html cherrypy.process.plugins.ThreadManager.release_thread method refman/process/plugins/index.html cherrypy.lib.sessions.PostgresqlSession.clean_up method refman/lib/sessions.html cherrypy.process.plugins.Autoreloader.frequency attribute refman/process/plugins/index.html cherrypy.process.plugins.SignalHandler.handle_SIGHUP method refman/process/plugins/signalhandler.html cherrypy.wsgiserver.HTTPRequest class refman/wsgiserver/init.html cherrypy.wsgiserver.ChunkedRFile class refman/wsgiserver/init.html cherrypy._cpwsgi.CPWSGIApp class refman/_cpwsgi.html cherrypy._cpserver.Server.ssl_certificate_chain attribute refman/_cpserver.html cherrypy._cprequest.Request.scheme attribute refman/_cprequest.html cherrypy._cpreqbody.RequestBody.default_content_type attribute refman/_cpreqbody.html cherrypy.lib.sessions.Session.load method refman/lib/sessions.html cherrypy.lib.sessions.RamSession.setdefault method refman/lib/sessions.html cherrypy._cpchecker.Checker class refman/_cpchecker.html cherrypy.process.plugins.Monitor.graceful method refman/process/plugins/index.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.certificate_chain attribute refman/wsgiserver/ssl_pyopenssl.html cherrypy.lib.profiler.Profiler.stats method refman/lib/profiler.html cherrypy._cperror.HTTPRedirect.urls attribute refman/_cperror.html cherrypy.wsgiserver.WSGIPathInfoDispatcher class refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.keys method refman/lib/sessions.html cherrypy._cpconfig.Config.items attribute refman/_cpconfig.html cherrypy.lib.caching.Cache.clear method refman/lib/caching.html cherrypy.wsgiserver.HTTPServer.server_name attribute refman/wsgiserver/init.html cherrypy.wsgiserver.HTTPRequest.close_connection attribute refman/wsgiserver/init.html cherrypy.lib.cptools.proxy function refman/lib/cptools.html cherrypy._cpserver.Server.max_request_body_size attribute refman/_cpserver.html cherrypy._cpwsgi.VirtualHost.domains attribute refman/_cpwsgi.html cherrypy.lib.sessions.RamSession.get method refman/lib/sessions.html cherrypy.wsgiserver.ThreadPool.grow method refman/wsgiserver/init.html cherrypy.lib.sessions.Session.generate_id method refman/lib/sessions.html cherrypy.lib.sessions.MemcachedSession.id attribute refman/lib/sessions.html cherrypy.lib.sessions.RamSession.save method refman/lib/sessions.html cherrypy.lib.httpauth.checkResponse function refman/lib/httpauth.html cherrypy.wsgiserver.HTTPRequest.simple_response method refman/wsgiserver/init.html cherrypy.process.wspbus.Bus.subscribe method refman/process/wspbus.html cherrypy.process.plugins.Autoreloader.subscribe method refman/process/plugins/index.html 
cherrypy._cprequest.Request.cookie attribute refman/_cprequest.html cherrypy._cptree.Tree class refman/_cptree.html cherrypy.lib.encoding.ResponseEncoder.encode_string method refman/lib/encoding.html cherrypy._cpreqbody.process_multipart function refman/_cpreqbody.html cherrypy._cprequest.Request.respond method refman/_cprequest.html cherrypy.wsgiserver.WorkerThread.ready attribute refman/wsgiserver/init.html cherrypy.wsgiserver.ssl_pyopenssl.SSL_fileobject class refman/wsgiserver/ssl_pyopenssl.html cherrypy.expose function refman/cherrypy.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.bind method refman/wsgiserver/ssl_pyopenssl.html cherrypy.wsgiserver.HTTPServer.ssl_adapter attribute refman/wsgiserver/init.html cherrypy.wsgiserver.SSLAdapter class refman/wsgiserver/init.html cherrypy._cperror.HTTPRedirect.set_response method refman/_cperror.html cherrypy.process.plugins.Monitor.callback attribute refman/process/plugins/index.html cherrypy._cptree.Application.merge method refman/_cptree.html cherrypy.lib.sessions.PostgresqlSession.keys method refman/lib/sessions.html cherrypy.lib.cptools.SessionAuth class refman/lib/cptools.html cherrypy._cpchecker.Checker.check_static_paths method refman/_cpchecker.html cherrypy._cpwsgi.AppResponse.run method refman/_cpwsgi.html cherrypy._cperror.HTTPRedirect class refman/_cperror.html cherrypy.lib.sessions.Session.id attribute refman/lib/sessions.html cherrypy._cpwsgi.CPWSGIApp.tail method refman/_cpwsgi.html cherrypy._cprequest.Response.header_list attribute refman/_cprequest.html cherrypy.lib.sessions.PostgresqlSession.id attribute refman/lib/sessions.html cherrypy.HTTPRedirect class refman/cherrypy.html cherrypy._cprequest.Response.finalize method refman/_cprequest.html cherrypy.lib.caching.MemoryCache.put method refman/lib/caching.html cherrypy.lib.sessions.PostgresqlSession.items method refman/lib/sessions.html cherrypy._cptools.HandlerTool class refman/_cptools.html cherrypy.lib.sessions.Session.regenerated attribute refman/lib/sessions.html cherrypy.HTTPRedirect.encoding attribute refman/cherrypy.html cherrypy.process.servers.ServerAdapter.restart method refman/process/servers.html cherrypy.lib.sessions.FileSession class refman/lib/sessions.html cherrypy._cplogging.LogManager.access_file attribute refman/_cplogging.html cherrypy._cprequest.Request.handle_error method refman/_cprequest.html cherrypy._cpchecker.Checker.check_site_config_entries_in_app_config method refman/_cpchecker.html cherrypy._cpchecker.Checker.formatwarning method refman/_cpchecker.html cherrypy._cpserver.Server.ssl_private_key attribute refman/_cpserver.html cherrypy.process.plugins.SignalHandler.subscribe method refman/process/plugins/signalhandler.html cherrypy.lib.xmlrpc.patched_path function refman/lib/xmlrpc.html cherrypy.lib.sessions.save function refman/lib/sessions.html cherrypy.lib.profiler.Profiler.run method refman/lib/profiler.html cherrypy.wsgiserver.HTTPServer.max_request_body_size attribute refman/wsgiserver/init.html cherrypy.process.plugins.SimplePlugin class refman/process/plugins/index.html cherrypy.process.plugins.Autoreloader.match attribute refman/process/plugins/index.html cherrypy._cpdispatch.LateParamPageHandler class refman/_cpdispatch.html cherrypy.log function refman/cherrypy.html cherrypy._cpreqbody.RequestBody.read_into_file method refman/_cpreqbody.html cherrypy.lib.sessions.RamSession class refman/lib/sessions.html cherrypy.wsgiserver.WSGIGateway.write method refman/wsgiserver/init.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter 
class refman/wsgiserver/ssl_builtin.html cherrypy._cprequest.Request.show_mismatched_params attribute refman/_cprequest.html cherrypy.process.plugins.SignalHandler.signals attribute refman/process/plugins/signalhandler.html cherrypy._cptree.Application.wsgiapp attribute refman/_cptree.html cherrypy.wsgiserver.HTTPConnection.RequestHandlerClass attribute refman/wsgiserver/init.html cherrypy._cpserver.Server.bind_addr attribute refman/_cpserver.html cherrypy._cptree.Application.root attribute refman/_cptree.html cherrypy.wsgiserver.HTTPConnection.communicate method refman/wsgiserver/init.html cherrypy.wsgiserver.HTTPServer.request_queue_size attribute refman/wsgiserver/init.html cherrypy.lib.sessions.MemcachedSession.save method refman/lib/sessions.html cherrypy._cperror.HTTPError.code attribute refman/_cperror.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter class refman/wsgiserver/ssl_pyopenssl.html cherrypy.lib.sessions.FileSession.release_lock method refman/lib/sessions.html cherrypy._cpwsgi.AppResponse.translate_headers method refman/_cpwsgi.html cherrypy._cpwsgi.CPWSGIApp.config attribute refman/_cpwsgi.html cherrypy.lib.caching.MemoryCache.delete method refman/lib/caching.html cherrypy.lib.encoding.decode function refman/lib/encoding.html cherrypy.NotFound class refman/cherrypy.html cherrypy._cpserver.Server.socket_timeout attribute refman/_cpserver.html cherrypy._cpwsgi.ExceptionTrapper class refman/_cpwsgi.html cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter.get_environ method refman/wsgiserver/ssl_pyopenssl.html cherrypy.lib.httputil.parse_query_string function refman/lib/httputil.html cherrypy.lib.sessions.RamSession.has_key method refman/lib/sessions.html cherrypy._cprequest.Hook.kwargs attribute refman/_cprequest.html cherrypy.wsgiserver.MaxSizeExceeded class refman/wsgiserver/init.html cherrypy._cpconfig.Config.fromkeys staticmethod refman/_cpconfig.html cherrypy.wsgiserver.WorkerThread class refman/wsgiserver/init.html cherrypy.lib.sessions.Session.clean_thread attribute refman/lib/sessions.html cherrypy.lib.caching.Cache.put method refman/lib/caching.html cherrypy._cpserver.Server.ssl_certificate attribute refman/_cpserver.html cherrypy._cpserver.Server.socket_file attribute refman/_cpserver.html cherrypy.lib.caching.Cache.delete method refman/lib/caching.html cherrypy._cprequest.Request.server_protocol attribute refman/_cprequest.html cherrypy.lib.sessions.PostgresqlSession.load method refman/lib/sessions.html cherrypy._cpchecker.Checker.check_config_types method refman/_cpchecker.html cherrypy._cpreqbody.Part.make_file method refman/_cpreqbody.html cherrypy.lib.sessions.Session.clean_up method refman/lib/sessions.html cherrypy._cpreqbody.Entity.params attribute refman/_cpreqbody.html cherrypy.lib.sessions.PostgresqlSession.delete method refman/lib/sessions.html cherrypy.lib.sessions.Session.loaded attribute refman/lib/sessions.html cherrypy._cpreqbody.Entity.part_class attribute refman/_cpreqbody.html cherrypy.lib.caching.MemoryCache.expire_freq attribute refman/lib/caching.html cherrypy.lib.httputil.HeaderMap class refman/lib/httputil.html cherrypy.lib.caching.MemoryCache.expire_cache method refman/lib/caching.html cherrypy._cpserver.Server.ssl_module attribute refman/_cpserver.html cherrypy.HTTPRedirect.status attribute refman/cherrypy.html cherrypy._cpreqbody.Entity.process method refman/_cpreqbody.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter.wrap method refman/wsgiserver/ssl_builtin.html cherrypy._cpconfig.Config.get attribute refman/_cpconfig.html 
cherrypy.wsgiserver.ThreadPool class refman/wsgiserver/init.html cherrypy._cprequest.Request.process_headers method refman/_cprequest.html cherrypy.lib.sessions.Session.id_observers attribute refman/lib/sessions.html cherrypy.lib.sessions.Session.locked attribute refman/lib/sessions.html cherrypy.lib.reprconf.Config class refman/lib/reprconf.html cherrypy._cprequest.Request.get_resource method refman/_cprequest.html cherrypy._cpreqbody.Entity.read_into_file method refman/_cpreqbody.html cherrypy.wsgiserver.ThreadPool.shrink method refman/wsgiserver/init.html cherrypy.process.plugins.Autoreloader.graceful method refman/process/plugins/index.html cherrypy.wsgiserver.HTTPServer.software attribute refman/wsgiserver/init.html cherrypy._cprequest.Request.throws attribute refman/_cprequest.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter.bind method refman/wsgiserver/ssl_builtin.html cherrypy.wsgiserver.ThreadPool.idle attribute refman/wsgiserver/init.html cherrypy._cprequest.Request.query_string_encoding attribute refman/_cprequest.html cherrypy.lib.cptools.referer function refman/lib/cptools.html cherrypy._cprequest.HookMap class refman/_cprequest.html cherrypy._cprequest.Response.status attribute refman/_cprequest.html cherrypy._cprequest.HookMap.run method refman/_cprequest.html cherrypy.lib.reprconf.modules function refman/lib/reprconf.html cherrypy.lib.sessions.RamSession.load method refman/lib/sessions.html cherrypy.process.plugins.SimplePlugin.unsubscribe method refman/process/plugins/index.html cherrypy._cptools.SessionAuthTool class refman/_cptools.html cherrypy.wsgiserver.HTTPServer.ready attribute refman/wsgiserver/init.html cherrypy._cptools.ErrorTool class refman/_cptools.html cherrypy._cpdispatch.LateParamPageHandler.kwargs attribute refman/_cpdispatch.html cherrypy.lib.caching.MemoryCache.get method refman/lib/caching.html cherrypy._cprequest.Request.prev attribute refman/_cprequest.html cherrypy.lib.reprconf.Parser class refman/lib/reprconf.html cherrypy.lib.cptools.autovary function refman/lib/cptools.html cherrypy.lib.cptools.validate_etags function refman/lib/cptools.html cherrypy.wsgiserver.NoSSLError class refman/wsgiserver/init.html cherrypy._cpconfig.Config.keys attribute refman/_cpconfig.html cherrypy._cpreqbody.Part.boundary attribute refman/_cpreqbody.html cherrypy.lib.covercp.get_tree function refman/lib/covercp.html cherrypy.process.servers.wait_for_occupied_port function refman/process/servers.html cherrypy._cplogging.LogManager.access method refman/_cplogging.html cherrypy.wsgiserver.HTTPServer.start method refman/wsgiserver/init.html cherrypy._cptree.Application.find_config method refman/_cptree.html cherrypy.process.plugins.SignalHandler.handlers attribute refman/process/plugins/signalhandler.html cherrypy._cperror.HTTPError.set_response method refman/_cperror.html cherrypy._cpreqbody.RequestBody.fullvalue method refman/_cpreqbody.html cherrypy._cprequest.Request.body_params attribute refman/_cprequest.html cherrypy._cpserver.Server.httpserver_from_self method refman/_cpserver.html cherrypy.lib.caching.AntiStampedeCache.wait method refman/lib/caching.html cherrypy._cperror.CherryPyException class refman/_cperror.html cherrypy.lib.sessions.RamSession.regenerate method refman/lib/sessions.html cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter.certificate attribute refman/wsgiserver/ssl_builtin.html cherrypy.process.plugins.SimplePlugin.subscribe method refman/process/plugins/index.html cherrypy.lib.reprconf.Parser.as_dict method refman/lib/reprconf.html 
cherrypy.lib.sessions.FileSession.has_key method refman/lib/sessions.html cherrypy.lib.sessions.PostgresqlSession.has_key method refman/lib/sessions.html cherrypy.lib.sessions.MemcachedSession.generate_id method refman/lib/sessions.html cherrypy._cptree.Application class refman/_cptree.html cherrypy.process.plugins.SignalHandler class refman/process/plugins/signalhandler.html cherrypy.process.plugins.SignalHandler.set_handler method refman/process/plugins/signalhandler.html cherrypy._cperror.format_exc function refman/_cperror.html cherrypy._cpserver.Server.wsgi_version attribute refman/_cpserver.html cherrypy._cperror.HTTPError.status attribute refman/_cperror.html cherrypy._cpconfig.Config.values attribute refman/_cpconfig.html cherrypy._cplogging.LogManager.appid attribute refman/_cplogging.html cherrypy.wsgiserver.WSGIGateway_10 class refman/wsgiserver/init.html cherrypy.lib.covercp.serve function refman/lib/covercp.html cherrypy.lib.sessions.MemcachedSession.delete method refman/lib/sessions.html cherrypy._cpserver.Server.thread_pool_max attribute refman/_cpserver.html cherrypy.lib.httputil.HeaderMap.elements method refman/lib/httputil.html cherrypy._cptree.Tree.script_name method refman/_cptree.html cherrypy.lib.httpauth.digestAuth function refman/lib/httpauth.html doc/introduction/000077500000000000000000000000001303523157100143445ustar00rootroot00000000000000doc/introduction/architecture-overview.txt000066400000000000000000000024761303523157100214440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _architecture-overview: Architecture Overview ===================== Bcfg2 provides a declarative interface to system configuration. Its configuration specifications describe a literal configuration goal state for clients. In this architecture, the Bcfg2 client tool is responsible for determining what, if any, configuration operations must occur and then performing those operations. The client also uploads statistics and client configuration state information. The design and implementation of the reporting system is described on a separate :ref:`page `. A comprehensive description of the Bcfg2 Architecture (and the choices behind the design) can be found at :ref:`architecture-index`. Server ------ The role of the Bcfg2 server is rendering a client-specific target configuration description from a global specification. The specification consists of a directory structure containing data for a variety of server plugins. The Bcfg2 server has a plugin interface that can be used to interpret the configuration specification. Read on for more information about :ref:`server-index`. Client ------ The Bcfg2 client is responsible for determining what operations are necessary in order to reach the desired configuration state. Read on for more information about :ref:`client-index`. doc/introduction/index.txt000066400000000000000000000030601303523157100162130ustar00rootroot00000000000000.. -*- mode: rst -*- .. _introduction-index: Introduction ============ Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the `Mathematics and Computer Science Division`_ of `Argonne National Laboratory`_. .. _Mathematics and Computer Science Division: http://www.mcs.anl.gov/ .. 
_Argonne National Laboratory: http://www.anl.gov/ It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but, in a feature unique to Bcfg2, the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, Bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. .. toctree:: :maxdepth: 2 architecture-overview os-support doc/introduction/os-support.txt000066400000000000000000000032351303523157100172430ustar00rootroot00000000000000.. -*- mode: rst -*- .. _os-support: What Operating Systems Does Bcfg2 Support? ------------------------------------------ Bcfg2 is fairly portable. It has been successfully run on: * `AIX`_, `FreeBSD`_, `OpenBSD`_, `Mac OS X`_, `OpenSolaris`_, `Solaris`_. .. _AIX: http://www.ibm.com/aix .. _FreeBSD: http://www.freebsd.org/ .. _OpenBSD: http://www.openbsd.org/ .. _Mac OS X: http://www.apple.com/macosx/ .. _OpenSolaris: http://opensolaris.org/ .. _Solaris: http://www.sun.com/software/solaris/ * Many `GNU/Linux`_ distributions, including `Arch Linux`_, `Blag`_, `CentOS`_, `Debian`_, `Fedora`_, `Gentoo`_, `gNewSense`_, `Mandriva`_, `OpenSUSE`_, `Red Hat/RHEL`_, `Scientific Linux`_, `SuSE/SLES`_, `Trisquel`_, and `Ubuntu`_. .. _GNU/Linux: http://www.gnu.org/gnu/Linux-and-gnu.html .. _Arch Linux: http://www.archlinux.org .. _Blag: http://www.blagblagblag.org/ .. _CentOS: http://www.centos.org/ .. _Debian: http://www.debian.org/ .. _Fedora: http://www.fedoraproject.org/ .. _Gentoo: http://www.gentoo.org/ .. _gNewSense: http://www.gnewsense.org/ .. _Mandriva: http://www.mandriva.com/ .. _OpenSUSE: http://opensuse.org/ .. _Red Hat/RHEL: http://www.redhat.com/rhel/ .. _Scientific Linux: http://www.scientificlinux.org/ .. _SuSE/SLES: http://www.novell.com/linux/ .. _Trisquel: http://trisquel.info/ .. _Ubuntu: http://www.ubuntu.com/ Bcfg2 should run on any POSIX-compatible operating system; however, direct support for an operating system's package and service formats is limited by the currently available :ref:`client-tools` (new client tools are pretty easy to add). Check the :ref:`FAQ ` for a more exact list of platforms on which Bcfg2 works. doc/man/000077500000000000000000000000001303523157100123765ustar00rootroot00000000000000doc/man/bcfg2-admin.txt000066400000000000000000000103201303523157100152060ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-admin =========== .. program:: bcfg2-admin Synopsis -------- **bcfg2-admin** [-C *configfile*] *mode* [*mode args*] [*mode options*] Description ----------- :program:`bcfg2-admin` is used to perform Bcfg2 repository administration. Options ------- -C configfile Specify alternate bcfg2.conf location. -E encoding Specify the encoding of config files. -Q path Specify the path to the server repository.
-S server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "https://server:port" -d Enable debugging output. -h Print usage information. -o logfile Writes a log to the specified path. --ssl-key=key Specify the path to the SSL key. -v Enable verbose output. -x password Use 'password' for client communication. Modes ----- backup Create an archive of the entire Bcfg2 repository. client *action* *client* [attribute=value] Add, edit, or remove client entries in metadata (See CLIENT OPTIONS below). compare *old* *new* Compare two client configurations. Can be used to verify consistent behavior between releases. Determine differences between files or directories (See COMPARE OPTIONS below). dbshell Call the Django 'dbshell' command on the configured database. init Initialize a new repository (interactive). initreports Initialize the Reporting database. minestruct *client* [-f xml-file] [-g groups] Build structure entries based on extra entries found in client statistics (See MINESTRUCT OPTIONS below). perf Query server for performance data. pull *client* *entry-type* *entry-name* Install configuration information into the repository based on client bad entries (See PULL OPTIONS below). purgereports Purge historic and expired data from the Reporting database. reportssqlall Call the Django 'shell' command on the Reporting database. reportsstats Print Reporting database statistics. scrubreports Scrub the Reporting database for duplicate reasons and orphaned entries. shell Call the Django 'shell' command on the configured database. syncdb Sync the Django ORM with the configured database. tidy Remove unused files from repository. updatereports Apply database schema updates to the Reporting database. validatedb Call the Django 'validate' command on the configured database. viz [-H] [-b] [-k] [-o png-file] Create a graphviz diagram of client, group and bundle information (See VIZ OPTIONS below). xcmd Provides an XML-RPC command interface to the bcfg2-server. CLIENT OPTIONS ++++++++++++++ mode One of the following. *add* Add a client. *del* Delete a client. *list* List all client entries. client Specify the client's name. attribute=value Set attribute values when adding a new client. Allowed attributes are 'profile', 'uuid', 'password', 'location', 'secure', and 'address'. COMPARE OPTIONS +++++++++++++++ -d *N*, --diff-lines *N* Show only N lines of a diff. -c, --color Show colors even if not run from a TTY. -q, --quiet Only show that entries differ, not how they differ. old Specify the location of the old configuration(s). new Specify the location of the new configuration(s). MINESTRUCT OPTIONS ++++++++++++++++++ client Client whose metadata is to be searched for extra entries. -g *groups* Hierarchy of groups in which to place the extra entries. -f *outputfile* Specify the XML file in which to write the extra entries. PULL OPTIONS ++++++++++++ client Specify the name of the client to search for. entry type Specify the type of the entry to pull. entry name Specify the name of the entry to pull. VIZ OPTIONS +++++++++++ -H, --includehosts Include hosts in diagram. -b, --includebundles Include bundles in diagram. -o *outfile*, --outfile *outfile* Write to *outfile* instead of stdout. -k, --includekey Add a shape/color key.
-c *hostname*, --only-client *hostname* Only show groups and bundles for the named client. See Also -------- :manpage:`bcfg2-info(8)`, :manpage:`bcfg2-server(8)` doc/man/bcfg2-build-reports.txt000066400000000000000000000013471303523157100167200ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-build-reports =================== .. program:: bcfg2-build-reports Synopsis -------- **bcfg2-build-reports** [*-A*] [*-c*] [*-s*] Description ----------- :program:`bcfg2-build-reports` is used to build all client state reports. See the Bcfg2 manual for report setup information. Options ------- -A Displays all data. -c configfile Specify an alternate report configuration path. The default is ``repo/etc/reports-configuration.xml``. -h Print usage information. -s statsfile Use an alternative path for the statistics file. The default is ``repo/etc/statistics.xml``. See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-server(8)` doc/man/bcfg2-crypt.txt000066400000000000000000000110271303523157100152620ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-crypt =========== .. program:: bcfg2-crypt Synopsis -------- **bcfg2-crypt** [-C *configfile*] [--decrypt|--encrypt] [--cfg|--properties] [--stdout] [--remove] [--xpath *xpath*] [-p *passphrase-or-name*] [-v] [-I] *filename* [*filename*...] Description ----------- :program:`bcfg2-crypt` performs encryption and decryption of Cfg and Properties files. It's often sufficient to run :program:`bcfg2-crypt` with only the name of the file you wish to encrypt or decrypt; it can usually figure out what to do. Options ------- -C configfile Specify alternate bcfg2.conf location. --decrypt, --encrypt Select encryption or decryption mode for the given file(s). This is usually unnecessary, as :program:`bcfg2-crypt` can often determine which is necessary based on the contents of each file. --cfg Encrypt an XML file in its entirety rather than element-by-element. This is only necessary if the file is an XML file whose name ends with *.xml* and whose top-level tag is *<Properties>*. See [MODES] below for details. --properties Process a file as an XML Properties file, and encrypt the text of each element separately. This is necessary if, for example, you've used a different top-level tag than *Properties* in your Properties files. See [MODES] below for details. --stdout Print the resulting file to stdout instead of writing it to a file. --remove Remove the plaintext file after it has been encrypted. Only meaningful for Cfg files. --xpath xpath Encrypt the character content of all elements that match the specified XPath expression. The default is *\*[@encrypted]* or *\**; see [MODES] below for more details. Only meaningful for Properties files. -p passphrase Specify the name of a passphrase specified in the *[encryption]* section of *bcfg2.conf*. See [SELECTING PASSPHRASE] below for more details. -v Be verbose. -I When encrypting a Properties file, interactively select the elements whose data should be encrypted. -h Print usage information. Modes ----- :program:`bcfg2-crypt` can encrypt Cfg files or Properties files; they are handled very differently. Cfg When :program:`bcfg2-crypt` is used on a Cfg file, the entire file is encrypted. This is the default behavior on files that are not XML, or that are XML but whose top-level tag is not *<Properties>*. This can be enforced by use of the *--cfg* option.
Properties When :program:`bcfg2-crypt` is used on a Properties file, it encrypts the character content of elements matching the XPath expression given by *--xpath*. By default the expression is *\*[@encrypted]*, which matches all elements with an *encrypted* attribute. If you are encrypting a file and that expression doesn't match any elements, then the default is *\**, which matches everything. When :program:`bcfg2-crypt` encrypts the character content of an element, it also adds the *encrypted* attribute, set to the name of the passphrase used to encrypt that element. When it decrypts an element it does not remove *encrypted*, though; this lets you easily and efficiently run :program:`bcfg2-crypt` against a single Properties file to encrypt and decrypt it without needing to specify a long list of options. See the online Bcfg2 docs on Properties files for more information on how this works. Selecting passphrase -------------------- The passphrase used to encrypt or decrypt a file is discovered in the following order. #. The passphrase given on the command line using *-p* is used. #. If exactly one passphrase is specified in *bcfg2.conf*, it will be used. #. If operating in Properties mode, :program:`bcfg2-crypt` will attempt to read the name of the passphrase from the encrypted elements. #. If decrypting, all passphrases will be tried sequentially. #. If no passphrase has been determined at this point, an error is produced and the file being encrypted or decrypted is skipped. See Also -------- :manpage:`bcfg2-server(8)` doc/man/bcfg2-info.txt .. -*- mode: rst -*- .. vim: ft=rst bcfg2-info ========== .. program:: bcfg2-info Synopsis -------- **bcfg2-info** [-C *configfile*] [-E *encoding*] [-Q *repository path*] [-h] [-p] [-x *password*] [*mode*] [*mode args*] [*mode options*] Description ----------- :program:`bcfg2-info` instantiates the Bcfg2 core for data examination and debugging purposes. Options ------- -C configfile Specify alternate bcfg2.conf location. -E encoding Specify the encoding of config files. -Q path Specify the path to the server repository. -d Enable debugging output. -h Print usage information. -p profile Specify a profile. -x password Use 'password' for client communication. Modes ----- build *hostname* *filename* Build config for hostname, writing to filename. buildall *directory* Build configs for all clients in directory. buildallfile *directory* *filename* [*hostnames*] Build config file for all clients in directory. buildbundle *filename* *hostname* Build bundle for hostname (not written to disk). If filename is a bundle template, it is rendered. builddir *hostname* *dirname* Build config for hostname, writing separate files to dirname. buildfile [--altsrc=*altsrc*] *filename* *hostname* Build config file for hostname (not written to disk). bundles Print out group/bundle information. clients Print out client/profile information. config Print out the configuration of the Bcfg2 server. debug Shell out to a native Python interpreter. event_debug Display filesystem events as they are processed. groups List groups. help Print the list of available commands. mappings [*entry type*] [*entry name*] Print generator mappings for optional type and name. packageresolve *hostname* *package* [*package*...] Resolve the specified set of packages. packagesources *hostname* Show package sources. profile *command* *args* Profile a single bcfg2-info command. quit Exit the bcfg2-info command line.
showentries *hostname* *type* Show abstract configuration entries for a given host. showclient *client1* *client2* Show metadata for the given hosts. update Process pending file events. version Print the version of this tool. See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-server(8)` doc/man/bcfg2-lint.conf.txt .. -*- mode: rst -*- .. vim: ft=rst bcfg2-lint.conf =============== Description ----------- bcfg2-lint.conf includes configuration parameters for bcfg2-lint. File format ----------- The file is INI-style and consists of sections and options. A section begins with the name of the section in square brackets and continues until the next section begins. Options are specified in the form "name=value". The file is line-based: each newline-terminated line represents either a comment, a section name, or an option. Any line beginning with a hash (#) is ignored, as are lines containing only whitespace. The file consists of one *[lint]* section, up to one *[errors]* section, and then any number of plugin-specific sections, documented below. (Note that this makes it quite feasible to combine your *bcfg2-lint.conf* into your :manpage:`bcfg2.conf(5)` file, if you so desire.) Global options -------------- These options apply to *bcfg2-lint* generally, and must be in the *[lint]* section. plugins A comma-delimited list of plugins to run. By default, all plugins are run. This can be overridden by listing plugins on the command line. See :manpage:`bcfg2-lint(8)` for a list of the available plugins. Error handling -------------- Error handling is configured in the *[errors]* section. Each option should be the name of an error and one of *error*, *warning*, or *silent*, which tells :program:`bcfg2-lint` how to handle that error. Error names and their defaults can be displayed by running :program:`bcfg2-lint` with the *--list-errors* option. Plugin options -------------- These options apply only to a single plugin. Each option should be in a section named for its plugin; for instance, options for the InfoXML plugin would be in a section called *[InfoXML]*. If a plugin is not listed below, then it has no configuration. In many cases, the behavior of a plugin can be configured by modifying how errors from it are handled. See ERROR HANDLING, above. Comments ++++++++ The *Comments* plugin configuration specifies which VCS keywords and comments are required for which file types. The valid file types are *global* (all file types), *bundler* (non-templated bundle files), *genshibundler* (templated bundle files), *properties* (property files), *cfg* (non-templated Cfg files), *genshi* or *cheetah* (templated Cfg files), *infoxml* (info.xml files), and *probe* (probe files). The specific types (i.e., types other than "global") all supplement global; they do not override it. The exception is if you specify an empty option, e.g.: cfg_keywords = By default, the *$Id$* keyword is checked for and nothing else. Multiple keywords or comments should be comma-delimited. * *<file-type>_keywords* Ensure that files of the specified type have the given VCS keyword. Do *not* include the dollar signs. I.e.: infoxml_keywords = Revision *not* infoxml_keywords = $Revision$ * *<file-type>_comments* Ensure that files of the specified type have a comment containing the given string. In XML files, only comments are checked. In plain text files, all lines are checked since comment characters may vary.
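As a sketch of how these pieces fit together, a small *bcfg2-lint.conf* might look like the following; the plugin selection, keywords, and comment string shown here are illustrative choices rather than shipped defaults (aside from the *Id* keyword)::

    [lint]
    plugins = Comments,InfoXML,Validate

    [errors]
    # error names come from "bcfg2-lint --list-errors", e.g.:
    # some-error-name = warning

    [Comments]
    global_keywords = Id
    cfg_comments = Maintainer

Here *global_keywords* requires the *$Id$* keyword in every file type, while *cfg_comments* additionally requires a "Maintainer" comment in non-templated Cfg files.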
InfoXML +++++++ required_attrs A comma-delimited list of attributes to require on *Info* tags. Default is "owner,group,mode". MergeFiles ++++++++++ threshold The threshold at which MergeFiles will suggest merging config files and probes. Default is 75% similar. Validate ++++++++ schema The full path to the XML Schema files. Default is ``/usr/share/bcfg2/schema``. This can be overridden with the *--schema* command-line option. See Also -------- :manpage:`bcfg2-lint(8)` doc/man/bcfg2-lint.txt .. -*- mode: rst -*- .. vim: ft=rst bcfg2-lint ========== .. program:: bcfg2-lint Synopsis -------- **bcfg2-lint** [*options*] [*plugin* [*plugin*...]] Description ----------- :program:`bcfg2-lint` checks the Bcfg2 specification for schema validity, common mistakes, and other criteria. It can be quite helpful in finding typos or malformed data. :program:`bcfg2-lint` exits with a return value of 2 if errors were found, and 3 if warnings (but no errors) were found. Any other non-0 exit value denotes some failure in the script itself. :program:`bcfg2-lint` is a rewrite of the older bcfg2-repo-validate tool. Options ------- -C configfile Specify alternate bcfg2.conf location. -Q path Specify the path to the server repository. -v Be verbose. --lint-config Specify the path to bcfg2-lint.conf (default ``/etc/bcfg2-lint.conf``). --stdin Rather than operating on all files in the Bcfg2 specification, only validate a list of files supplied on stdin. This mode is particularly useful in pre-commit hooks. This makes a few assumptions: Metadata files will only be checked if a valid chain of XIncludes can be followed all the way from clients.xml or groups.xml. Since there are multiple formats of metadata stored in Metadata/ (i.e., clients and groups), there is no way to determine which sort of data a file contains unless there is a valid chain of XIncludes. It may be useful to always specify that all metadata files should be checked, even if not all of them have changed. Property files will only be validated if both the property file itself and its matching schema are included on stdin. Plugins ------- In addition to the plugins listed below, Bcfg2 server plugins may have their own *bcfg2-lint* functionality, which is enabled automatically when the server plugin is enabled. See :manpage:`bcfg2-lint.conf(5)` for more information on lint plugin configuration. Comments Check the specification for VCS keywords and any comments that are required. By default, this only checks that the *$Id$* keyword is included and expanded in all files. You may specify VCS keywords to check and comments to be required in the config file. (For instance, you might require that every file have a "Maintainer" comment.) In XML files, only comments are checked for the keywords and comments required. Genshi Ensure that all Genshi templates are valid and compile properly. GroupNames Ensure that all groups called by name in Metadata, Rules, Bundler, GroupPatterns, and Cfg are valid. InfoXML Check that certain attributes are specified in *info.xml* files. By default, requires that *owner*, *group*, and *mode* are specified. Can also require that an *info.xml* exists for all Cfg files, and that paranoid mode be enabled for all files. MergeFiles Suggest that similar probes and config files be merged into single probes or TGenshi templates. RequiredAttrs Check that all entries have the appropriate required attributes, and that the attributes are in a valid format.
This goes above and beyond the validation offered by an XML schema. Validate Validate the Bcfg2 specification against the XML schemas. Property files are freeform XML, but if a *.xsd* file with a matching filename is provided, then schema validation will be performed on property files individually as well. For instance, if you have a property file named *ntp.xml* then by placing a schema for that file in *ntp.xsd* schema validation will be performed on *ntp.xml*. Bugs ---- :program:`bcfg2-lint` may not handle some deprecated plugins as well as it handles newer ones. For instance, there may be some places where it expects all of your configuration files to be handled by Cfg rather than by a mix of Cfg and TGenshi or TCheetah. See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-server(8)`, :manpage:`bcfg2-lint.conf(5)` doc/man/bcfg2-report-collector.txt000066400000000000000000000021311303523157100174140ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-report-collector ====================== .. program:: bcfg2-report-collector Synopsis -------- **bcfg2-report-collector** [*options*] Description ----------- :program:`bcfg2-report-collector` runs a daemon to collect logs from the LocalFilesystem :ref:`Bcfg2 Reports ` transport object and add them to the Reporting storage backend. Options ------- -C configfile Specify alternate bcfg2.conf location. -D pidfile Daemonize, placing the program pid in *pidfile*. -E encoding Specify the encoding of config files. -Q path Specify the path to the server repository. -W configfile Specify the path to the web interface configuration file. -d Enable debugging output. -h Print usage information. -o path Set path of file log -v Run in verbose mode. --version Print the version and exit See Also -------- :manpage:`bcfg2-server(8)`, :manpage:`bcfg2-reports(8)` doc/man/bcfg2-reports.txt000066400000000000000000000062411303523157100156210ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-reports ============= .. program:: bcfg2-reports Synopsis -------- **bcfg2-reports** [-a] [-b *NAME*] [-c] [-d] [-e *NAME*] [-h] [-m *NAME*] [-s *NAME*] [-x *NAME*] [--badentry=\ *KIND,NAME*] [--extraentry=\ *KIND,NAME*] [--fields=\ *ARG1,ARG2,...*] [--modifiedentry=\ *KIND,NAME*] [--sort=\ *ARG1,ARG2,...*] [--stale] [-v] Description ----------- :program:`bcfg2-reports` allows you to retrieve data from the database about clients, and the states of their current interactions. It also allows you to change the expired/unexpired states. The utility runs as a standalone application. It does, however, use the models from ``src/lib/Bcfg2/Reporting/models.py``. Options ------- -h Print usage information. Modes ----- The following are various modes available for :program:`bcfg2-reports`. Single-Host Modes +++++++++++++++++ -b, --bad hostname Shows bad entries from the current interaction of *hostname*. -e, --extra hostname Shows extra entries from the current interaction of *hostname*. -m, --modified hostname Shows modified entries from the current interaction of *hostname*. -s, --show hostname Shows bad, modified, and extra entries from the current interaction of *hostname*. -t, --total hostname Shows total number of managed and good entries from the current interaction of *hostname*. -x, --expire hostname Toggles expired/unexpired state of *hostname*. -a, --all Show all hosts, including expired hosts. Host Selection Modes ++++++++++++++++++++ -a, --all Show all hosts, including expired hosts. -c, --clean Show only clean hosts. 
-d, --dirty Show only dirty hosts. --stale Show hosts that haven't run in the last 24 hours. Entry Modes +++++++++++ The following mode flags require either a comma-delimited list of any number of *:* arguments describing entries, or the *--file* option. --badentry=entrylist Shows only hosts whose current interaction has bad entries matching the given entry or entries. --extraentry=entrylist Shows only hosts whose current interaction has extra entries matching the given entry or entries. --entrystatus=entry Shows the status of the single entry (given by *:*) on all hosts. --modifiedentry=entrylist Shows only hosts whose current interaction has modified entries matching the given entry or entries. Entry Options ^^^^^^^^^^^^^ The following options can be used with the above Entry Modes. --fields=fields Only display the listed fields. Takes a comma-delimited list of field names --file=file Read *:* pairs from the specified file instead of the command line. See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-server(8)` doc/man/bcfg2-server.txt000066400000000000000000000023401303523157100154250ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2-server ============ .. program:: bcfg2-server Synopsis -------- **bcfg2-server** [-d] [-v] [-C *configfile*] [-D *pidfile*] [-E *encoding*] [-Q *repo path*] [-S *server url*] [-o *logfile*] [-x *password*] [--ssl-key=\ *ssl key*] [--no-fam-blocking] Description ----------- :program:`bcfg2-server` is the daemon component of Bcfg2 which serves configurations to clients based on the data in its repository. Options ------- -C configfile Specify alternate bcfg2.conf location. -D pidfile Daemonize, placing the program pid in *pidfile*. -E encoding Specify the encoding of config files. -Q path Specify the path to the server repository. -S server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "https://server:port" -d Enable debugging output. -v Run in verbose mode. -h Print usage information. --ssl-key=key Specify the path to the SSL key. --no-fam-blocking Synonym for fam_blocking = False in bcfg2.conf See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-lint(8)`, :manpage:`bcfg2.conf(5)` doc/man/bcfg2.conf.txt000066400000000000000000000511161303523157100150520ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst bcfg2.conf ========== Description ----------- bcfg2.conf includes configuration parameters for the Bcfg2 server and client. File format ----------- The file is INI-style and consists of sections and options. A section begins with the name of the sections in square brackets and continues until the next section begins. Options are specified in the form "name=value". The file is line-based each newline-terminated line represents either a comment, a section name or an option. Any line beginning with a hash (#) is ignored, as are lines containing only whitespace. Server options -------------- These options are only necessary on the Bcfg2 server. They are specified in the **[server]** section of the configuration file. repository Specifies the path to the Bcfg2 repository containing all of the configuration specifications. The repository should be created using the `bcfg2-admin init` command. filemonitor The file monitor used to watch for changes in the repository. The default is the best available monitor. 
The following values are valid:: inotify gamin pseudo fam_blocking Whether the server should block at startup until the file monitor backend has processed all events. This can cause a slower startup, but ensure that all files are recognized before the first client is handled. Defaults to True. ignore_files A comma-separated list of globs that should be ignored by the file monitor. Default values are:: *~ *# #* *.swp *.swpx *.swx SCCS .svn 4913 .gitignore listen_all This setting tells the server to listen on all available interfaces. The default is to only listen on those interfaces specified by the bcfg2 setting in the components section of ``bcfg2.conf``. plugins A comma-delimited list of enabled server plugins. Currently available plugins are:: ACL Bundler Bzr Cfg Cvs Darcs Decisions Defaults Deps FileProbes Fossil Git GroupLogic GroupPatterns Guppy Hg Ldap Metadata NagiosGen Ohai Packages Pkgmgr POSIXCompat Probes Properties PuppetENC Reporting Rules SEModules ServiceCompat SSHbase Svn TemplateHelper Trigger Descriptions of each plugin can be found in their respective sections below. prefix Specifies a prefix if the Bcfg2 installation isn't placed in the default location (e.g. ``/usr/local``). backend Specifies which server core backend to use. Current available options are:: cherrypy builtin best The default is *best*, which is currently an alias for *builtin*. More details on the backends can be found in the official documentation. user The username or UID to run the daemon as. Default is *0*. group The group name or GID to run the daemon as. Default is *0*. vcs_root Specifies the path to the root of the VCS working copy that holds your Bcfg2 specification, if it is different from *repository*. E.g., if the VCS repository does not hold the bcfg2 data at the top level, you may need to set this option. umask The umask to set for the server. Default is *0077*. Server Plugins -------------- This section has a listing of all the plugins currently provided with Bcfg2. ACL Plugin ++++++++++ The ACL plugin controls which hosts can make which XML-RPC calls. Bundler Plugin ++++++++++++++ The Bundler plugin is used to describe groups of inter-dependent configuration entries, such as the combination of packages, configuration files, and service activations that comprise typical Unix daemons. Bundles are used to add groups of configuration entries to the inventory of client configurations, as opposed to describing particular versions of those entries. Bzr Plugin ++++++++++ The Bzr plugin allows you to track changes to your Bcfg2 repository using a GNU Bazaar version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Cfg Plugin ++++++++++ The Cfg plugin provides a repository to describe configuration file contents for clients. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines. Cvs Plugin ++++++++++ The Cvs plugin allows you to track changes to your Bcfg2 repository using a Concurrent version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Darcs Plugin ++++++++++++ The Darcs plugin allows you to track changes to your Bcfg2 repository using a Darcs version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. 
Decisions Plugin ++++++++++++++++ The Decisions plugin has support for a centralized set of per-entry installation decisions. This approach is needed when particular changes are deemed "*high risk*"; this gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available. Defaults Plugin +++++++++++++++ The Defaults plugin can be used to populate default attributes for entries. Defaults is *not* a Generator plugin, so it does not actually bind an entry; Defaults are applied after an entry has been bound, and only populate attributes that are not yet set. Deps Plugin +++++++++++ The Deps plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc.)" FileProbes Plugin +++++++++++++++++ The FileProbes plugin allows you to probe a client for a file, which is then added to the Cfg specification. If the file changes on the client, FileProbes can either update it in the specification or allow Cfg to replace it. Fossil Plugin +++++++++++++ The Fossil plugin allows you to track changes to your Bcfg2 repository using a Fossil SCM version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Git Plugin ++++++++++ The Git plugin allows you to track changes to your Bcfg2 repository using a Git version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. GroupLogic Plugin +++++++++++++++++ The GroupLogic plugin lets you flexibly assign group membership with a Genshi template. GroupPatterns Plugin ++++++++++++++++++++ The GroupPatterns plugin is a connector that can assign clients group membership based on patterns in client hostnames. Guppy Plugin ++++++++++++ The Guppy plugin is used to trace memory leaks within the bcfg2-server process using Guppy. Hg Plugin +++++++++ The Hg plugin allows you to track changes to your Bcfg2 repository using a Mercurial version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Ldap Plugin +++++++++++ The Ldap plugin makes it possible to fetch data from a LDAP directory, process it and attach it to your metadata. Metadata Plugin +++++++++++++++ The Metadata plugin is the primary method of specifying Bcfg2 server metadata. NagiosGen Plugin ++++++++++++++++ The NagiosGen plugin dynamically generates Nagios configuration files based on Bcfg2 data. Ohai Plugin +++++++++++ The Ohai plugin is used to detect information about the client operating system. The data is reported back to the server using JSON. Packages Plugin +++++++++++++++ The Packages plugin is an alternative to Pkgmgr for specifying package entries for clients. Where Pkgmgr explicitly specifies package entry information, Packages delegates control of package version information to the underlying package manager, installing the latest version available from through those channels. Pkgmgr Plugin +++++++++++++ The Pkgmgr plugin resolves the Abstract Configuration Entity "Package" to a package specification that the client can use to detect, verify and install the specified package. POSIXCompat Plugin ++++++++++++++++++ The POSIXCompat plugin provides a compatibility layer for 1.3 POSIX Entries so that they are compatible with older clients. Probes Plugin +++++++++++++ The Probes plugin gives you the ability to gather information from a client machine before you generate its configuration. 
This information can be used with the various templating systems to generate configuration based on the results. Properties Plugin +++++++++++++++++ The Properties plugin is a connector plugin that adds information from properties files into client metadata instances. PuppetENC Plugin ++++++++++++++++ The PuppetENC plugin is a connector plugin that adds support for Puppet External Node Classifiers. Reporting Plugin ++++++++++++++++ The Reporting plugin enables the collection of data for use with Bcfg2's dynamic reporting system. Rules Plugin ++++++++++++ The Rules plugin provides literal configuration entries that resolve the abstract configuration entries normally found in Bundler. The literal entries in Rules are suitable for consumption by the appropriate client drivers. SEModules Plugin ++++++++++++++++ The SEModules plugin provides a way to distribute SELinux modules via Bcfg2. ServiceCompat Plugin ++++++++++++++++++++ The ServiceCompat plugin converts service entries for older clients. SSHbase Plugin ++++++++++++++ The SSHbase generator plugin manages ssh host keys (both v1 and v2) for hosts. It also manages the ssh_known_hosts file. It can integrate host keys from other management domains and similarly export its keys. Svn Plugin ++++++++++ The Svn plugin allows you to track changes to your Bcfg2 repository using a Subversion backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Trigger Plugin ++++++++++++++ The Trigger plugin provides a method for calling external scripts when clients are configured. Caching options --------------- These options are specified in the **[caching]** section. client_metadata The following four caching modes are available for client metadata: * off: No caching of client metadata objects is performed. This is the default. * initial: Only initial metadata objects are cached. Initial metadata objects are created only from the data in the Metadata plugin, before additional groups from other plugins are merged in. * cautious: Final metadata objects are cached, but each client’s cache is cleared at the start of each client run, immediately after probe data is received. Cache is also cleared as in aggressive mode. *on* is a synonym for cautious. * aggressive: Final metadata objects are cached. Each plugin is responsible for clearing cache when appropriate. Client options -------------- These options only affect client functionality. They can be specified in the **[client]** section. decision Specify the server decision list mode (whitelist or blacklist). (This settiing will be ignored if the client is called with the -f option). drivers Specify tool driver set to use. This option can be used to explicitly specify the client tool drivers you want to use when the client is run. paranoid Run the client in paranoid mode. profile Assert the given profile for the host. Communication options --------------------- Specified in the **[communication]** section. These options define settings used for client-server communication. ca The path to a file containing the CA certificate. This file is required on the server, and optional on clients. However, if the cacert is not present on clients, the server cannot be verified. certificate The path to a file containing a PEM formatted certificate which signs the key with the ca certificate. This setting is required on the server in all cases, and required on clients if using client certificates. key Specifies the path to a file containing the SSL Key. 
This is required on the server in all cases, and required on clients if using client certificates. password Required on both the server and clients. On the server, sets the password clients need to use to communicate. On a client, sets the password to use to connect to the server. protocol Communication protocol to use. Defaults to xmlrpc/tlsv1. retries A client-only option. Number of times to retry network communication. Default is 3 retries. retry_delay A client-only option. Number of seconds to wait in between retrying network communication. Default is 1 second. serverCommonNames A client-only option. A colon-separated list of Common Names the client will accept in the SSL certificate presented by the server. timeout A client-only option. The network communication timeout. user A client-only option. The UUID of the client. Component options ----------------- Specified in the **[components]** section. bcfg2 URL of the server. On the server this specifies which interface and port the server listens on. On the client, this specifies where the client will attempt to contact the server. e.g. *bcfg2 = https://10.3.1.6:6789* encoding Text encoding of configuration files. Defaults to UTF-8. lockfile The path to the client lock file, which is used to ensure that only one Bcfg2 client runs at a time on a single client. Logging options --------------- Specified in the **[logging]** section. These options control the server logging functionality. debug Whether or not to enable debug-level log output. Default is false. path Server log file path. syslog Whether or not to send logging data to syslog. Default is true. verbose Whether or not to enable verbose log output. Default is false. MDATA options ------------- Specified in the **[mdata]** section. These options affect the default metadata settings for Paths with type='file'. owner Global owner for Paths (defaults to root) group Global group for Paths (defaults to root) mode Global permissions for Paths (defaults to 644) secontext Global SELinux context for Path entries (defaults to *__default__*, which restores the expected context) paranoid Global paranoid settings for Paths (defaults to false) sensitive Global sensitive settings for Paths (defaults to false) important Global important settings for Paths. Defaults to false. Packages options ---------------- The following options are specified in the **[packages]** section. backends Comma separated list of backends for the dependency resolution. Default is "Yum,Apt,Pac,Pkgng". resolver Enable dependency resolution. Default is 1 (true). metadata Enable metadata processing. Default is 1 (true). If metadata is disabled, it’s implied that resolver is also disabled. yum_config The path at which to generate Yum configs. No default. apt_config The path at which to generate APT configs. No default. gpg_keypath The path on the client where RPM GPG keys will be copied before they are imported on the client. Default is ``/etc/pki/rpm-gpg``. version Set the version attribute used when binding Packages. Default is auto. The following options are specified in the **[packages:yum]** section. use_yum_libraries By default, Bcfg2 uses an internal implementation of Yum’s dependency resolution and other routines so that the Bcfg2 server can be run on a host that does not support Yum itself. If you run the Bcfg2 server on a machine that does have Yum libraries, however, you can enable use of those native libraries in Bcfg2 by setting this to 1. helper Path to bcfg2-yum-helper. 
By default, Bcfg2 looks first in $PATH and then in ``/usr/sbin/bcfg2-yum-helper`` for the helper. The following options are specified in the **[packages:pulp]** section. username The username of a Pulp user that will be used to register new clients and bind them to repositories. password The password of a Pulp user that will be used to register new clients and bind them to repositories. All other options in the **[packages:yum]** section will be passed along verbatim to the Yum configuration if you are using the native Yum library support. Paranoid options ---------------- These options allow for finer-grained control of the paranoid mode on the Bcfg2 client. They are specified in the **[paranoid]** section of the configuration file. path Custom path for backups created in paranoid mode. The default is in ``/var/cache/bcfg2``. max_copies Specify a maximum number of copies for the server to keep when running in paranoid mode. Only the most recent versions of these copies will be kept. SSL CA options -------------- These options are necessary to configure the SSL CA feature of the Cfg plugin and can be found in the **[sslca_default]** section of the configuration file. config Specifies the location of the openssl configuration file for your CA. passphrase Specifies the passphrase for the CA’s private key (if necessary). If no passphrase exists, it is assumed that the private key is stored unencrypted. chaincert Specifies the location of your ssl chaining certificate. This is used when pre-existing certifcate hostfiles are found, so that they can be validated and only regenerated if they no longer meet the specification. If you’re using a self signing CA this would be the CA cert that you generated. Database options ---------------- Server-only, specified in the **[database]** section. These options control the database connection of the server. engine The database engine used by server plugins. One of the following:: postgresql mysql sqlite3 ado_mssql name The name of the database to use for server data. If 'database_engine' is set to 'sqlite3' this is a file path to the sqlite file and defaults to ``$REPOSITORY_DIR/etc/bcfg2.sqlite``. user User for database connections. Not used for sqlite3. password Password for database connections. Not used for sqlite3. host Host for database connections. Not used for sqlite3. port Port for database connections. Not used for sqlite3. options Various options for the database connection. The value expected is the literal value of the django OPTIONS setting. reporting_engine The database engine used by the Reporting plugin. One of the following:: postgresql mysql sqlite3 ado_mssql If reporting_engine is not specified, the Reporting plugin uses the same database as the other server plugins. reporting_name The name of the database to use for reporting data. If 'database_engine' is set to 'sqlite3' this is a file path to the sqlite file and defaults to ``$REPOSITORY_DIR/etc/reporting.sqlite``. reporting_user User for reporting database connections. Not used for sqlite3. reporting_password Password for reporting database connections. Not used for sqlite3. reporting_host Host for reporting database connections. Not used for sqlite3. reporting_port Port for reporting database connections. Not used for sqlite3. reporting_options Various options for the database connection. The value expected is the literal value of the django OPTIONS setting. 
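As an illustration, a server that stores reporting data in a separate database could use a **[database]** section along these lines; the names, hosts, and credentials are placeholders, and the *options* value is simply one example of a literal Python dictionary::

    [database]
    engine = postgresql
    name = bcfg2
    user = bcfg2
    # placeholder credentials and hosts
    password = secret
    host = db1.example.com
    options = {'connect_timeout': 10}
    reporting_engine = postgresql
    reporting_name = bcfg2_reports
    reporting_host = db2.example.com

If the *reporting_* options were omitted, the Reporting plugin would simply share the database defined by the other settings.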
Reporting options ----------------- config Specifies the location of the reporting configuration (default is ``/etc/bcfg2-web.conf``). time_zone Specifies a time zone other than that used on the system. (Note that this will cause the Bcfg2 server to log messages in this time zone as well). web_debug Turn on Django debugging. max_children Maximum number of children for the reporting collector. Use 0 to disable the limit (default is 0). django_settings Arbitrary options for the Django installation. The value expected is a literal Python dictionary, which is merged with the already-set Django settings. See Also -------- :manpage:`bcfg2(1)`, :manpage:`bcfg2-server(8)` doc/man/bcfg2.txt .. -*- mode: rst -*- .. vim: ft=rst bcfg2 ===== .. program:: bcfg2 Synopsis -------- **bcfg2** [*options*] Description ----------- :program:`bcfg2` runs the Bcfg2 configuration process on the current host. This process consists of the following steps. * Fetch and execute probes * Upload probe results * Fetch the client configuration * Check the current client state * Attempt to install the desired configuration * Upload statistics about the Bcfg2 execution and client state Options ------- -B Configure everything except the given bundle(s). -C configfile Specify alternate bcfg2.conf location. -D drivers Specify a comma-delimited set of Bcfg2 tool drivers. *NOTE: only drivers listed will be loaded. (e.g., if you do not include POSIX, you will be unable to verify/install Path entries).* -E encoding Specify the encoding of config files. -I Run bcfg2 in interactive mode. The user will be prompted before each change. -O Omit lock check. -P Run bcfg2 in paranoid mode. Diffs will be logged for configuration files marked as paranoid by the Bcfg2 server. -Q Run bcfg2 in "bundle quick" mode, where only entries in a bundle are verified or installed. This runs much faster than -q, but doesn't provide statistics to the server at all. In order for this option to work, the -b option must also be provided. This option is incompatible with -r. -R retrycount Specify the number of times that the client will attempt to retry network communication. -S server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "https://server:port". -Z Do not configure independent entries. -b bundles Run only the specified colon-delimited set of bundles. -c cachefile Cache a copy of the configuration in cachefile. --ca-cert=cacert Specify the path to the SSL CA certificate. -d Enable debugging output. -e When in verbose mode, display extra entry information. -f path Configure from a file rather than querying the server. -h Print usage information. -k Run in bulletproof mode. This currently only affects behavior in the Debian toolset; it calls apt-get update and clean and dpkg --configure --pending. -l decisionmode Run the client in the specified decision list mode ("whitelist" or "blacklist"), or "none", which can be used to override the decision list mode specified in bcfg2.conf. This approach is needed when particular changes are deemed "high risk". It gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available. Because collaborative configuration is one of the remaining hard issues in configuration management, these issues typically crop up in environments with several administrators and much configuration variety.
(This setting will be ignored if the -f option is also specified). -n Run bcfg2 in dry-run mode. No changes will be made to the system. -o logfile Writes a log to the specified path. -p profile Assert a profile for the current client. -q Run bcfg2 in quick mode. Package checksum verification won't be performed. This mode relaxes the constraints of correctness, and thus should only be used in safe conditions. -r mode Cause bcfg2 to remove extra configuration elements it detects. Mode is one of "all", "Services", "Packages", or "Users". "all" removes all extra entries. "Services", "Packages", and "Users" remove only the extra configuration elements of the respective type. ("Services" actually just disables extra services, since they can't be removed, and "Users" removes extra POSIXUser and POSIXGroup entries.) -s servicemode Set bcfg2 interaction level for services. Default behavior is to modify all services affected by reconfiguration. "build" mode attempts to stop all services started. "disabled" suppresses all attempts to modify services. --ssl-cert=cert Specify the path to the SSL certificate. --ssl-cns=CNs Colon-delimited list of acceptable SSL server Common Names. --ssl-key=key Specify the path to the SSL key. -u user Attempt to authenticate as 'user'. -t timeout Set the timeout (in seconds) for client communication. Default is 90 seconds. -v Run bcfg2 in verbose mode. -x password Use 'password' for client communication. -z Only configure independent entries, ignore bundles. See Also -------- :manpage:`bcfg2-server(8)`, :manpage:`bcfg2-info(8)` doc/man/index.txt .. -*- mode: rst -*- .. vim: ft=rst .. _man-index: ========= Man Pages ========= .. toctree:: :maxdepth: 1 :glob: * doc/releases/ doc/releases/1.3.4.txt .. -*- mode: rst -*- .. vim: ft=rst .. _releases-1.3.4: 1.3.4 ===== We are happy to announce the release of Bcfg2 1.3.4. It is available for download at: ftp://ftp.mcs.anl.gov/pub/bcfg This is primarily a bugfix release. * New probes.allowed_groups option to restrict group assignments * Bundler fixes: * Fix parsing XML template output with encoding declaration * bcfg2-lint: * Resolve XIncludes when parsing XML for validation * New TemplateAbuse plugin to detect templated scripts * New ValidateJSON plugin * bcfg2-crypt fixes: * Fix logic * Improve debugging/error handling with Properties files * Fix exception handling * Handle error when encrypting properties with multiple keys * Add new Augeas client tool driver: http://docs.bcfg2.org/client/tools/augeas.html * Restored bcfg2-admin client add functionality * Migration tool fixes * Schema fixes * Add Django 1.6 support * Use 'public' default pgsql database schema * Refresh essential packages during Packages.Refresh * Allow lxml.etree XML implementation to parse very large documents * Support ACLs without a specific user/group * Explicitly close database connections at the end of each client run * Fix verification of symlinks Special thanks to the following contributors for this release: Matt Baker, Simon Ruderich, Michael Fenn, Dan Foster, Richard Connon, John Morris. doc/releases/1.3.5.txt .. -*- mode: rst -*- .. vim: ft=rst .. _releases-1.3.5: 1.3.5 ===== We are happy to announce the release of Bcfg2 1.3.5.
It is available for download at: ftp://ftp.mcs.anl.gov/pub/bcfg This is primarily a bugfix release. * Properly close db connections * Improved error messages * Fix yum upgrade/downgrade * Enable bcfg2-yum-helper to depsolve for arches incompatible with server * Spec file fixes * bcfg2-crypt: Default to only (En|De)crypt vars that need it * Fix email reporting bug * Fix debsums parsing * Fix Solaris makefile * SYSV: Implement downloading and installing SYSV packages from HTTP: http://docs.bcfg2.org/client/tools.html#sysv * Fix Debian bcfg2-server init script Special thanks to the following contributors for this release: John Morris, Jonathan Billings, Chris Brinker, Tim Laszlo, Matt Kemp, Michael Fenn, Pavel Labushev, Nathan Olla, Alexander Sulfrian. doc/releases/1.3.6.txt .. -*- mode: rst -*- .. vim: ft=rst .. _releases-1.3.6: 1.3.6 ===== We are happy to announce the release of Bcfg2 1.3.6. It is available for download at: ftp://ftp.mcs.anl.gov/pub/bcfg This is primarily a bugfix release. * Fix Python 2.4 compatibility * Fix stale lockfile detection and behavior * Reporting: fix filter URLs * Fix client protocol option handling * YUM: Add options to enable and disable Yum plugins * Packages: add name to sources * Reporting: better exception handling * Various interrupt handling fixes * Fix client decision whitelist/blacklist handling * Fix database OPTIONS parsing This change requires you to set the *options* value of the ``[database]`` section in ``bcfg2.conf`` to the literal value which is passed through to the Django OPTIONS setting. https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-OPTIONS * SYSV: change instances of simplename to simplefile Previous configurations can be updated using the migration tool. * Authentication: Reject passwd auth if authentication is set to "cert" * Server/Core: drop privileges even if not running as daemon * Packages/Yum.py: Fix dependency resolution logic * Handle filesystem secontexts properly for contextless filesystems Special thanks to the following contributors for this release: Michael Fenn, Matt Kemp, Alexander Sulfrian, Jonathan Billings, Ross Smith. doc/releases/1.4.0pre1.txt .. -*- mode: rst -*- .. vim: ft=rst .. _releases-1.4.0pre1: 1.4.0pre1 ========= The first prerelease for Bcfg2 1.4.0 is now available at: ftp://ftp.mcs.anl.gov/pub/bcfg Bcfg2 1.4.0pre1 is a prerelease, and contains many new features, including some that are backwards-incompatible with Bcfg2 1.3.x and earlier. Please read the release notes thoroughly. This is a prerelease and as such is not likely suitable for general production deployment. That said, please help us test the release in non- and preproduction environments. backwards-incompatible user-facing changes ------------------------------------------ * Completely rewrote option parser Many single character options now have long equivalents. Some subcommand interfaces (``bcfg2-info``, ``bcfg2-admin``) have been reorganized to some degree. ``bcfg2-reports`` syntax is completely different. * Added new :ref:`server-plugins-misc-acl` plugin Default ACLs only allow clients to perform bcfg2 client runs, and only permit `bcfg2-admin xcmd` calls from localhost. If you want to change this, you must enable the ACL plugin and configure your own ACLs.
* Added genshi requirement for the server * :ref:`server-plugins-generators-decisions` * Switch plugin to use StructFile instead of host- or group-specific XML files (this allows a single e.g. whitelist.xml file with tags) You can convert your existing decisions using ``tools/upgrade/1.4/migrate_decisions.py``. deprecated features (will be removed in a future release, likely 1.5) --------------------------------------------------------------------- * :ref:`server-plugins-structures-bundler` * Deprecated use of an explicit name attribute You can convert your existing bundles using ``tools/upgrade/1.4/convert_bundles.py``. * Deprecated :ref:`.genshi bundles ` (use .xml bundles and specify the genshi namespace instead) * SSLCA * Deprecated plugin * SSLCA functionality has been added to the Cfg plugin: see :ref:`server-plugins-generators-cfg-ssl-certificates` deprecated plugins and features which have been removed ------------------------------------------------------- Plugins ^^^^^^^ * PostInstall * TGenshi * TCheetah * Account * Hostbase * Snapshots * Statistics * Editor * Base Client tools ^^^^^^^^^^^^ * RPMng * YUM24 * YUMng Other features ^^^^^^^^^^^^^^ * FAM filemonitor * Removed mode="inherit" support * Removed support for .cat/.diff files * Removed support for info/:info files * Removed "magic" groups (for the Packages plugin) other fixes and new features ---------------------------- * Added :ref:`inter-bundle dependencies ` * Added support for :ref:`independent bundles ` (replaces the functionality of Base): * Added support for wildcard XIncludes * Add Solaris 11 IPS Package support * Add bcfg2-report-collector init script to debian package * Git VCS plugin enhancements * Removed deprecated plugins * :ref:`server-plugins-structures-bundler` * Deprecated use of an explicit name attribute * Deprecated .genshi bundles * Added path globbing * :ref:`server-plugins-grouping-metadata` * Allow setting global default authentication type * :ref:`server-plugins-generators-packages` * Add yum group support to internal resolver * Change location of plugin-generated APT sources * Add new Pkgng plugin * Add ability for per-package recommended flag override * :ref:`server-plugins-statistics-reporting` * Add support for POSIX user/group entries * Add support for Django > 1.4 * Add support for separate reporting database * Added option to periodically dump performance stats to logs * Added option to force server to wait until all FAM events are processed * :ref:`server-plugins-generators-sshbase` * Add support for IPv6 addresses in known_hosts file * Add support for :ref:`encryption of generated ssh keys ` * APT * Allow specification of deb-src lines (resolves http://trac.mcs.anl.gov/projects/bcfg2/ticket/1148) * SSLCA * Rewrote SSLCA as Cfg handler Existing SSLCA installations will need to migrate to the new format using ``tools/upgrade/1.4/migrate_sslca.py``. * :ref:`server-plugins-generators-nagiosgen` * Migrate configuration to conf.d * :ref:`server-plugins-probes` * Rewritten to improve caching * Add probes.allowed_groups option to restrict group assignments: see :ref:`server-plugins-probes-dynamic-groups` Thanks ------ Special thanks to the following contributors for this release * Alexander Sulfrain * Chris Brinker * Duncan Hutty * Jason Kincl * John Morris * Matt Schwager * Michael Fenn * Stéphane Graber * Tim Laszlo doc/releases/1.4.0pre2.txt000066400000000000000000000034721303523157100154260ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. 
_releases-1.4.0pre2: 1.4.0pre2 ========= The second prerelease for Bcfg2 1.4.0 is now available at: http://bcfg2.org/download/ Bcfg2 1.4.0pre2 is a prerelease, and contains many new features, including some that are backwards-incompatible with Bcfg2 1.3.x and earlier. Please read the release notes thoroughly. This is a prerelease and as such is not likely suitable for general production deployment. That said, please help us test the release in non- and preproduction environments. * NagiosGen: Add bundles to configuration * HomeBrew: Initial add of the plugin * Rules/Defaults: Add the possibility to use the name of the entry in attributes backwards-incompatible user-facing changes ------------------------------------------ * Changed default communication protocol to xmlrpc/tlsv1 * Diff output from files sent to the Reports plugin from the client will now be in a unified diff format rather than the previous n-diff format. This fixes potentially long client runs when comparing files that have diverged significantly. * The database options in the config (options and reporting_options in the database section) now have to be literal Python dictionaries. This allows setting arbitrary options with nested settings. * The Ldap plugin changed significantly. The configuration interface was simplified, and new configuration options for the number of retries and the delay in between were added. You have to register your LDAP queries in the global list; there is no longer a distinction between LdapQueries and LdapSubQueries, the names of your queries default to the class names, and the Ldap plugin expires the metadata caches if the config file changes. Thanks ------ Special thanks to the following contributors for this release * Alexander Sulfrain * Matt Kemp * Jeremie Banier doc/releases/index.txt .. -*- mode: rst -*- .. vim: ft=rst .. _releases-index: ===================== Release Announcements ===================== .. toctree:: 1.4.0pre2 1.4.0pre1 1.3.6 1.3.5 1.3.4 doc/reports/ doc/reports/BadListing.png (binary PNG image data omitted)
QnOp5 Ϟ[4Ey`#Α#8u 0VYA$K!y3Zn2Ċ fWv:EaR 8^[#(f d 0fXINR9Ln]~khXhik-g~ç'GA+J$+++bFp5O) <)%1ZjwpyxLEw 4M(ZZZb`p~a^E(&}Pb] n鈫e0jMMM I'2&ZW*}4v)I;;/"y.H9X֢=ֶWc$dllh4 Ȃ8bm9n+,#=XkׇáEge0 ûwdc,8S)tTQEb.nxxxxxxxxxx|P`2c2Z&Pc IE[ZkGm)9Q[ )ȝk,N9R MXnqegg9@qYLY1Y#nBSGEˆp7rjфZb>m;H^!8P c@Dt3W*zHR( @P @@Ad+ ;:XW(+t)&wW~xfLB6jR4sA?ē*oOCFMpb s8[HRh>>߮wA**E# T!ׄZvAR;Aȃ"y!4K*4nx8#Cy v[[nLAJz7-qC)' J 1T<C[O}K#H YĢR~^?CJjȍ_d@ P2PC!($'i&d$CD1{{/O4+*31zbXR{zt(ZH]k`u.Lܓod^ θ0]>e; p!.}Oj:շO&BΨ"BjGqER8]DI)lB!gt@!:9-v EA'iܽFB@*0 u P!Dpzï=?v%IU RARPA9;Wr}h 0Pߴ/RPвU<fi-K;_!Lt 饙@%:V,3Ȑr,ý"٢)X:(?/~r SHHm#T<ⳏ8HBt\a8~v*Eڕ'KVgdd1O$Å6+nC*T1r$brũr9>y#Mqxţ\͓E@BYB#QU<=T?+::CTb(LmA:#J񬫫Ds,gLq 1ҔJ͵zmbvֲ5Ƙ+Tw{+w6~e{AnB15`fg90 N{$0#?:{15H5t foCNg1OT֮Ȼ3/!*2,Wur=tN R|t[tANďy"2",+)lV%G-n#<ᣋpf;yj{Ű mw3D, t1&O+;>@`,jRDX37l)6oHj/՝s%u\)H*^p"Mw~wI0 Ύ.j-ml(,zm2GDf`%Cb8hpS?sTxSxI)~lm:J=xp"긏B[ 3vCl)iM}=_^B=G_tAg7Z:==a5#$7V H཭E _N'''cޖWPf+|_j4ߎՍo'wA<ȵq ?E͉ZP cJj! t-*ypڊEHU"C(P2)6aF $TBbH)Ű`"^ $Dd RQ@LP1  VPHAe|BRZK)ʙ EE2|]]sv#{5|)+ ]:GPNt:j 8ma6A%]xGʥRcy~A-2)|$OgYP a {HLdm&X),,Ch?^7sݹ0hk5DdDQ,jt5ր'n0Vˉp#09S9a,pW+?#+?cfQŽYnc!` Tͭ{$M3[Oj{5M 5k>7{ Eע:~P 86n, MsI~=j1Y9vNi5̈́ʂHU0`)K=G鑒 ݦAL)Sjў !4E4 Ax+ 32H(2) B@ @!LR H0 (*a:AVr RED#$)3PL PL( B ˇ d=rP"R , AF&,㇉A@2݇r5<<<<<<<<<<Zh[%Q;) mʟO߾֢D0RYbH2(gBdCu.G^JA6HU[d9IS&F{7!4{,/a2tB9Ip|IvCFc0D?ٿӧ]wXAq"f{D_@(Gc8T=b@R9o"WpH)nb=٭np( 9sXT9'/0P Ah-.>pf8"YNy7GXR( @확G۔kU %*5ED%ED"LjXA1) IQ#*@7!S[P@D@@ ! FbPC"l1(׊>"!)9@)EL@@-SW*WXdd̏,ߊ93;x0T 0`ÉY׎$ @ݠO§NpoP#hhY)a i>D60N; :_*Q! ~oh ,)$a IYu!&DCE$a_й_?я<]Q@#@)x>xZa'y#8N? Ly#djb1N1RS\sb[̙{%'Auu&)`@Fn Fn{*`>6ZXe `0ma `1cYV" ȗĽ( "3V-!E7^[Wk->o.b a4?hT53jOC ˗;# _!lVZ(Q" ` $N2vLnQBTɝ߮NSc#v͍kd&f0֦¤-`,RX);lNpQ%M0-_R8MC)ȝr|[Z(' zD`GF IDAT DZX 2G}@tq&0`tf83E2Q+)oを#{GX2wR1EkyNɓ uXfrK2o+'/QRF`0$5G{61-34p"b__~_[?&_'^d(0%tPW61(@`&@q)q {P^Ij p֦;$,2D{ 4#0*;Vj@1YLamaiz_??)"JSEObJ3ʸ_;}0N7y[dپcMIw8.kB >ndP-*=*NA dcm3ν\ʐ+A#_p( !`(@sxA9 OU$,X8.F"ԃ! l>\|KiLFp<Ešax㈰s5!tlGئ<<<<<<<<<<<2U%@HqEPyp>r*N #C"Ȟ' EE -:=@AXn.Eq{6_88`d5Wğ61`iB*@p))2RA0OB0|R?ˌ#R eʙf(>CXIɟn'GV`yΌ0",I!0)LZ(&Z#k<؍>A%ǖk6>*4!R8Pw&HKI:L3<^{2h@{B@&2+^$5 Hgf=Ee²83.ð ,',bNc8kPetiڋ\{y5zYztBi-(kfڌqPB!B9T8S3=Ve8l4 3s]Oyo ~g !`PTRW6n@ 8.CH@ @"B7i)98,0`` )aV)Mb%zu%#b5jn͔$"B!BN:Iɽ=}>U xU qu{؏xDiNNaAjN4A)J,2(T38 %,+8PloBZp$t0LX’n0`}Xe9H_tq>휁}/l؈zx,1y !Q\@DȒϺ}CD!i/$dLD_ LJVq@JKSA@iM}輌3B 2ފূH>IiueunR{i]J@0Em U;euXtW2{(,PTASB`5{"ݬSU)Aiyjl!B!FLr\BC"!P5xΑ8sUiKS S aBSQ* KB7`^Ki0 0;)d%K`q8 L2(I]eH a,R S:L eB 'n@X11g\;q=:)鏍N4!9㜹>*O0|p?li2,)s(ܝ;%H8/RZ8*] h'GH¼@+V+2dhA(@aPg3A2'bGW81,HɝRa49)9.gI 1`DžKDAΚ,`PWgRrɸ *`J9bG$dNXT ܉kD1D `B+B!BHI3@QƛpQ<ZE8lvRFUCSX@8,BqՙKbJ(*:FDŽeB7[P:5!n=, [D'!a * bB`Â4-MݺbaJKkj{MRrQ%@8` p0 ɠp^&%+ ]iueÉН!6gN̂h;$ à01Ę L{ArWJbq@ev|>pET(Q΁Gpn?Lp @W PpYK`82@r1&981sZs?)nuÜB!&`AHTpAQ pLR%Ab|M'˶mvP5,_",&tLp w"?tJd{v wvp— 0LH ӄiF 0LH7aOTta4nܯ)>OBBӴL)c:r^UsV-U0U@fNA߲3Ch4razD•7t XlAgeKQGb?-Ess}ҴVx.PU +Ѕx[C&gθt@a큰S+P%A#>p#w%eBwʤh ALGqHfR0< Qvԃ3^aYeLTw{$ θq̪(#R2 0)qiLqϲ_3)ǩp.+D/7p_ePfi!%"wǎ |j|!B!FDs f SfI4XLwX? {Aztz{u~ Հ@Q`JE~i/jM(B:AƜ,(ϔ%c:v xF`?ثŠ`pbÄeuˆ:isu2XE$ҝ#*;E0<@\)*8'1gNfHŐ(^@ )~h_/0-Fxˊm)mtF8+XP tCDC!cgP@20()|STqpn848T8=1v=r;Hr w:=yJaPD؄eNG2p8uAq=1"%7]ʎtȒP!B!QM0H˔,nrDUܼw7htum,q?z1H  @a0%TLB T pvpBI: )*Bq1(ty 9s];n8%p8uv,{FjyDvTfvђ*ݳݕNJJz S ] g0<%1ytJ'D9Wh5JNt1ъ] u.EP0AiigؒR xC3&8@tflҲ{spkN\Iooc4wvs0{q0a !Cn.Fp*1z8==-Pv 8u&NPdnx>;pA8<4;Y/o |`@!B v'Ògl%PtE{Bݻo8Ejl޸m{s`Ih f4e À_b#`#I܉ 0$HLBLQAJ˙cJ&q-=mwhi0 UU벳$Uy2cN's\2E-KDGGeRp[U4JIhWD PޡA``@d7d`SBE~@+EH)ܠ`N@9 q&9LB:q{;pҍ8C? 
0&eЊ)R:w %̝Sr7vص4JNlH1l;QJc>{:-㒠ǑqY8I Hs"KN Tء+{ B!cRZBJR ` tY!Q֣Qk(eF)_8x^%?Z5#+!܂x[P3v!fAY @ 7[pp8 ',gM z1 r [Ev^nݪ9)61M`ܪna!Y0s^V2Do\ t>C쳡lP: S2ɨLJGa6g=WϕURtB!O0%`gTdJ40-b' Q0>ݺ۾cz/Y|,GrvPP e${MHdRodX. MAN};ۥ_ώݻk`ZQnB!B!z Ғ KX0%LK" ւD&NݺU֭uKݻҹsk;#,(< }D\O s:5zw_ޥg/!B!HKjBJ`S͔Hb<U[Z6`mLqvmKߵ_w(+';.i@IA7;Ò,+&96abf4k!qZmNiޢIh+h:`a˺<B!BsKa1_0ʠ9§G9N=iEJ"o.5w{$P֑cy)(*֋źa!,R9:UMU<1^O/:%)q S5Il2՗2p,΅e78g'|B !B!@2%Q,q{!eHbH|QY pJa^}#::]Vڵ <],'/7 ȯn )4x4_Lt/:> ZELJ)%̩B!B'4;и9s6 Cp8 0î]0 ӴL 씣s*pfqB!B!\T8S <*Ux;NѠRJ!`G`iA2*bY #c EKB!B!'c*cRr=EECU^ES$pc%|y4^L!B!d+*!= p<JJ@B!B!DH* 4ބ3x$8GpTiH"!B!BN $g *WWWà(n`eB!BH݋Ȓ%t@p!D] !B!BRM0H! -L pB!B!'a``Y% A7o:NB!B/&%!` fBG$'B!B!SL )X!L i,)FB!B!_iIݔ`J0,nQB!B!J ޠHc>4B!B!&Qd/j ~4B!B!&Q$ LdJ3((!B!B?sKa1_0ʠ9§G9(A!B!K2%Q,qmD=sB!B!g @SqES9 l2<$B!B l  Ve :@}UVBȴkH!B!e}K`LeLJq THq B!B!_&\gP9aT ^6(A!B!K* 4ބ3x)FB!B!2 BrƠx{x< 6h!B!Bh.! L!a, ]B`Y0 8!B!R&e MX& H &b B!BHeXpa|Bb N1A!B!KJKHC*" %IFi!B!B/UXȔ Lb B!BHXMi i:Â0&)*B!B<@`/ $ 1&L)@#8!B!RIKjEBJ aDD>_ B!B!39 *3h*|xSB!B$Qі!!ID13Wh !B!B/q0( 4W40P `46N4'Դ]ZBI!BST8Sx"ZA0(A8uKfce(9NT[pϓ/?[ז8e6j>չv| xU&?}GؽetoYhH:mjuuY!B9S{)**|(RR uqϻVV\Ҟru#G;r5gJSDp8E#˻ُ<iZf?ݓf&]Ҿ{q&\L#߼vU 4MM"B!'IK`H0/9PUZ+-KVpJ]C-9Yv?KW#LD˓P IDAT^r'T,#T]B! E o™G<Pw 889 4B(0 7j7NWN\ԶJT{<ӮSJ]S2%NGɍ*IIk.B!rR0 BrƠx{x< 6,QwK[ChЮ Vy'@ڹogRV!wެ^8 ?>ڔ:p5n6}{:EӬjt巿"B!X4`0eD# e4buZ|HOOe~fTO 0GM[P6酧?xmE|[s s £y%'x=ԫg9x %AbLL%ê+'؟NJ׍FMOimÌmԲ}j鼭3֍"xtj:ҫq4{iЩsj>E@}9@x|q) at#:i,َA!BjjAZ0d%t[& ̝R]WuHw7v7{<s%??ÚzKEzIKrR$6h˱XxYS?|wZ |tơeO|Kh< $Œѷ}:]L$ξg#[*?^!Ǚ/]rE~ZF;Y!gp[N]Ή;ƴ]?VOϸR%Zi#ϿL74 煦})ص.YZ39o׌9J/Ꜵ`O~1sͶ//U-˝BE!Bja``$Y%[ >@Gٮ<5֝ң \6{ENqYCzի?ѭ v~Y&]l]ys_T5/\ c5PtC@\b ʴK컟x%e>;"x`y ˟KYfѹ[/rG/^y1tk1gO~=`\3gSR(+cѐ}w†/_g_^]9i~yoax6p4p(8!BꖔRT) E *ɨܳbowcJuZc?}xïm2kuK=F)ȏ}X _ݴtkޖ&LAJ~X\9;u̐⽛lO?QEC .Va΁aRC֏G10maie6'<.I+MǴi/selذ70/8!E=5t漨ݘgtJvyMܾMmjQ?YZe#Wݔg*LI-ZF{s?q-vYkL7w.6"ii@ '_9=8z@ƢUwqWZusfHPw͎%w?R;L)gRTҟdQgn#3shMZܥl">w೹mn Ԍ2_4w7K;g^{>ήP33bEa9KԼY0}G~ v_Mr&뮛ѫl0f\NF.6[ύWnM_o97)(TQ#49". ysڴqŘ}no(@w\VsbYS3gm SgGa;5NgK*:l[6!B6łnL u5Qd]3CQ|p1zۅʋ4;gJj&~{dٱ]uwg& }w FLgxYNo2"Š{JAIS;ܗ}m}yr!y믮{oMi7~# ~xρ0i3ۭtt@K1,,ypԗ|$4իU3fg|'9[. 7t%GOLܔce^n}oگ$i3 uۑfLpfni̇ŝC;=;ҖQesԹ#E.*n8< zW^AjU;iYԿ>@ ~ !'=ݻdvM"(>v`YEtyG9@ 8kWWs >5J kN#iD@q$u G&4<#۸ޡY=^Y3]≁HoϯI'C3/ȫBܽ`ΌV7w,ѓmC*)"n #Ȫ?T}gj ? 46sr}56H=LKO_/ sBVh5Ks =3ƅXUo7 @5aj/Nz{Q6Wg# פL}Mig;l߻MpYߴFsNL[^;uHr;rc-iuz/>W餙{wFgHm2SPmBH_w 5.qZZVe/u$cʔ?;6Փ&[ټby,Ǚ\p?}8Wse7t͠vaK 9mm_=kj푨,Og8}Ւ_=yR":؈|ŒLҶәڵ+Wn5tС}ʤRal3Gbܜ-_~G0K%,p‹gڻ+_ k:GZRRJ0,jD cm8黼 @р[mW=n8 H}x@Wq?s3D&, =>7>@Diz^U@T=kߋr ]wdࠆ0/KTKHYS,_~:gͺWioN_o .PܽrŊ]rG^(zvdQj?Q^8ʤ u'g E[IJAe(/:[nkbJݓ3ĶgU.K+,8@L^q7iPzǧ*'`5jl !D0v>;Jw9Uy:RM2N=rO,wv7UZo廷,F.~bho6cXt8kKvY3e) ~N&P#Ta,呜9;^L9㇖U陗_)MO߻~W;e3ω SϺtEK_g >_OEYB)ZG>0[=F ΝѲOĵt?ܙj19 *3h*|{\N$Ӌ*x3\NiX ZQG? 
ǔ΁h8c]/K0q2/wAN2#Nu*-sG.Jm%)4;76 uU_ rbp#N-{>]O>)֎Aח&ȘG"J.#|;.lڹSwQ8qɳo-q1cOdwy9sO̎SQ3+yUl[y"<&'m/}$A_r艕Gq"=@t#nĝm+6+̳QEΈ[Xdm~{4<1sN|_`|> s7/xۏ~|YOl{#(8 I O"RRڷ.o^ٲ LɨX3=+yws' yZX wwz`WYU$*c 9ʊjȞ~qA 7s:Ō~kۿ:r R9f_;/to 寜|MA7{6mۮmk05~?_"6Z9f'b%MZYs"4s|f~Mr"zБ& RO@ß;n}wm⌞uQ曤9?c6웇LazSݼBSn%dBS з!33bgeOvTaq핊ƪ@g#~= g;q][Ŏn y8{WG_4?ϛ8o y:yFL:nߗv 2zӞ|Zj&jq$8 @SqES9 ;7cU _m'|"_|$3+<}ŏ<}=e]~~`BU8==+7!_**TaYmay@I˾]ն`^عL$e@+F72fL{h=<:.ki}hjѩ˜7 +vo֫Ɯ1],z_}|ÒU6j ?qnpb[}l?(u q1 i>-{Mpyڕw=$)ު-;rv;Tvu̐⽛lO?yPP&U׆[6o5=XFVQұe3?3xo׬ܬr_rPp^)|{1ۊY@ms {wdԏl*=-z3~$j^kRUlco>E\FDv砠aZ+<뱯 P{ݒ}/Qۿ2 wUX ]Uk<Ԧo苍xrɓ;0ഁ݃=%yk濹I_=x/P6kL'ԎCw =_]^=A*)@((z盽M7W{O)\4KGԬʪJz 6is6fLkhvݍ^~!Y\?~ @"|nNn}EuIKrI/lT4`Σр{PPz64n.ySW%Ͼo0)g8"S{)**|(rÌL,s_[ypdр[^r" ÃstIo/I_6GP5K}aAݵf^=ޞ6tLe^:ywI}V$8/cEWnar])NWvmAK:._|v6D4K~TVĦ|„ &L|ĥݪrzx7ڏMJfta+*[jkF=Nka R֯& `2})Ȟ6d x[*JKJ_&/HvsT2c{M|`ӳgL \.¯zRWU@;ώn9iwzƓ$WT0$BzbU𹁍 p@k3c<'WZָ!^3sӰ_? 'gG/]=~?/?㏕=:+n|l[s*T5w79c-{O*p ;}xN*#}0ij[~q{}X- nckI3B͘(loBjH9W{;DÌ wWB#\_}cun*<w\|*UJ N _>h䖿{s5a v}|ǜar >;IW\7p𒙋*D.{s;j7I/po/gox7] AءOۋ5/?vs/prRͫj_\zoʾC{IZdъ5gqءCzޕ^H#)5Lj[nߞcδɓ'O<o?7=_x0)tv7f ]ˬX`ò-v7s6/'/o@!孍P={H{Jz?U[Jץ)Æ ;[ZZ`/_2>Ur@m/{֑}?t̍s. +DCUveԷW={.PT81h g@)Y3spyIW`S097ƔMBXLܚw]鬟,m+|? yj+**UG?Vd~͙ѝPRI^x,ڤn9[]K*/']dlY;@R.ڥOYKԨ76q?3߽yk  tL,OoM {R?KR(ĥݛGQC{%yɵ5x ߏ>`\3"N]# |Yt2oL IDAT8F5cբgxmDb; wOL4ܻoW<0KT2!6μg>w՚Ɵ?̄@~: CG&, f{ӅW`\].*P%@<˺kN Uq~"oRRTYyzfV(pXAzf WEqWQ 0׿9&vt7gddҸԎo9 뺯0 jH5]T {=rc$kD^WT۳z?wݝ>ؼKXE1799a Q飏uѝTG*]gD_BI4>o\~"aqu@6kLH @s&`AHTpAQ sVIFێ{N+̏7䅶dۍ>n<}Lh~$Ͼw!'k]Ryn%@+~Y5y.6g W^]to/kcEʃ*^;b6BM![kzL_S>}:8:O jZ3푹9tC2GTS5Vlޟ[ jfɟ)ɭZ@,[qZ{nQG6g7BpE펧[aPtV5R%ϼ>tTXcƑ7ˊ=3&aTf}U7%֣.YmJO#[MYleWn\Wnpjx&eJCZB7eȀ*_uCKmRqP:Ըm=Tq]Ԩm> qƀvafvڽ5:.kv.+FMM3mS$KSZ)E:3S|hP7k޾l'>{Jb"1c9ާdږ%p{{ cj1u DIh~ՌoVV,%k~< z/OCڗo,\0;0Q}sk}޾tүlؚܬ{wH7UƍY Rmqx~spܸ[vy -g`}4]yj\u~ܗ#܃{g&+bC;6v2?tZ̍ǿwe 4_M>&d`a/8ƃX=ڻjy^*p3W_Yg.<ظC_ta.os\!3;XBSl;NתOEwj﫧W:=֮'a``$Y%[ >@;d:$BH.xvzM#.C&I RةM/V ظP>z45ASR=p犷^k{8M?\8-Y^:oOfcI|wQm{$N 5Dz @ޤ)QD iRD*ȫ" ޅ"By?]rI.] G}s%37cG 5(נņԸwP-7_ǟ)MPNß譫G,Hhzx "䱐'e?/fV~ uXIHcؚvCʋ9nGj}+2\{>vM R[?~c/5$v݉+gTV-OlaSWv@bgura[B3G2~6g^ +| 7@C[{U'nsrL l7;/l]A=d8M )P (&ˀ5'wQ!<4~6*S ֫Z!T/|"iۧ-M}H7֨vH:!:PxWRjZAU\( zJUi\'QFkota•)\BɃ_!þv nP\LuQ?2-2W˸yOEnWr|reYd+dɳג_(ֵm?-?{1#icSf 1wΆy=~6ܥl,Ev͚p!/6bw}w^N|_Y7DPV2?q-v&܁ȫ^Aʶl )ˏ=vGMg')5n7>a]cjǗBr?\1Pu'6 G|b9ed;N^7F!?7Qs] QWk}d6o3g_M;Y!EkpʺWvhLMylˇ-; tYf)f!رv2bYwo™ٝb//Ji|xu _`'w w4\7 dcaZ/S-X2o |rSeL(bkKJ: x/:XƏXJMsnrĂe˖-[lkN$R?rhLxK`7}r}xtgQᆱ_3E[ UR\sR헭WO/^hc>U"7d ·䘺Yggc~_:Cz'l.I8wϳ-Va;s0vޝ~یm¶vwW~8p+UzlѥrnY% w-UҩwXzj3zhULʑf!WLʵ[^^zBb'W84VφYG,o7>O~ot/ 93Z7q )ܺ! Bpeu=~t#ݳݮrG`< Hp @/^KMrN.̳v`PZe/̖ߧ>m aX@Ҵ 5C}rb[#&VX15<4yg*ؗʯ/$ HpA@|>}'۴ˢ s.-ݴnf%p8עk%0_ϪNڴ0[]h+) ~>)i!},rVxIٽ+`ά$E$?~d0jD5ָ~쏞 oP7;4j޼yxNfFޠJ .s5G9$,CB"G Qb<ˎ E]=>;8Q򘻻\OU3^x/&`@h|\5MFp_ VU0ܹ@_ p^[[txL!y7h+#B1gg٢mi6IJLIu~z^EsS[FV/=;F&pܪ$qG{bQ`oPNޓl֭&WY+˸kf_^_M _Gz|fاOZB!9H&34 j/_Bj>h1{`RHNbd˃NT\)ν*]=g\۾Bņᵪ)2.mm2E=qGZ ɡc\B!$VFg+B!Ndyȸ^鿥a_Ly/^;Z ,Ղ.9[^@ի`KBQ LdjԂ!TP L}sfr!ba5k5E!B!QvC8KLwxDP#6T:WcO\>hjVNBl&kAf`D& o@B {8RO-O21^p-_[wp)#B!R:AbGǀ]vԩt=Pl*ẃ#&G>_NySIwo&ݼ|jWmڢJkk29{16d^}@֣cGeLEB!Bȃ$2.0 (6ܭqe^H!vЮFk9tb~hCR 94h\fȏcC+,擑G] A Ϗeo[v0-}t6[:B!Bylݺۄያ*`3^5=je )Hx>}n tk֬? 
_ʘ)`r@[daYdiic"^]^ {>~Eu%-}ru_낂b:=өM>So3{U(#Qs&{^c}xOWװQ l+tn: pQ"B!B!2d̃ &3%k1:tԩS 6̵*<ۢrۇv_6룟٩i[0#O~2<n=4h5Mx6>M:?FFj[WS*hdϷ,|e+$톶6[z83K?,ͦt B!B!T\V87A ɀbB4܍IFw]V;fڵ#GۦM*stO?}8pz)aAbڕ5EedkkgR2/\I8o+[ GY95QEx ꘵J$Ft 'Xuj-B!ByHH\aED@ `nq֭5j8Xf͸8+U ?|݁wn3檘v]stԤENyՖl[sTl; ZҚ~Bmb$B!B" .seE!& EBu 38*TKڵkWuT~ν5u]j-${/T5hF[M:trOnu;q+.L\ݩ=+ B!Byh4Qq )2#""ʕ+[nP#b`F+_Ho4Ѣ=q%)Wri(Fx@_/뵪c`)oB!B)$AEbP2ZO5[`ʕG7@ 5=7s3~,6.1h4&k"ۏVzOjnW 9}ã]e0=Lv*eʺWvhLMylˇ-; [uuu3a !B!^qȡxqqL.IAW<8۲UOߍ(k=>׿73"eMjp"aIm(#^>aVg٘R~*w:gE1c7#hwXB!B!ewQPD5AJ%fp.bͶfN֤Js&]af#Mtyb.3. \U{[dM*9/{[^e8./ӕ |ZbdVgdWVC^jk3YB!B!(@dxʨefp0ι;,:`2X/m*Rok0<U w&P+rp֮I}bzo`w!B!B g֭& _PVyY%, L;lT^ӧOjzV;ը#=J8VEܚI@N ڞF7!B!fC% /@XY-KTH~߭ѩt_ !B!*A-TF4x$o373 B!BHiWFPI ē8qeHfk1x4-bB!B!!e+`ibYB*f:B!B!e00&M(Hd) B!BËsY.* ɀbB (dB!B!J* PF ek1 B!BKenȐ8$dB:`HH? IDAT)IFmړXQWDp2 v0^ISWع"r#>pL !B!R4@`̨X8dxHpOx|/))䍕+kcrԖjq꽃^$·pڻw]w'Ȥsk׮ uIB!h2WsS8 `!9(h++EQШys_+`^spct]ɹwï^/隖=S䊍 w]w$x[i4B!Bȃ# ,20 *x!8<yO_.}}ܺM3zvϾ370saޮ$>< B!B!nqdpj01:ʕR?1z7{}ވ-]O^3S$c6lp5j>Q3AF;V|2^.m6l׸zR.Go?ubnMKdu"lJ2ПgTϗk2A0l !)lXgښg^M0U/?'<{ؤ7naA6mѿM`\O.oO݇^zLq_AM !vyEmڵlRBo8qHg1&ߊOo2i4j}#&?/_xm+d^'q3wbSftp !By(jY2T(Z DT,}c9wkeK8n5qW`j?7H=͘ڜ:7۳|pHVS\S͖ƹz.}r貼oao^_@;F&5`M=zծ+Q|~L-o Ɉl=az_n0Yu{c z_6BʼE`$|5&4dx)i G~lGN>U.K<ݤ6R*nŀ__ꋔ3–M(l?sZFƨś~zGɈ;vcкA^6rϟxbwBy4M8q龾%Bun )1rv$,G_f>AO(i0LO>{[nUP!""*5mԸWNc)'^9V0'n|R*7a^ _hJs%Jt=\{WWNs}ral]oC.[Pgu bj׵(7d| 矪_^'|tO^* B{l+ֳOEs^~rQd F틈"'FW@c_xN1߮}\$ۣÛ޴0Cj?ċw{ uS{~*EQ1an|\W/=O>?;SgϞZ sX1s[*dS3eR?OStR͚5U*$Il6_|ԩSꫯzQ,QB!YS1ƹ,QQSJ ЪnmիW߷obŊcuLqgV^QV跭-h3|Vuյ#"siVK71sߖ]:mMi88j3{L);>y×['3ug(zꃽy@#aWʾHR홭{jXvNcw`\e)ʫg^(| ,xop PJ={qB)^C% /@k#22rС;v|uaÆ\U%{{p^v.s[dR_$ /Ɩ`[&$!\f2eGn $T=Y'ޟZXŬ;\ Xk^"|Bfkbͤ:yNd[֡_PĤ)1)ɜrWm,Pv$^w;uet³%ֿ:~[dJ411+-3HYw69ƴJ3P>flۿ%&)ƒrKvK4zOϩ+mrúG}4qDoooAA`edGpuSye VSEwܧ^'|[OZ|q_A'N8w !. UPA`@@l0{Zjenڵ#GۦMS]YH(k9n57 @u{sC@ڣgn c;Tz33Oe%9ku9tY@I8o+QKl8GK߰>rE'ݢiK[#6|-V ep٧ lטSV}z՝;$Vy|/8q[٫囟qqe/=x)ɯ˖?ZwZJI8v[<Ոltٽ_}Z"M31WBٜ#˖M>^?ۿ*mo*XTAϫ[Z2,n͊R1-9nW|eU[ْ;s?Zw4 zOz~… w9W ~\\+J TQԪUߦB!Kd0(\` *- ZZQ0q֭5j8Xf͸B2Za-~夿m-5An/fފ:'?~/w x {]Frʁiܳ'́tCzƘ }T|;E)c3-{9> }*2?OcgϞmݺu-,*VS&B @'q0 ːbnPBLLL.]j׮]!jG>OeϷ/ۻیL϶콘ME΋Mm Tҫ`Ly|d=Z1oy5<ս`-~ֹ?L@,7x܉Ob. 3v-U;H:k+lܩ,gĴ?mUgUӦk:&j~@%F|x֢`iڧuy`F%ܓA@ơVw`f{P_-q+]TPЉ{29oPfOϫ#WoU?O?gΜo߾;v ܵB!H%W d$ t3T[ 򈈈~֭]vٴiӍ7ڶm[4F۟f_X=k7\P{b b%Gs2 `S~do#Y 5TY#m# 騶z&9F-+e`MA٨]qm4bִ/o3]f7^EW xyn-p)%1M(e|u%KݰmsRǮrfJHѢ>=Y| xϊ!ڣw۲KUBS׮Kk6(Z-y?zwTϰ> lPZ 1yZg_' y[g²?qߞk3\vxCp11=x,Z dvc\.eJx^(~/3dqVZ`ʕoF8m|&B ,``9 2Vx2HKhŊ֭9sիg͚q/ժ uZk?azU~Ŵ\es]h@sL0(sx:WOaSrB*ؕ_\>[Gw; sTdU,3%[URHzɧwZ>Q s1ÕJ?lysXǘ|ū\<5!wFqkl٩qFzT,W~r{Mm*VMns,w;Xdm*wܖ(7k*0c϶F]}Qn=5u[~_~U^R,F3;\$9|ͫ3UO^(~`O0x޼ygϞͧ B!$p.+Id@1!]FnQ+WFDD0"""etâ\W p>X~aƎ &hHyύs~a&@HwUǯFw|ۢI\=}gw|F`0ܺ))LNȣ]'U>UֽFcjc[>lieSXv>A:5?< ,넵Wy:Cz'l.I8F˱wϫZNn_ib~'K9O(zdt]9wwC%rmo5Kr"*Ty߿xų Q[if o*U!o?-nshli56&|l ILElU;Gi}iq{ZFM6͟)Y} vYOwXg^e)BǏ\׮]7o={,_|ȑ9aB!%Gen +2$I6T逗 t3,=қS[N[=>B~ybۚR|9kvoNd[P>߻CsΜc%-yeSpoo;=NߕW~=2umleZ'SEyutead1hj;|w; >oPi_ݶKe)]erkG|{ݳ/ndGw6XȵGx]#5leJcW_5;d帜CJk(*[Z\;K5J[5!|y0GS^׺.OS}5{֜Y{ R4@`h[^'CsXv(o?[.٫Kw 3P4/\S+xϚmż;qEțxO/xZ3k'YRϪi1:i|Psb,pPqb }ud8J*Fg{DℼFIw޽߹9\C8!.s5=s f*Ï Ri(,^w<ܥbN|Tnw4v9ѐ7GW>8؅jڠb.^ƆQdsmkYޛB#xpJIRNl*_9ꥎ?7lܘ.hڣ^ ;3]?/Lw΢,)R?j7,˙ˠ !# ,20 *x!8!. ߠk_|ԯ.3@ueW1qo:zxsPBWR{f On4I[r~BxO߰ݗj|3+b $Y_Ɔ./_746y1_׀}bcUuxǪ;* p^ΤZ$)Gg8,tPbȷ3pOcN_5$„U[kL=k5ǘ٢qCnɘxGHR'iy:cY`/Jrq`{| ]\u9OSδeys`宧AC qռyA,C*JQA!$(j @Z`" *! 
pBQi0}g,[LξL&s6MPIm @{smE)>+j¯ы&D;٢-4ș~3ϑrtPW0a~zwO֣>^Ҹ-K|=F-rœ渙\L̿9W1ܢ> \DOK[?OWQeYHHHTTY ڵk7zhEEQQB!%Ff`D& o@B p!B*u{/tlPo_m %5zbJǛɞ_X>yì9J|_;/מ> 6Y˔?}*a89T8I9~~sy/ BNr}Zà/vw*Z Y㚼X:!GQfAh+(~/J,{&Ol2}þj3!Bc*4%**x PPZ`\ ) !Ĥ &:@ϲE˹v3Fa1ZlBh4:422Z-o=+['=Tp^sAd܍{hj|R* _T*N+SZQ?Lϟ' 1gϞ]6mڔ)Sr3$7lؐ$R\nmBK Ci͒"cߊoIou%xt$b>}h !t=\ IDATApn7wPo'&G 'f_P3Kf2{<e~l+1-}g)~E.Y G=ڵ{饗ѣG e2,:DQTE- !?. UPA`@@l!<rfϧ$s4=ܗ+G!`PT[" 4 nؠB#Ȓ4Om+apÙOKl OS|Pڵk]˻($ݼyզ !8(`!$r&@d-^)0_x*=|{׬[H:Ǵ rvy~BBHR{l$M%zرcݻw b^Sܝ$v˔2%wLa)縊1v *B!$* \f&+&I%`R8oe4Ztnڵ#i+$J?xxm۶ AB"5&ܒl6XVclB An>+ ! 8K.3fx !4a„SN!R qp 9H2$ɶbn h-;40kl9 ]sɑO9W_o׭9{ɐ5N|cXDgΐLq7i WSOܱGɠjwRLqBxŋ;B!B\#27I$LtKVB"GeЀ-gǀV= c4rII>Æ7>rh1j1/vȏcC\Haʠ|eAX֧ʭ]/zQdVb#IO!B!bGqƌxOIL+pgݻwY?(V:#3cFzUMѴ1jJ@O\W-?p ?9xoG6n߰oW2ϙ/O12!<9xƼyzcCIVW,ϙB!ByTp99 f9R nO2Zz}}+Vx1\o4xÜQQy7=vO@OEQd]IDK+QaзoŽ &h/3"SW}ճ={B!BHi% ,20 *x!f gGddСCN:hРg}vذa>'fxeut<hr^utP +صGUF7,4ZڐB!Bq395/?.*nؽ{wZ:vyk׮!!!{}dL2S ΤdB |s5A5s%NB!B!䱥&2XrYUP L @PAepǭ[jԨ`͚5|0 2Ƣ@!B!<^d. pހF2*hnQB˗/8xҥ+>`D R!B!'1bLAkYUMP0qʕ[fٴiӍ7ڶmyԁu%'v^ʑC:B!ByI "0kZ6]TVX1lذÇתUҥK7n/ɃUek[',?ri GzZhC!B!<(0@TAPBiD@ -Qq[zʕ{yfDDD6mV7u>cz>^I{˖4A(qw@!BȣlvpCx|I  AżEA+B@ ;ž&m\ btXѶ'#NbUluϴh7>_GoB!@ѽ 0IxG9 l-(L>`p*-4Ԟ&[OY_;w=^eƿ{\AI|9k] gSt;jӶ;Tߌ>Q1; !B!džJ%@ɊId f8c|!qOL8RvҞ;!$QW B!BJ.[n6!՟ҚdW8||PHOa}q.*k78{ߘy Җ GlNB!B=esQQ P (&HSݞdqq)xWᯜ8~reZй[c#B!B:*WX,@Q #P$Cl+ kWcO\>hjk+O!B!T \TݡB!Bqf4q7)b|1Trnɓ93A pEQdnaRC2ǙE Ԑd؃HpāO%' *bӰVaM֯fIG |Vj#B!̋+>7TM3te+zIQ)U1)Px ᩹.D1 ^* d} wf.\&ɚtnڵ#Kj|njh8n|B]v#ʹ;Rhȃn%!B#ej߲RR.󜢪جK|iھ/)jq#]RUԦ*=(!EQR7Ø`QZ..޲/7?m÷i k溔k-42R&C`cZǯpcxQ7p~ZSG=9sZG~%1o03VF㼄By0SFlE^)?ti6..0BW'}=)p٬AP9o2eJoٌCwY `˷cC:|qחQL9pOu%?Bo yλ&&JߗMn!%ZneCJ|%Øy !Rq(DRj`g9OA2T#@  jhVRۻdUnEO\f_Ɔ<M:o:G]荧C_S̑@_;6gÆ WSh68p35^)}X~]&%C2S:aRWq3*V5 Q]f)'شk٤V+6l"qXbG<,]ڣgQ}X^ҕ'h(!ByصrfTwex0H7Cٌ{:R׷d8.Ņrn75S.\`@hyשm}'O?nVbl&0em>߲ ݽ_*1ދھYK³qБߑvR[߻ 9q!7r&B!Ak_|[^.5PqCNJt#ޫ\fsg/.PA@EEPOjU[h-jXW]p ( *V2+AH H~$$[w޽@y 1# YW^zeҮ!GMS>Pƴϸ~- 9z_c `qWsz&':ppiVS49‚У+S' P}g1iO>:сn%&Ua`ՔQSr0߸|T3'/CzٛVg;pۉOlnZ͞S(4U&k_Xuj/s^rasV\7]pNB!j h4NנBT74v- {422"INdqS)dHתl H1@Y0Z>jߕn<v?ٮbmW˫f:4g7lz{{%kuhu;I_u4z=W]Ǭ-+m􂂘l5nJ6]mˊ]R_%Nd:mL`¾L]X2I娜 ߟ~[pӶ\T.|`yz"x2~Pxmwұ0g4$!Et[rxh:_IAu)] B}yI|.\.Ph$Q 6s4^4dvV<_  b1N3544422ե.⌴QIv* ǵggv,ؓ¸_͡W>J:\61~mN >=՞4K6W`)u(o&6}]{¼B!j\/Ԕ:wZ٘a1@EN(JOO`(A$D"J2G,qkAqz]`B;<[# @R2>* [.lPm2!ea̰ $4];J?t/8!|G<;Z1^['%a?@vغэ7+NI"H6x5(a9G˒WU5ɪэ]}?>$w7)G7lfm4+ $5(&^>%ȫՂV(HKK{w#p';=GB!>DMt\ IRԐ!FK$9֟qEt(HMSs@][w,uZ([11tbi'C;YjRVa#8WwID)VVq G*W7+b6c̔.m3^6ׇaTNfʙ_گM^ܹFz"N=(-D } ۜT_St>o.Uߩ'9)@"ۥqȫeեi!#ֱ/F>ǟc 3O|3Ά] !Bs <0Y+AYןNl&Vs BԠhmu{'Mh/*˝SĢtzlۙKͬ϶7݉ U|qLܚ։ ;V$ȶTG:9*>&XF3_Dܹ.XsP $!cqĥ;>iZ$i-L:J*=;q,Qη{[$<(׭d&QCυJΨtVC"[ŶU+|^Jd~:C !B\&O*n]++SsT/ w,@6bӠqQނ6$ٵ6VCNcs]]zͅ B!/-ܒs"X'7R0bǯy+E n߲lv0I4̱U07߹WzVf{Z23, 0 BEy.Ͽ< p B#p:Wݎ0paOC9.gfԗn:nTq:: 8aSԓ^%4vT?-߱MS6 Baó7>Y|) LڈSfpozSol_\^R_ļ)v5.y%ZNΊhDX1r;+,$TahCRkB49Br"BFγ9|q=1re PCe,#"@|q}ȕp}լ*;ukl?qDnTp3Wdel~ˢU}mWs!y ۶x^<\iPɼz>e()]YY҃Ͻ?t2Yގ9nފa9{~LRO'l}N?SJĎV(wEҾAW>9gOg!U [4 pi_u0/~ZzX4NnTB!Cn>թ9߃8{_u6P {ti0ux[I䫜ό #*JkUؠksVe)J I`ڭ8o>(ФߏOQb^" TX5[IÈ+IN<돮tI-ߟL'LziFfݞ_Rfl{OX3fuz24LW疏G޴صIQ@kw|$֔/T_|H2hxw?ְ/B!PC߹) IDATco[Zio;vرckj/9B6X9I‚L'4KK{RP(!ƜJO'E?Z)+L=/94%ާrEB,1GSoԔl]P>CBQccC-%!B 1I{UJ$_"1k޲CjevX]QI(JGG(Hd2 NADqqqQQQQQθFU202ZL)ۤz"H7k']0׶)SU>[)/fdXn?ŦٟB!-w]:'FH`bW ظ$*]ʳ(nUQt5@}ڊ.d&$!܊=#B}.'[ mS'-98B_*%b{n ÔR!B}ApBorߋS>T+mhj B!`!2v7ܡB!B*NM7t?B!B!T!zΝw#$gcl"52kޱcݬjw, m^QfތooD)1A?v zlGaMG B!B$횼RIZ[H "9LtlRI3gP4 $eӎbUP mO@!ykM B!BF~e/L{Lf(jH[/* pHIنղ M+ϲ~hojYiT N:UB!B!5K!2p`+=Ů&/go\|6M#cbFjSSBi7F)Fy^e'$?8fUB!B!jI {t+3߼ݷF>)ᡏݎMz%~G7+q-[Qzts7^, lgAIgrE;of}=Lu։ى`cy{.N(xr#ŋ/R 
>~nmtJGwF^РⲔ2#B!B_x~sl#?n(ZժEVֽ@54r<"aFg8ٮqm;*/1Iri~"n@1f8v򮏉d(-RC 㶸̖d= ]in5ݺ ] ,2dɆ^!Ϧ[/38X+RB!B_ pܺu+;;ͭd]xL\X*%ꤤ<6Q|ze(R. ;/b1}z?{pZ}6=ឝuԁWͬ+sw/=z2iא#Ϧ؁Qe~s=姁-e3o$~{bZv52 !B!Ԑh uɓ'GGGKӧWhr*[E3τމ}w~?bٌ/,6p`Q޳?V듴'SJ gyGYw(@NȦ@ΒS7Oe[Ǚ~W~L"02bǥ_=&lH_Yb:d qB!B/RÌ>ݻ...Z5d#ベH\~Ai]`:&+;A,Y=A3rוⱁu# z =1cZQN&ӻ0 `Hy4)u?)㣫.m>Qܛ/ڼiڝiK|ŤCvNA!B!ݺ6mڤk۸dֵ<gN;ZAĢQI8~3C|Ȋ_v)vl)&iQ,mqRyiii\ X75Fҡ hC|LJ;!B!P#8̢ _|}#$)܍AY2mfWRE9~&e U bU^bMߌb}8ϱEֳZVtXNX|;ӄ0ز;v'ԷH758}_(::[{P4cE!B!B1knnn_v횢$((ݻw}պ ݈e[F}*i @4PmDUbK;(.΍靧x0Ӡq` IF&-SxCf5@~K͛O\|G*v!B!j4f9sfݺuNZ~+W_  @PtnƂ3> @k:M_Rgef?G6/zc\Վ2 9VvB:?y@꺽zuhFJE܌s{-ꤳlEf wm'?%,{|_(:G;{nUX5>+R4tB!mrh.J4X2'Efܭ>7 ͎ަV B4t-=GB+% =4LE1V/2u]eV#(("Ӣߒf|C;*_<B!P/R[Ƅu6=.՜ng{_sR= ز ]M\2wUgѳLV[vMWOagՎMQ66 WYF6K9`B!B_2B*6tj{ Bڙ1#3G$)˨aU3[ Iss"@!BvC&I(("Id2 :NAEEEEEE 9Ev1ls6֛mnnUVQ5B!BѸ B!B8B!B!B!B!= p B!BB!B!/gB!B_$Iv9ADPl`<;ʑn .E) DBj96z8B!B}!DxT$yYÇjeybG!$eJ{| ib\VC!B6ueK e[WcXshEWۚCCԀo !("KQJ2VT=n߱(p?9oFc-խ2//9ƨ Aob#{7+=-;B!BBлsLKQFtp0)yaةHΤ״gh┝6$5GL{ljor+ip&}>Vպ>gP4xKvo@~N;UiՒ ;tJ;;\i5J?Hi#M$j?}wOr削|C_ߐ4i\_e12d복E!B"A6(n{a5GSDL@מ=Y fr޼ˎ?hIǵghYDջ>R@aD0X&Ԓ_(/EM:\8eK)!ةsQ=:T6 bg9BQ{Yy?/ΡFKw^<})=HZ2n?U6 c4+B!BGSgQD1ص&$ V X)!LC> rdL%INNk|PʝXHr^}wuM*U8"V4$/ϋ/K7F ]m PfY޽?,8:~!BƬ8H"YUN6NO⑰FuVA/^|Ze' wk#p.Hޝ$ vX5&9L;O>Jep3M9vƸ2.=8%ʣ|`wnˮu(=:Թq/R zVc3;,H}#(SnVXқwˡC7kʮi"3'bS%?D1 *4g27}G Ӟ!B!4j~>o<_?gC¸-.UsC.47 kЁ cUs8~9L?_7cioQ܍s8yw))ݺT:#Ǩ7\.7Ad-F74vXiim&$ދ  }«KfnRV+5͡%?RJs J(QDi?G8E5hzr <{7gH^>dAi Y&w`̬7v=!B!T'2q֭l33377Zon,]c+t^) 66k+ ޽=~ˤ]C8kUzf5Asޞ#,H =z:\hWUB;B_u#vL=_* {Z :­ĿbrIX85eԔ\,[UֿPN@M& ;Y=9xAϳqƕcMk WoF楋1{ϞDyU⁹o]7v(FuCmB!<$s,1yd[[[Ǐ5ke aˆQ>Gy4y_rM/QM{nY[Vԭ1sL= j >l0Yd8eD\ocL]w]ۿ^TB'˷3Oއ'ZB'l3^O\;ݏTnbg$?+ yk6o5Z I~SB!/xL[qB Z}tɓ&MrwwvԩS:T'! @1~K 2S~? D"#K hXedn.YD\r萖2egIgB JV @BMB҇2 ]%(81&ʻ 8475~;n\+YOB3ҋ3۹佋3^ac  zƏUBG~6tPkG[nmVA߽{ťO$,@F"OV'm _ФeDP#Ɋ?{Z(gZv&3!^(RJr:[}'TLp~Ȅ={Yض673o RR)]ƿWw(|~=; ̈|=-_KVf֏C#+vX:l @4m^!ǘN_zqq:M]['wJY;O'fr/B!Pmy@I3PA^ 5@#;;ujmڴIOOXC&$-%@Aj7h%*Z%m*}1h"yCP h=qݟ}2&N ?9վ!jt /]g*SK)ϴF}VVq GRkr]?]Vh9c*iB_~j(9xC.i!#ֱ0O)g.h=,48Xtu56sKV!c

    +rn';WfT$HH8gMPJߢNDM9W/IN.l˒hqf:2NYw^F-;4[IJݟ5g:r9DټST3,}c#+GWl>z_5!c֩]SW^0jǦn%?G`\@fwZ^~S㥔|2Yn֝i:gT%7b#f IDAT=jʠ!>H&[e:}j.lRY| A2jnB] y|W*"+rw:OV Vq6 v59jbƼ+wъă.JD5"Z 9B!Cd+L=fo:M$EQ:::EQE$d0t:  !l9]iͫM&VxJP2UBň+|3KdUnQ0Rm[ Kê!{b0_ͦ'#;!B#eEsG&F@fxu5Z?p߄L _?@?FuSvWY0_\zegn ,^WwwXb}lR@Rԣ] ZѩCR>D3M;[zā z粥V3D!B"vOO~*ےplͬz[Y[ϞQ++jST9aAv&EQT*{d3zWUd%#`,LjmPx\H ep!BUѥ) `BטImO }G|zؐ.~ST NQAʠjl]5Z xQdMKB!90:»k4otC{Cu񮢂B!B!$B!B5z@!B!PB!B5z@!B!PxWQ)H"Hd˫yOBo& ]ó^!B!B8);mH^!Ҵ[߯(0vvEB!BՓ;Ej)ȶZ<[Z;b I_B!B!ToT3tOEy7QmN!B!B4)*%,ug̘/l; LTJ44n^7=ĜcgkK/+ZGvn}騧/y?Nuҫr2SnĽH-dYY@ē0=;β)xr#ŋ/R >~nmt[D!B!F:Ayr8yTvqK.YH1L枸cSr}fʒwE1I{;-M $;akoD1R.2;R?47Ǻn6X94t/B!2vC&I(("Id2 :NAEEEEEE 9֭[fffnnnneOWFyAڝL'Čy\mvF( _u2e|ʊuu9lfL1=|kYpg{|>4┕C%3@jL?y7 *=+Etfƕ}mtŹokgS0a*B!B!'O<~׎@?_z7;wuwo}LzuCLe`Z] d+8|ˈ9qчfMsyLLQ\Mxz/D`MVօ3/pCG4y_sB6R pυw:υ!"}aTB!BHI8&OV B!B!֭[i&==96t&AZWQ%m*}1h"y#zg֮A21Ib 0P(Қ8~!B!R33(—/_֤6/m8 +?Ubvdl%wX֠H}BJb[: b=La!B!PU^~ڵkw۷F}kx;|7 (`>chWSRS*ѲAI"HLYl!HB!B!a -%̢B)^J^ri B!#~H*YzP(Ѣ FY뿸N ʫIB8B=_CyB!T1E)a]:;X(h>oLRY%e [drQ Br8E W%!I>Ӽic(z&/ ?Q /#2"6IӵWCv3UsQ &M$g?)W$<3iV㾙=zG -̝w$ID"Yyt?r>"iKO2IWKbe39CχΤ+g%%Ƕs4l.E1~XRɓJEG'Y+_Uz@HҺl !("(oޝX~m#>͍;Ng_/bcbcs3ĒaѡcVu pcg/hPsoDV>`P|'+nY/Ά7J 9?/9O[nΒJW x÷w0}EjOIs%Y̺ݚj܅B54Aܮ>H=|NG[p x2ewd۲;%ٶGpoQ'3*hfv}Y&[crTo0(zeEE'ȶA_H1@seˢD^MQuEGG$IQEQE$d2 N' ⢢:!NٹnC.U p{ꮏ> !냽(=t7o f@܍@piT//uYO?0'9Qrzo.ByŧB!(3. T&TG,ws9N=y4l.)Ԥ~I#(h*/.m j:E}.f|W>DƸf-,UUzPSTI2ZRI2jH}.R =ݖxoxӧMeWwEٳtHTD:ңO"gծ{gO°s,姄>#9+[^D;=/z, lgd"w \//r|̊:pU:ɿ$ًwNK#{cyy~S"޿wgp$8 !IC+D^2oɷ8at~5Z]@P_Vֽ[WsO.>;dCȻ=tكǪlu0f S&03zl8޶JmqL7aNs#}! qi&o3!^UP͓TV=mZ&Q¸ 4-dXlFܰS N¨չJmU-}˛6:ie$;aSsܴB鋿R8dg/ͣ(G@Vb1\.7A$F7'0rj/*y8<#PlLph_HG3-j[CӥR^*~W>~1 ryF5 rBj)zifoP(T+JTcg\wfEzb5Au>CO^^K}#a*7R|ze(.y-66k+ ޽=~ˤ]C8`?1A[׽KhUt(z,OUg]=8AF6 0y+n$ Q^Foٲė%(hJjaZp%SeXt{vO-I2Xʻ{F맿{Q͆M!.c.[3ǫekcX D4 zF7RvzloλLJlgN)g|VkdxH| R$%˂%AACd;:sA(Ye׾]8_Yf@elc;:v!WB2'tz'm4p>uk\ֳiAon%[Yn B}q˖),MV8%ңo7bhȮ:FC굝׌ݴ7޷{K*>-x.K窴[-oѯ,#ŅoQ/t@w?nTiDV辁 Iulϊ$I V $R*Nŏ'EEǓ^.RL2pJ5pXC~.779F:-A8(Η.X޾sz.Fo{'Nr%?[%2Y |uxyB̑՜;l`yp2b\~ ba$@XR-0l7L3~p?_yeUI^^>^ߣ58 eo0엗j/U6OWa-yzR?$X\SXYa,C}٦c.<6uA\v`syP#\.giͷҠغ'796)MsBM {{k _MJD@jfikMMwVOmj֩XuؼChu֭`Tt]WhOv6A, f66UJj@AJoQ}PtaVz$&Wj޻G_]hurν&n "bt8X-ٳ/'jHA~ߵ[Plp~S)BS޽#$Nh5[wzOV$?*}g=F.1!h6{'8u@)A܅pu!6ѱ=U|C̪;9::9յ Aa99::k|ٖcaT[] yNv65|ZrNBg7HaNvjpAJpdT]5"4ʛ]QB@ѣfm졎~ʓR$ Ion qͯIf(KYg(M*=uR7e0Ѻ-W/72tA(!7_+v2-blnE]EM-6.Q'g qϮ]gy!t Gʍ՗bR|kk`6vHgxo!>F] qC:    D2F~עY_542Aqrr:75g7Dg6& .qIRQ]+"w)e&@*]vf#1xM0p~c F9SRޥECd!Wk]Zg6>u$l;I"ɝlt'lvVXk9zAAAQ=8 T#')ə/ (+)OTQ8bYvɬxh+zπ'B;zߵ4]VPY%An ~WEK#,DkgE @S|'o.ـV9 hO|h֮]F[3:@fu%[)zY*I;4;@ՁWQZ6|]HWuAAAn./L3oTjE Px6D !q^= rsI8l`yq)+MoѠs{h-8_UR1lU["J b5긨M#  RBP6嚛s\.pl6b2A/H$I9EW5^Q|\.k+ ,m,i$X &75mPЂ6tij;D65,mPf*DBAAA*8WMcz>P[{2l   B 4p Ed?h{a   y<1#%']$Hy5jl_j;"e @`jA"/\=̹"eH תշ    ƢEAAA)u    H    {0R%$ۛ-%-V)`Ss 6v%.L\;;;ܼF+VN~F 8ek{[H# QU hTGn;ܽczr :lkj١eVnuUٴHC^[la7{g:}vF#\#2mܾl s*baxZyOikH 70 wvN7jXUKϼ;~ooHr_?y}wbdBa״W^kT6AGk"cvl\ԲaS%\G&M V-{8ȏ7Yw5Oԇd% l<7^߻C@6xg)uR#jtgNB׌p6sao/@لͩ2dg5TBZXKϼX|0l!$!0`=R/ Y_u?;sA%A5ި|}Ր!OWŴr"HtyS` f{˸%&Kd*eOYlcRˈ= pY9Umx}A 2 (AVj>ңC#_S= ,?[s\,3 7)L~*1=7>|ﯷ3yPcG}Aoc=&rlw^vM8PAPR5bFA wog-G>:Y.z:;o~OR@D8mWT`.I92k#/l"-=rcBy*`0߻>O apläػ!KLcorfz1 +VӢzAiVn}Ҥ nfߧuhjR T #ʀ eT-H ϝzʹǒ;-zܷ,r ?<|-<'OI!6}YoٿaG"%f}/pL{~.tZ9e_w{ce7\*GOFx+&_8ÑF!y ^]a'/D>zI@V`VOȫ 'CR{TNN(GX 8|tΕH"حEn_+)*6Rf:vhܱʢh̉|>켏^EN[Ct+B5>vudKhQݓ^ S3vrz!NsKY85Á(YīWdf&4ټ@EO,B|n@Q+O?pzz. 
o{GDY*ko^s&fSe?kSnG] g3B5'IQ[βA8 Zwj!$,g 4R+UWi9cq/ 2KG+`h!4ۧ!?}E5Z0ISc"]R˂ \0 VYW DVIœϼzhum'݊n-HShI1jOU HهJ˥Nks[Ƈ!etifsOs~F[!\Ml>BJ \H{CԷSg 3\B%%sԬ͒=3j i AFV״s,yќ[; 檾$ ?kfݶh"JyVEmrlY@'!^{=m_;:3sZ_$7znpG455g(9dW6)&]]ٛ&50Luu_ha2 ՙ~|P58&76*A#Ьr8wѨZ/iVx3,Pl89TUH'Ng|gEPuё5ˑ_R\s}gG+D?[wX㆑:-R~9wƻ&~d+[0Cit;qQ%A;|ʜKqwA/l<9ηTx~8jZ!hum#^-HHL݈nUrRaT(Hr͹\.p8l6b1L ϗH$Ĕׯ޸qى@l>z8 ,z7|cGqΗ@o|RI9J^=|.@bwfBM Neg=qNQOm|,]׎Gs;M\Сo_=<z˞šݽZ $nG߹Ԕ6l+Q%@>fe@rPb @Hm&lA~ؿoʩ giTJy U[:wSNV2 _E z.\ri#jT8'/\rʕ &u.He3# mE?N9|Bs%UQ/i*?=;F,+\wGܖē}44! Oհ֭^&ssvv[۷?~!CrsJ!H G:N菉wyvsj vpyr0lZPE. چ ރ %܆lݽ:`W'nsctpW[ߵ[`x{w5`rPXI!ު:QE:9v9kRc6~n;=`NegUi?u[SVW=vL+)\N>n 沠fIP5\JV}v;-Ve mUNCg <ΧbA`2bHiw粠z^2Qn ,f&f=p6sƆ%cCs]Eiv\.mfvh9axl)s3(͠Smԭ[[}^, {0Q6%ڳ NZLƾf,>~翾Tz59n[ŔRV4Sc1>ĭlR: /w1 ]8w`ik^9N .Qݻ9ϾZx9#v1p6sc 6jJEv{:R'8Iug#0M>+B3YQcvupA?Yc%μ z~o AjjhPSKiׂRBZ6>65IU-,ujFSNԘƃoرVYza&Nh@~qR|,%!٩5ԫ8e{DXUr{2Xuޓ,Ni!*WRfCM,] )e3s:ylJ{CwuM%ZU,}sRuzEn+(1k7~&A*{t J_PG-"BIc,ش,!x}KiA,$2$).b!O|OÔ>;Wkmmӥ TxKngpi"| [߷d:40xYWg9|h7E [Lf$JHxiԻJRČ)Qv{~ [^S1U`^r7mUx:B}88*f8? vG;7|\z:!4)0K1NeB[F3p U.ET)RW-t`4AL <8_ޠA={nܹ3" j'j{=V@jQ*K*$HR)T~ lR&My,'!4WGKV9ϬJ[WQƖ<%guɫI6ӧ[IX0:)BbM\xmyFm׾s'{J cnHG(BHo$&1(#}N=Dq/~ڗxl:nJա4GH1$ AZSG"AG TS.ja,AL S9Y~?.+</6{5ӣ$?ITUue7%Way(RuGm2U@v\tPp;Ggj /RWۮ:۫+_Ӏ˪1<*kJ A/@/R) !f _|uCq[aŃ iLq ^Ӌ8Ԫb]pƣbT2toFƁ7݆5Ȋt7oN{g-w27#!Ez#h̦Pc~<ۿ3-æ֟e32&XIJ#(jL[/zb  }B"NZƪq1!&0pEDD|m׮]@^=mt& p@LfdRwl!-ݵUX3W5aha2:ӛw޽>Cu?`t޿=bx?uIj#wmL\쪹/xzRΎZ?fvȅo:V R,(,^+!Jyjxnd{ыz|5ۯrD3včjg8Y;h^ ܘz!/vGPH8,xER3wH./_. E!FHt& {)չ$)oZ[9Ab/^ ILLҥ 2\y0eVdmJ@tL`;@ȉ^ 4SPx~ 'k#[ V,9м=m\ ?SĨ:{f\)Ϯ?V,;i&ԊӖ̍Q%?WM9P܂m>'2Hʑ lu@;ԶBЛ]hGO=<@Rǹa|pp YcQB>\Xʒ>Zjѫ_Nb>[]ee9B H*ti7]߹]XlzgiҘ҂ "U$UjM#*'b*LcÇ{xxauM?:znxU' Eb 5탺 ]J~jh]>29L=b5[wQW{̷IaRY|DZm-O_}$D" ¶PuMԺĬ=o~=_yA@D"0s#;5:߾lF>nL=E{7= _VR ~&BطociA[n|e/?=LBI}((LڙO=M&܌W=|8X";-q9$IՕ?DpfIgR,={}d蠱o9yg^S=Tc69B H玄^J_U2ge<]+^R! &yw(*Yps=J V-cNk =5api :h5NVHs"O8yi]Uw@EaJ׊Br7[vdkмw߈-wҵY#ВT3'Nu~&CjzDn%- x u\ی]`qzg;%xJYW5 XR"juT Q6o!gzV䴓tx_B1WNhP"~M-F=b@ʼwJ5XwTd/ 9؝]3,@)F{>jUxVK&+[QEZQ Z*:=yu&3ɟǷP]w-ZJls:;~Pmo׏oćcBXoզSs~i(U[]ú&jJ!+TWLK զrUA,n|S:7aKA*?v, c+UUݗ [!\[ R|%|I_Ŧ߁Z)H4lw#P˵/3Vmٳ@L5 CQ/aTS{޼TKROtD' ֫[֭[yxӱ۩4N>V|llM+H!JkN&G1%հhg:iy)yCՅYudȃZS^%Z^!uI1jC% HVIy99$ 6V[ʼԸ꓾MAt&u 朼($IxJ+X HO 0x ҄B(px;muIU(ˢ*>r`\IsL{>z(tD(!I`xV` |Jf>;т [M҈U m2W9)A[FJHdd$r\.r86bL&AAK$DRa&Yژ/4X겔o$Kџl>z%T^ڗbeBb`1mEg(x<;>yex:+PB [AcnoWXW|O״a$^f\Eyk^C;"\[s(DD"vk]@{J%&yrh.E)}Jw%pUFW-`LALD1p R2c1lݼZV[*ONNJ*2ӼHCm*gOe_yu]  4p|w,Yt#ŤJǎM)k" P!W+.!;yeP^uXWY8m)ieFC  b2Q{{򤜚 p_k 8|;;S.1)rMRLr~xʗ/)9_rI ,k4w`D?ok'׳`  RAתqK Mel_́m af AAo8仦xAA1jnA    4p    RA ȴԸ$@AAAD37GPax665AA <̛b*ʽT >r:OkivoB|NkaIȈuFCY|4!h0v#gugrQ9g?ܹ`ΩwohBHD]<^M#d|t7Ykݼ7ϛbJLi~zjjG y>+"`P2(`Ckh`T\.Furn5%_/@iw nG{|w~Z R*ϊR~N_r c3眼XgK1012}#UPPHM-|@igf9ulmS^~cW!Rɾq`qS~f>gO~6B_^1Tyx2 Hd#ٹnݺo>~CWNvG$aQvu6Lo^ ^ n>nWR 4cݴ7wv+x!H9eQ`۵Xdɞ-% XQU8%YPAZC/e5Vq_g>(; )!'/> _۾f&h(X5=zZ$=xek4 XMQyX2 H4#oر={~^xqĉ6VRH6K>6Df4bOsgdzFXY>1!>]P /T(1zGuíShFp%6z-O1+V|;w :(ぜL6™\JA @ (du6SkNl>`FSUn0%7'YLT~z ӧσnݺչsgCK5$AYdҫ3ӀM y<;/5lÿ=Ϲ`I4mNA_'?U~V3鎡Rb@ZzPzm€U1JFAO:NטL)ccik{Clh҄#wAcGF)^Q%k )qZ{7hb͖yoP~@o-jYX}Ɉ2kՌsC|*ByALi<V$4MF}3G,e!s:zi^-H[G-W~ÇӜ7i(zT5CUۈDFFr8.knnr\.a,dAD~~D"H$&ళP9ۮ]4mb)k(oL[ `mW_˥:2yyXJ^mY;"60u3tk!Nw{Z?;x+W:<'w ,1]B~k\|h{pB^vڋW߯"FߵC^lъ(,(}wDZ7s Ek t\ηV5I+M U@A,&0pxxx?~ŋ}΄$&&vXUxE?ds !Gv׾۪v~/\)xd`?ƷO[,(Y4D28}׹ B*ҲϜ R ;fPgow ?t5.5b 5 {ڟZ&dՉ^\=^Da9vzloλLJ7)$8q WP3)'X݆vޒdn6^ 0ȫ_1Lyl:W;OFvܾ@\ve=`՘%w Wf[Yn B}q˖M€ h;=`Neg>vSZG紭MM҆& Rf elwwСSN)q$(ˋ^<z{C%mp6;p!J bڍ @XNGUe?GSvq5 Ĭ?z)j lVE67'ŽzYJlX>CQ@!c@}&Vbxt0D<# g(,8-y/Y7,Ni!d?;>6^ReɚM '$*nSP@ze1AQl罡sV:.Ԃh:O(͛ ]2A!Hdߣ>Qآr^}֤)$H0;#рc|h# \UԆ=xVg _k?mxxV}8{C[ٰ?yǕ! 
Y.3GS4?$X\SXYa,C}Y RǫftG."@ҕ `vtЈ(\Dmreht/QZBmd6 #5Z~UbTn99?,I1KY@2 Ho.6ÿ%zDW1MR*ATJ&E^l|I~]wfae_hv r99+<(! NL.Jh9-Ȏ .`zM(#/ZHDG|*Έ†c@մ3@<#^E4~k6Ӈ(g ]fA `+%Z=U 7K[BQ~=n ;p={+^χMU҄aā+YՁ<ȕt·UA/2֝ HY@z0EL=sUk_ϓZW}7i= E`xhQԂɬ*JY@*5Abʇ?M-HLK)4͒?A!ȷ8uF=XJ7Y?.6߶ ` /^{uOJD@jV]%.3߈#H$/6wUqd[5D5J>=&D|/O!!-%otfDaЯg(!l+W2?͌z)i3I9HFA19}Ԃh:r}?oGQ5 |[GGGGGGuC;5*)wBγU`Ŭjl\ ?S{(ZEDEm})bD%{GKdO)zGUAxFmRlp~Sx>ޑFNh@dLرcǎ;v|HFAQBqMPu QSVmm2OB=8§ 즮:ͯ+O>HH$&y.`d17.%$GJHs70w > ǨKGv88R2aupr֠ ڵg\4Q Y"QNzңП{MTKIUӁxFj/}}dr{kp|M\1L7Q Kkg\Rn4zPQ_ IDATząjs8xٍ F13t¯ H#3PpQ>t]uD)_ULyT#qlt)Ң!JN=nehrpkd@}ݾ R䍅 b< g{U^o1sCiɻ"O'u$N;"r7()E%Ԃ CMyUkPb*ЃAw a\(E[=y| / ݽyTbYv xk.cgU6vpEUѬ]vm{|+g6uej}><_i{&Rj EV_{ ʔ* iVW w^f>FEU xrG BO&U\zyҘU_F-m+Q qt$J$c,e)娮|W>̯HU۪!e!sq>\R&]G K@2!j؋A긨M-ŮN?6^w/m iB!p8<85b  s4y 1 HY:Z^!uI;oPT$6R"##9577r\.ld2 "??_"H$\ 7W'3E=|=7MU% 75mPp^{Z4<֑ɨV5tWa̪;:8%/NP4 HY:Zd$Н7i?Ze0 & ]y6ѲZ k`Xu"޵JSoZuo AA)Ak*]~\  ;.qC3 |+ɿA5AA!R[IғbS 8gS A4õ`q2LrnjtA lҪ7߶  Rv)G!) J:س=|5FLX$qoφF5)x]YJ;xe)"IQ6@Ȯ w V9 `Q\J)qAAAArohѲo8|g#>u 4L~x:;[xN4Zc䅓rM\ұ^~lW;4 z:!/LnA|7WN@+!9&.G;УʢZ S  >_}swuHE>PG7pxZz ro<LsF%+(?|"N/ڻYQۋF4noڶF8wpvrk_eY;ֆ0>P\9,{g) qyדˋu2TAAA.aׯ޸q$,[ _|5 )do ZZ*ɒ(Fv;zsN~saΧ -X:wLϦH92+Xö<ȑ#G9fj)ܺQwF'OůjEkCAAAAyp9;;׭[Ǐ?tP#owQ\[*`A *vbŊ{'1cXcb-XD,XQT5؈P.v߇ew2Cx *L<|.j&~9k:Srli̠ndƬ:pc7?X)(|0B!B'dFp 6lСC 3gNGQ@3kӢ%񹌤ve{#ޛnh6ΰ%& !Prff˸"Kаj6|0d9 !B! (gΜYfǎ[trϷnݺpeV|R4\1qSVvSx7er UC7 E|H h~nߐxb;Dਅע޴x/mb 9]\}B!BEjRboB>y%\zu5jԈ/|s7[ d}Ǻh.w`dj7{ 7{ @6\SBy?2o]:%KKv-Yi ŷ}={zxQ !ܐN$!J9""`Gڶm[bkBЌϙi nu~]g$[Uryyz`=NB{ ɑAivumbr2EƆ󹣶}B!F}npHZGHG~ rpl5_UB1bL5VL1BJ믿?ޥK͖ϟiӦHڶZ00򥵏vu|nWb2ogjfqPFH21Luʼ`$6|Bu.Vȁ" (rBHiƕ&޺Ɖ+ON YsF]'7wv'P{Q4)]JxW B%ʖ-[Fqʕ5k>z6m*z{Z8R8]uswig]޶({mwJ皃߯^-;,h8PPu%8-Ϩm=Oߛ#&^e^!$Wm6|ߵO-%-K+lBȇyS^KC%2O;+[YPli.3bR[n OHH-ܢl^ >=9-/#6qK]T -c8S!qء`^`Ӱ1 7|ݼNZHM~CZG~!$Ý[q%[/a$wϜ}qo*7gpVGqlݾJ2r982kٮ0ٳ g>)hײE \5GpgFdZi6umr۾& nfٰ{tg߿Y\ڭZ԰r>dsO:iռEM ]Z:fkUK ѽCN^5Trk?xyJ!𪴙PN5y%{yͨogv_~D*axǎ&_1M$K|d1H,$ J=}=ñ߲;ߏ{:9=1)\fiZ6  d ^wRY☚cT't0Rshz͞X 2Or8lbTz`хs7V/s1BcQDw*+<"Թc:3:zZ~qao|Q}jִ7Qkx #7Z_m>uʹ1Nիg;yZPU) ԆcwQNG: *NZK%e%[:kυzu?sgĪOB!Fdۺu?/ߵ+pJN3Doǫ} nEo% },fA\0[ͤI/2Wիgn_+QlO,QZ8z~^̧jlJ >cVPXye1wPSCxm5&aͳX#_.FkC8Mw+M-yil^$B1، KԸ}m7;}0zpCH_ 0Y;jV>o)究]w1wFVʴ- \t{5Ew Jz`<0ʘ]'$x_Z=2dY Q, ӤuyiW=@^FdKܿ2(QF׬Q]ݬn[IX3m啠r0x&\ЬRɟw߶ǘ^$BaCA3Zq>*eDO\foQYDxcI]l A2|Td'àV<7> @ӷO VK+ uӴC-tOؔ`RBJ_*ڷ7~ko^llS;$$:V,/Q"t jt8E(SĶڕ'5r$ʭ#$R)@ 91x]0 #`4o=_/O>"A0iV!#~S~߇wDXѠèFR s.9Tj'Vi2+N>h7$/#XY /|._ئs:4j΄>E|\c5@Z-9P&o]'N &Rh=~n5D|ԩ׻oB!f C&mÍ"/ho$ %Ԯ''[΂jAk?4ɔ[8 _y(&_j/U?u s^zOrfS56%g[#l*;87XR1_{|ɗ~N0g.B鐯9r#YX{V5 h]C}[Q ~oz/iSYwH&dx _~Xɪ1k0 5R^\z $B>|ӻc ni-f7I\`VYTddS>Xj,Mޤ%=}e<-g)F1*A)nlЩ*:Exk|f~>ᗍ6lyf^Cg)M^B$Q^.uVZCpiҒӏRigKo 8Y*J39l]L BQ7iiiRifRZø$T&e!KRiJ\2Qẑ_,e_(, AddԞ@ps_ŽkY7  f ~{y\&-橵ϟ)%R?YB>yLAttWW/qz޾ jǀiC3fck櫫Y'm[ α@?PWgc16asԻM5l&cRgS[]yC& ºmR9?39 !|=bCk=J{J%Lvk9'DI7UWh!`,b$SglT+X3?x6&^{ڐؖ@HG#8= !@尴C*sxCNUqY?P/7Mi8%dv'D9pAX23lݽ#Ψxe;jڴi>=#9`0U-36NU+O`Tji?{:S1!r[^}LT[r0ȫeV ryLiY5f,bf$ԎDopk^ \ǟ{{ښ5ӛHpy}(b`_!P1f\ O2x,ۦ~v5/EGgN̍n EDrCIӮbqRLTR*D"G;R*M_$RH,Pt{ rY]d@ r"mėRGG<M9Vt07툩ؗC-Bۤ"##P( BP |ra&;;[T*ɚ&W~RpgP@aa|>j!e} 86aӹE'ƛXHTm lx~uTwmeOBռ6i +Z_*U4 jժ;[\!W !~52y> + w,f뢟m]U߹\jCJ%9!B!TɗsO@b((5?+^N>%eLٱ@\ }`gwrNwhQ4? 
!BP}橒LepnٽS]VܺQ(Ar982o۹#'a'oGOAֳѱIrDu}[>yn۱i^2.Mk܍vBJJ vv.[7a+U⫤Wo=z8ۖ RR \B!BJ!pl۲E) "}vr œ_ q s0a]j& IDATG|$]eVz3_Qe-T pJhK'm RدOJ *~/J ՠ B!4(3g$'';;;&d2BA9"8g:{ d]^,,c}xi u*Ie-^3uԹ gYwy1?@fV^NVۤ֟fviQ%RniB!'~RK)=JcذaժUp_e˖•glWpg^W5kUբf rrXm.L:0$ϝ˜M=fɌeY[,ҩgֽz0̹!B!^tp 6lС;v|<~#nN>h09w욟kjs\yhϼ9f^Ecڕ..eI%Դ44^.bS%upB!B!\ tp9sf͚ ]tz[n]vo'2*L'3,Mt Hz_չlUȊ֪o޼z[uf B!BȻVիW7XFB^PqKvSz|5Lpp쿃_.QTN!B!*gg爈=j۶m!Jd<0ɋkyY8;z`{;y:LGh{_&B!BHqS>}-!!!ϟ?oӦͻ;iE^h?6?78E|hnB!B!}+IFl2bĈ+WԬYѣGϟ?ߴiSኒ -G [w[A¯o8=iUB!By_Jlح['$$bnQ-L2so |.՝B{f_6wP`UTON!6Ӂ3G vvX_׉'M}peׄ@XbKHx"cVt.SB]u,Ĥķj+՝JPΥɉ !B r BL+E|'EQw߫Я,2Lr"o1ۉE~sk>u` ~{F < ;;*t]5µ[eZ_>}+v]Wj!H \@aa|܊to)ddYv V^ZY>B'+3n|wAU9kҎ_BK c_ Zl~oo\Q޳.^elUy.&yqpYt ŚSŋ]M/v{*(w r5&Gf݀Ռ>u WOdS%!RHpn WK)GsF'*S'ddM{j]'ػiR!6;rnr"B?u܅my̪hYZ(wJhaB!;u+>wn8R 71yPw=*4 E1/BJJ) 1CC8paL&kATb|.ns\ vn}R{6tw[rj_66N @ytߡWdmj5ܱM\yv0eŞ 9z7-) \k޸aȲb/o3(#Iïeʴn:d=yn۱i^2.MkMUשZJ{z,pi7믛j\y Az{q2= Iۯ!۩y!^U;|?F)pmٞ>z82O_ !kܽc{n5,e[X3)zu7:6I.oJ&wҔ`. S"4V9RVRQdA ̉حq15gTC}u3e^Yz|p偞{M $w,o+#kچ&P%G`41$['2+|xu\PAL]}uǖ5F2gt)dHIfO3f:^f?c,_˷i.t };Uh j?0OaW!ƅѵgޱ/7qj%˻7=~2hQz-2,g[X3r~lա9B?brpOY N1k*eZLe"iO{>aŖoBZ( B@y<ealRT*i!$O|.bL WkjcmSFztw/K+Gvut?U~#[fhK(,k=2X(A>e{dRo-x2h>糀y.rIn-Ǫ<.YQ{R# 6qn& CzpBn"c[y9Yo"~Z@څpd?Nkccl/8M~?h&fs5\&q٘1em-gۼӏvZޫ)Tͷ~9{@ay XVIPb1ZdYV{B޳`bY C@iIs ITmCHiGKB/<~H94XȃqfPPߜ!ncVno1y՛,lebIS/ Nt1!@N_&G,|Rgx_w\5ߪh󼈽o qh԰o)lN57qE>zUA/a0Iwͩ[שgֽz0̹H>R3`y53$_lHؿRʃ?65=`!蠧'îpO H@]vn+@<`ԭ-oBYl-fZX=c۪XYk!])Ǿ ,)jywuSi)s 6ITeCH)!F k5. V͆FyVnm\%[d\288̂ d]M_o@h0 T*PqSce|OwX؜\ Pk*X6LMHwt{ĦJ4E=I|Ena";{o8W{i6ΰ%JMPF\Ouռ7nxOw_{SV!!"["ضZ[=}vefv̂X:&$S`] z4#Z ϻv+ClGsT{۩%ҧG-𦭇7H5pЭlaH.c:X ּ`uܵY*U]3iLvUտ7o^y_#^O~B8ѐJݭd{{dEsMHM[*~৞zKs8NڴfKqogć|.k}B8Xh-f[X:"SbMu uUK 4B+_Bȫ8[|Al\1i3bcO2 d2_Ǫz螛Ghvxn]_m1w˥y\=S wBaVJu̎ލպ6Ɍcy* 8K l}ׯcSmF uWdF!|$,d[\3,bKu }2iy%RJP!7 yPOC!yyz`=D/"\r#2 q[zs.]:$-g+KF cAs6~)g,F3-Wj[=] k ƮVזlt̳mg{wޣ2> 2eߺB),dha e`I˻)]q=$$88ľOxX|Ⱥ6PI,.#5 A UUN^I*T4<UOv Ox+gϞܾpT7=(E|h8 I{iƷ+a!W5SrsӦJ;Yr+P 2 B!d1(H [ʂX:Z1X&-fb(L R︃Cd˖-kwѽYlٲ)0!eYr,V8Ϩmo}3("[b0sO+ g~ܗ07-Pާ-fo&[Gf9sS7P9T.Z|O >KF^ɹ4]_ћ~++h7+usR#/o[J90.Wdgyjb_zcdM2= ! t}bQggHurKLuRb9%Rj*ѕs$)S?fM:L"dis jŘ䶒$g(UH$ׯGK䀕EI]ѫq2L!8|P~_M{|Yj~LҬǖGdL:{.=W;E vވ{-e&\?O'>?^I\גA-'_s0(ٰ%{:g~db&_F8t9NoM!{HzsFQ9u_CqBȇ*+355D"MH})(HRSS%lPdDU3h9(@ [`, AddԬs I P!%Yy>ctnj.ļ'vWj l51WA}GT >&Gw?0{1בfG>f_ma {!g鍵 !Q9?7U-:1^-2RslڻE JfV}}:*R8HRp7>Ppg^LuqD8qN;t_˧@}-0Y ͤI&1X'-c`]!޻QXA' ڽ{ݻSϤO?+4*lџw޽ې+F@S,7{ݻ =,i2 5m@Y!Kڳi7;nݻw>fDgo8EL7nc&[क़4WyWyÊCCĽ|sX"SDžmr\k 3od鳀yj4v}CgepdNS,֡Afͼ; ݺW%[Nenڦ3rި4]9W:12-߲%;czǎ~7kKE5XfN- (^ukR01MA!(ur[,-2fU kv΂,i&M2m:i1)) sf;/ sMy8v: }|xg'UO-,&= ͼî{M n,OY/51p)dH}GUz=ҰϞMXָp߽Ƞ:oqPB"y)P "ccP)$t |iNXt]Xk5ivd7iR͌6.FdK2&U /.W7 )rVe+:[:cVrLֱby1% B+}ﶅeY |dub2%|oBZ( B@y<ealRT*+*'Ml^S viQa}5|.~ԣu̵_DsecƔYQ?oC7-o!q𞮘|.Tm٢Fs۝/ѧ ౶%sTQ]Qkmk|ѳtmQem7mlr9wN!  
xR׌.|3Ov:gIS\:ש;wVRT%7g4/1a ;ya5Y-ּ>P^P,t$[rnbçOFTtqc[c !Rn[X6Y̧IF1X'-&c`]!{h~v~߳LؿR3ʃ?65=`!蠧'^x 1Mկ^,I[ k7)4>e[,A83(fj17\7= k?}w==4ݺZ9~{j_W rq֩gֽz0̹xO^st޷װ~u론R8sYӁ+l=/Q έju9K/1Y-u?:B!)mspd{@|u~6ΰ|7Vӻk9b MBĽ{/ܾ f#O֟x7=Y՝0"OSnde&$ܻqvt bS5;e]MٿnA_E#$p콿ۿПJjIx `c8B!B6#o&xq:?kCzMOatfPğ54n߸GvX4ɒu6gȍ1\/JX+ClGsT{۩%ҧG-Gތ>g{vmq4yʣRw EBh pk8P#$v..&:+5X<*ɂjQzMۡB!暑˽l0w\^J4|AlܻO2 Jc]ev4m^9 XWbᡰ@,Z~TWQT/.]Q9B!SS q 3.@^s-mT5{7IW*bB!Bȧ [+{:g'gpeĆ3Np07kpz!M:,u:j"5yRkE.>7u튶`$w6/_LSk^?ə\;;,ē>k ԫaϯvuېbUǎ08R8C B>fbXǧ~RK>\;8̿Pbdó1toZtbz{9MX֜*Y5Tz`хs7V4^I?bzTSύvͯ6yQQ=u\[ IDATmLǸ>~`FH!B!|+* *5䚵B5XMZwokjT+H( a/_/UK^;^0_p> X{4xsL:2-߲%;cW:4h֬wA[?ҝ#)`B!B'Q#&Y1I") T @%DetT ܔT.gx"Qn$/cRr .͕&_s~_|{DHƜ4By>6>%½R|Qdd@  BP( >\.0 dgg+JRYaN],%6b .&.gV]jn?E֣c'_s>Y B!B!QzqD1;T7o<`Z,\Dc#$Ĥrػ Jf~,-2 "5)1Swtv5uLWFvZg)*gk[opW%bB!}m!%:8a9S_xFL͘58DOV7{ @ֶ.9b+N^X2 J{(*Frk]f74|]F@MC>/a,˼}zGdtCؗt,/vصdnoz(m!%:8̪뼲ԂJ5mD+ݑӒ2S%YC-TIbXvmW*-'^il8;j˱b .ˏ/ {Yoe W+5o<]A*$_͗,6uN1sD2?F=joe[mWPL%[B}E&vuD2B;'73!I896l )Z!fQWTT-{!/9rI:,jdsB'+Muyu/T|x+ON YsF]'7wv'PTJ{w-2!%:8!.o'{jA~Ѳ4sX(L Z*-^`>5Dj*K:'|N G; ?^R.c>ލbl R#B -8Lo㒭0gN>r俸73k+wSY #8n_uʵlA;م3 JuTskk٢Nn (×ɞt'R so?vV9O4Lx̘aQ!ߥ!'~a*;8d8㟐r,ګ]kwq_SfWB87_}12{7۩b{tg߿Y\ڭZբ `! S iq>Y̝GŽzʔq.[Rz4uYRfB>ZIvq)3ʨ9\qZ*oc+<"4Xg䠎Cg$?Ω}ˉ2FC< .} `&wz y警:y8g k4}0?\tZg}#ԊVl)lkϯ9!!56wq߸ zdʐ ,o{ԆcIsԮȀ A.s燭FpZze(v8^6ck[)cZ ηIEFF Phmm- B@ <2 0LvvRT*4'jejUyCH9O3ZTRnYZ˽v6 )ݿs'oVj?Ի nYQƬ:鈐 FҴ'7 b7קUi~雽1WGfb07ֳv~UJ=cӋ=4]05/[[VvsQR( [k֨nVq7-$6> rS5PG4A ua@Vd_(uN!(1,ox7p-ǐ ev,bǷ;0TO*}}T&_Z0jy3|̦KR*C“@'srs^ElW_(]]#-\,#Frm &i`_ /O>"A0iV!#~S~߇wDXѠèFR s.9Tj'Vi2+N>h7$/#XY @+#gÐjG/pL Ryj">t|.L]@ x<PWyk~u}QR$^y\jyj7y{:鈐wwZ6cS3Fέtjd8;h#|> !t;;h՜ =|*$vH2ɖ |ڏjf54SןV:}{ b 9/s9s5LH{ǯ)YXϒy2v~'L||zQ i"%"I>>K?'ѳ~VF`UZM{!Vt 9+^#t z8ZWh>l/j5P@/6矺1?76Lp/. &}Ͻt_TZA;<}I6^×w>VԌ8yC !hX/iSYćfg w 3XeA,R7 j9}7i)qqo߾u57P B黸ײ `,UUNS'8 w )RE/=Le7c~^7I_;X? XxzRS%rm*t/.ldy%;봣jR$vt oUV"%rPp;/ii-Wdԛ+2_!:/lC-E|WƔ:'ZDbJu )H'r*GCY5<T^o%RJM ee\of.?w|:_I6xS%.F~usw|gŞ |V"ǡצM 铰_ [qF'a'oqm <v.Lrl&v5}ݰ.dǁc2]4o kjR66r=wI2~.Su˹kg/qkY|M@Fݽ*!-%cڤyXxvkv;y g?pe:DȃVv;#W8.[|F]x" &f(YT.k d7}+2_!:n8Q#WHYWF:'^ fAJVNXnɅ?ieX1G/λkgE/R$IǸ_q}>9kAc  w?J;{.nߧA3o7b~gn^?5]imä l}E6.]'keqii{W~>݂[ڕ=/l{^$W=vrխ3&op=Y]jز^`= @ۼzkȇ?L3ܾRIBԷ\BlN=ZxxO>X;~c obW-ղW^ E";С4O?N\OFSk *a;6Q&^a!׭bs!Ϩ$| gϞ={{;\49!Wj z do, jwM}qa$R7.MTZYJYmjmuRE\PYʐJ.B9矆=sO{<7Ss-$?B*rj?O.ˣ?/zӳ[c^G>Au>}s]z5Mo԰/nOێx5|Ѕ䴌[X{$^Wڇ$i&"@"BOҴ̭Ǿ۶!g|o~>ܱ>>3֏9n\}UYn]` >t~_풮[];~ḷ 0`] XSKpnY<'TBO/Q "mLa:~qg qWNo5}і۶v|~j"Ν  i?}9w3 EݠS>pS:[$( EzK'nI9C;@I>b,t3&E[S`*JVHDZ(z3TIwN^l 3ڢ <ٳgϞuM T(3d1G1 "99Y鷒8BHI My^8V nhtj>V;H#}qq}gΉ|Cr +]kPXqZyN6 4fNu͸5ZeE jw_w;btpVG]tLf΋^- =FH* 0y(,:OS[p YuŰq8U#艺k3۔ib ;gI 9Rf#Q$9P\FxVx^&'tzM\mz^:nm0ǭ|-Q~zɄڍ>ێ6i.4f9GB{F [a1Kwxo_ l9LMiiB+/3,?v:%0Wf*4ΡG5VA {eib|)|!Uz} &Z(uN!|g_~<(sO]j+q3^rZfugvwg+lhxVO*,^PZo۽~L㩳jlݧs) /U[?:6t}kt+9%m. @3#hv5^p($BvH9_aĆ? q8 bUZ ]oyTi!FhBM~Yw-UxQj&AYj% O 3h5V:'}Ԣne3{- 8wֽ"b .ޭ AFY]q6jq9 ?B*r[2a!78CKuV0'`틊&^x@^ҢuSM=ዼ?]]Z:f ^yW8@|Io8eLn& K@6J6AK"[\O=T`R[: ]{x:7TxL=?fPxx>5EPsDRuBA:;۰YyixViASG&$hೲwv ~]3FYj^VgϚ.P9!~T9L}kbP_fviK£d6Hwx$n>+Hz@$uco z˰@HWΏB3=,BuWzuU UU8 !B5upp'[] M^9u[ Irp%%3 7rsڨ)\ %'',,f CKO9O12#nE9+YdU< apReuXo߾?i]龪RUN7^3ME(!ҭyB!Rͽ[B###"^ 8~~>~}xb1yuDOw,$.WZp@oHXx[ -C}(J\<ݦu\sd^{&2A.?waQKh ~vb*B[B!+98OMom1V#֞\`%~o$ag0 ۾t~}j$m3z{^7=bZ&7}qnR+ XLAGV_~ g>eq7ohҕcsnW Z"T<<++5k@#EG5O!BJQYwpHje7[\vh٥K/^]d&,<~՞Mr)lrz{6ZW4˦N`ǡ_daP~&z9{vhؼΆgjC`fM@W+nZ^hrs?6xͣv3 h$9Rmo,xO0b#eOl mjtlQk06s0y:^^I%"R!BJQVB*Fe&DkGOX;ԩi+DK%bOBAcah_H&Bv%T%.$/r  9u(s0o 7*lkB!%$$eYDbii)H$ ˲bX$ BaRj-EmWW>2W IDATsu_jGGRj!stZ^L[B!BHESB!B! 
B!BHGB!BB!B!upB!B!ҫȫBMFl\TՎK)̸4ΖQNfYT@LB!e{fH;B* ~F:U94lN_Vi7\8}ߡ3~LjVBH%vzW&ڮc)}΄Ʃ-dQJ$pIBP!Xb^|"n:1H,{ϱ%n@NӨ<5^ VYt/|0I#!v%Rm߬i,dox[vps"*Gא`%W$ӖU TX/beafS,)WPJ\*=@"RԚcd2&B,wżP{~ReU>6ΈS'l-F(;Y)G5SZ ۄ򏁐 eB hb.gX^#93Ҋ@*S G!T|1'f-7ŝ1 HqU<ޞ?z`XJ!]^kcQſU !EVs6ړWy@qCz7ѶT2n^v)B$ͮC.m9%j1苁Nމ;j^zyã}_ 8ps;iSvih?Y^.vme`Vi<[߫^K+3b"ӦO=ڛ+]?/݋w7:9%69%cҡ]\ C)Xef]?r<7~s)]@l) :{d{H2n^~,H,w0) Ӥ_ !>)IpXX-l95zJܻ;4^dt{MQ/~l`MUc z^ӺbZhP i۱&wwbk  ՝? 4鼷YFܴH!aLտ^M&4mDg~>͛ʁ-jpqC;( =`P"0S:(}GW8  $6{jw;OK;tyO}dҼj1WM.TP$VLۧ5̚-<!T_@yJu?{C0 2ӱܐ(m\iNN֊.j&7EJ1CHGЭqc=~~H5 ΰ[F{ٶ-+(RdrT1VyVgsSB8v_avo;sn :;7Qyvv_[ ^d-)2 X-)tQ7̳NQH!kZZP0ƻxm[7 çoyAH@HE@-TB?| ΢w V/!HmNN03i^ D-YB7RXtX}wO}v=ۼ~'|[KsKV[~y*J\ұܻ˶)Tr["`9o5 T*Ur3Gw2oSWx&tDOw,$.WZp@oH}bmsN^֭[W3O[տCk? ]ahxWJezRSbp~ ~~~~~~^fy!2R.W&dDD$yZJTryBT< E Xw7 ADn5~2,C}M{Ô"($"@HGd !/y <[crn#/q3:cSo{CŦ3vۨ]Zh:٧&xA9#ARtO?Y< "Fu۷Cgw ͒}uyV^h_6hUDO VS+Єջ3[T8!GV\~]2WP ^^z+q̫c.i8`? \+4r-/ AL*VeX9f99nFc FpRibv%vѻ^8vƿ4m_xj- ^I?4g~rq;\Z.\h$9m#;& Yv gn~wF>~m;w9`<;ʊ}bK0Lg$+aX;tQI bc]9E_φSoBH*߻V o&)TXˁ_SxP=MJ"$$eYDbii)H$ ˲bX$ BaRj^Q!zS=gb L,MSȒ91|,j;Gux6m ||jՆ;HdN*3*?>(殔z#B*iMVx3$:ή|kj-5+b!8}@;'`ڠ}Lf  !B!:8l=?; -Q9{r\wz-B!*:8$6NuV} H\$y&8hu[whLB!RP!Jv&!BhXB!B!TzA!B!J:8!B!RTkrC B2LPժC%U B!BHGB!BB!B!upB!B!ңB!B!Tz !B! 0;CFq6t衼ccq{~M[)a_*ȓػA7z[2νռ@⨃B!RUw7=0Y_Q.XYQ1Y&Mթ'yKY#϶wq8FR0fw",pHUCB!~,pzxL/[{v XV]#i?؊Yȓeͷ,|? ߥvSk6x+XwB!BH٪L=tĩ\`$Cg."Ojw:xidžߜV&R P!B! i[/.WV??)K/;_0 qKϨOE̽O&R]7 g.]|B=:yiŎOy ӣ.EWo=W!gD Κ=Q)ثg]wLʥi>ZZ)8yȭwfoߘ(ahۉxFhۤOn-ua58<' }Ǐ߱U3M&rJX59]ݴ30{8q l=a>0_MY;>+D{Y P±{{1*$ B!{,O!$mygŒ[ `x S_ijIsK:ʯ ]g vv0Ɩf3;1g ^kpWM_WۦI4s|"RRR>t¹]5(p"!̈z#i9}O <=7sz]R(:茢 M1ծ Ҥ0֖AK .V.|bm#]5A~O]OAJupB!Bʐܲ=fcJ<}j=kiݼ_oAAԍ1 -Z5Q7-78tC;Z3zcÈy'G޸pR[ލeм1V#mgH~xv6e&#|'~X$p\-bn&ElQjݧ׭Q.z7NxI+LwmC65$v6 n5w; :8!B!eS^] '/VzIJƸoKѭ֜]ֺSMOPV#~Y:zm  4_6/WQbŪVmˌ-g kLbi,c;zIШ[;![+i?֓<6H(h˳OHDȶgw΋3.S>?f9}#׸5iݺ􎮉'V.O&(!B!U5P;`ƍ_bܞ5N%ј(Bar u|B-[05[Q;Z P>@%MWliNi& F~M*ˑF?вVOXޚM },V@c9%u_UO*lXM$ic cc>HT-`.BJ B!^S|cQthvٮ l.>zw4-m ߱c]r&(Wz1`ɨ.>N;ٖ'fP]|f 4.AmoVͬcQŝyn V( iр= FB!2ea0G0{Tmj߿x:/7 J3k0WÊ몳!a+KjLXb:1.]1OuSRNk6HBupB!B^ ,&.OL./D\ qŨ[?Xd˖f9l.Eg A`Yi-iL%q@VBW:@o=jCZbй@[0n(#ߍm@mW7ou95l>p?ۭ >j9M2JJupB%(iie6\ӧ*ʂ:52 ZLOTq\FGj?go0RQ0+{?Xyޱr;}.?zPp\Û/mǙIP /7[}qÇ_v"߬u/_qoKq B@,=]Q{I@}ܸwqV(bm۟r8#Ez|*˃ݺط`I봟ſ)T)_9yE[>A"]+ Eܝ;e"B1$=]Xxg99_[H#i?؊reE*rkUB io}~3;e"M#o}$ɻNٝ%Ρe IDATG5VAe2^YڮȯJ|Az/R6rP@Gq%Nk7l;ڤ9JJ }݋t&w-} L|ٳgώwY*Tˆvn)|H5YMmBX.VYǬl6YMas~`!fhDMVѱ=l=uM'Ax&R={ZuرCo! 6 ٷ3rw7Paϔbc5F`甏k4}pGj+ᮠu)̮Jxњ/[.Az,8WmRkjhx>M|ָ/ƁrhˣWe/>~`<'#ẃ' Sm)cn_ueώ/FO{8p^^Q>|;жIZtoknQ"^Aƫ3}gEeTDĤJHKbj85iH׎.vF%b?v1R+ji"}iŨOE̽Oo퓹?;~Buwl`v g.]|B=:yi*_$%).ۛę`cP+/ Rk{,21&AW>,+:TυV(R K;SH&%²D"H$eYX,B!0 deejZMT#|.&`A5aƭ-4cFj4 Ĉ0mGg5 ̈z#i\́t]&`0cOAEוmkǦBsV?5]c2Z8Ժs'ly[~nI_)&~wtjWxJZܔa;[ÊI\zLl=n0^=rf{e$8>VBBHU3.ܳم;%a^oঙ/^uɄf ׏=G :8*"upTqq"HpM$`2;O\3-Mݣo)/B{mئR#v6 n5w;1Feм1V#mgH~xv2l?ܧGݴ﵇8'pֵE- ࿄/?$Ƴf:wKQzr /Ej!ֽN#|'~X$p\-bn&Els:)¿oaߴ+7"k%,.)E"eҼ~&]%f~7mK~}E7cںe[{ nb@YB.I^7eԞ\7ӄ}dvoޒ? wd[Tyب;XŏZoʫgm?o#\|~r"GqkozF'V.5噭ڇg[kLbi,j/KqwW14V_ppFpr{]G[a:@Ա/n6]o{=qwlvkҩ&4jANHDDݘOje[w=[~zJ\d}'~vzRsv-|vx]v,ڡ۩x]gA3s}kib mBB!B4<&slWпE'pߘ9k8!upBr ,&o+#`cY.$LEF7rVQawT#7h?{e.E,[0sqG!՝hKlʌdBJUC %Z%MWxfdxEnZMωOgWWe\@#LᘼIF4N>ϯ4ClVfW)Hu%-Oۂk"Tx ׷%Bk%SRS28puw7'kuI>;$P!$ ,Gh B *sK%wDi LH`o?O޸uaUgqS0>㮓\1y5 aGx :3 4SQwЕY6F_ڷ}Ivcé8d;tY׻[yoi3`X5d4 WYe5~-BvY%%Bkjjw P! x%$:;=i,&׷ŸB!gbYGBH e;v:XXSp_@xuܵ0+[ַ(%:B!aFĦ[5,Q6ۡyt-UFyjZй@[Wyn<@=7S}ϡq@VBW:N7j︒35#3\ZXVڸ{KZp8)Kk{fv@.1O.!T5AABO/Q *NI봟ſ)T)_9yE[r`*\V(yPӗ yq>8NP >? ?V;D#}qq}g xޛ>ظoC/3JfRDZ(z3TIwN^l ٌ*z B@q7og@˱'AP(X:q=LYtÿڳgϞ={V_#)Ʌ LP''z+r;BBH%;StKNG?v¹߮kKi&/-:rYbݧFR B,^PZo۽~LY$klݧ9Ǘ@jt}kt+9%m. 
@#Id4G`8_}VW4碯ZA@ ˛R[zرc>Clogi1>FhBMÎ Yw-VFtnjöǜvt6_缋\PJ# :p!NwЫhA5oL  >5FpC-bI ,+H,--%D"aYV,D"P0 0YYYjZV+*Tk:_U F!JM?zRG99>.R+ ܴ,Ď%\\ ORɸʢȅ{Kv|.1+ A!Bb?v1R+CztH@~1)R㒘NvMztҵ]Nۗo]y س׿~x؋@򨃃j-ơk"v>4[iPFY.̥~C;MgLfO-Hʽ+B^61_nq{,{isDܺV[ o 0Ϝ>Mzz؋@򨃃jͮÂ}feu$Yc|m5nek+2Rj?p=!!9AW#W\xkwkEңW_ȗlÆumQ/!$}'8G lEݾ=k1vb9RBNHʜb%@?ǧĥħdp:nN֯ak+2SwgjXVu(Dy^B!e-񢟿@I_1̞ý[̞X?Fpr{y]G[a:Ѻm%,Zh>9]I !upBlVVEfvJ-5k@E,B+#:r{,V6Zb=*#9YP_g#SiSs*NnQs TmA!B)m;,!XQvdO~ 3)R%ρRQ!B!1P31I[u*\5qϾtkˁRP!B!.S//\}-kaQ{'l̟9B B!2 +ΆH0e.%A㋁Meo0Gr TA|B!B_)}_BC\#)Lfe2Znz t9B$ B!:^<~͘B{7hTea)YF֑>K㸌3xa\V(yP)BHFB!BJe ̰hkjD3WpK.8CmkK&dh|s Tm4B!R4=K"-.i VڱoU?g諯wĺ /U[?Ԛ-R*XRFqm_ryC!LP*ن-CHz@$uco]ȉ E*`b)7e)HuZ$no aYV"XZZJ$D²X,DBaajZWT!B!e%cafI\|2I%ˁR%+*B!BB!B!upB!B!ңB!B!Tz4(!Z&&BHQU{n*,!B!BB!B!upB!B!ңB!B!TzA!B!J:8!B!RQ!B!B*=Qy@2b]jWNfYTP-!ʆ{fH;B* ~F:Uu @'}WxY\'8E.+i7\8}ߡ3~LjVBHyQ)b܊s)2${og{$zAR],=X-|Խ>5 8{cm4ؖxɠ*f+Jl&0b ¢ل#O, 5ħeOḒ}4Ram2}lgHDZsy PBiBH1p5:ثek⯬q4_~uyVZl|;#N 8dY:հwCOI9l?B* M15 `{юtX/坫W)T"W̍dM BʍĹޛmV;wˀ!8'H,tyݫ ENo$8۪go=1UU)5dҋ :8!@Ê~F{ <Ñ#H\:=~H&ږJf͋.Euȥ-AD7vtR}1@;q{GݫW6ox5ߠw_A~p落^Xƈ`єޮzGgsҽ{wSbc_2. ޸~ (*]fڬ8yȽWߠcw_Ar@'c/8\״S46B qGVoзm0\JyUJM eaibW3m"aŵy=t0ߊukq4k;[do3x%ދfl9~}hinAUg&E²D"H$eYX,B!0 @4Kse3)Gfr3,Wٰ,-sTbrJi"*;p?,Y?<鿈+-8vYe둩k .Ҩ^k'<|5읠QЮN8m_٬m<[ypH}O)!JK0T*]ٍ"EH R"f]RCB 9nRn.5@tg i{>r%JsqU؃"* "śk/6]aJ47׭>d4`MPa [>=eݸCv+>w|`|Ct<ޙ}7 cؖUi݅8jYWZ{5鹾3n ¾S}+aeD m9k~!_|3ٰAJE߶2ԭ}; S)NYwֆ IDATkpyOW-8n{ˉv71.Bl#h:Ј'݇nl?5e[}iDxWEG?gv _uAbv޾n z['fېJil7DDs>x6#e/BZhS̺WHT71!豜I9"PP1_ҷy*dH6QRBiWMbQ[fxӿ֤sv? nmF eR1}nm~-xeC=t`7 ƥw :vZ 9%9>VTԹS˧x`TQƛj QSOp;.gK0{e Z94y5^힟lET9]/-T KCU惱Mn[&t0jRR8g}OC{ZjWΉH2H ͙D,$E)HMR )us= s?_r^ <}5kۣYϔ(idSOFKrUVs<2bֵ'4cŒ@:j/qFuQ\|J:xx呷[aIPa$knO9\0jPH:cQ44i"/윛h׏CJ 䪉 vZjժU_)} "*R.zy .sX%o繌2Cs.-D"f&xk5)Q[g{2HQA8k-蕥W_Γ׵^\ܷi/}BF|@rXQy%p z}.>:2Zڛ n?O CWGFco]V/MX3.]x=pX ۽  WE<-6S=w҇c4tyw_ ;r1Nju1?& :E=e˖-[֡˻nߜ!J^3BǝwjBіZV{+*׽:6))\GI9$HI0nRn<%fձq!GD%X.8p7y)srqbDtgƌlj8@wv>kv6C ^x.b6Y ZW"m/}y<@*e76= WF}7=jхk..u8hf)ybf+!T.yJܳ= 6L7ƺ&DU >:B/|5Q$g k]Y0ʧ<пT|Tʡ!j,?hbWZ .W/2CsH . 餛{iҐM4R?9A= *ѻkUHs`NT6ϚFpJYK>lj[L?^}Cmє쾈>ɿݸ3[rQ_?fUzBʽ>[1 c ڿ3^[jLfmڴ >x[_lعq^؏z u&m8˂V9^PΜ?\LzO%+V+ci'"[v)?QP]ϼ8)ʆ """R 2|[6LH5Պhre )ډכndB`!UY0itE72Lpm*/??Wk7&LKצZV tGV}qYVп6!ta$WB[T^ [BC#%"""""" """""""* """"""kpiZ@DDTi4ˋsTeRDDDDDDDT1ADDDDDDD%DDDDDDDT1ADDDDDDD%DDDDDTEťu9dᷨQ!' - nMtaB^ Ua#Wxvs%& """""*x7|zY}@ĈIuMzJ{(IDDDDDDTĪier5jp'jy0+5-#.2LpQ!5s]aqb`ڿ}ht6;=T{v5[Xy{uJJDDDDDDTL*Gl:Au两ܢyroϲj8zӒf"* """"BtZBOf0nTՖfezCt5Qk;St3%vc~ ],sL7?@Ҙ ;h>IƬK 1Ab QVͲYfJB@}ݺ^"l;Y &8) .72]ޘv$H-܂J&82Mub-K DD%""""b,ͧAdU6MZ}a(1ADDDDDś,V{ý .9?s[TST;ӌ)qyQ"JDDDDDTĊi*-a5bN[T~ro?Tpz}nd!RL0ě9/eNj^"""h߫h*ktDQvr+yoS>RU*JEQT* \. &d2q  Wme"""""""* """""" """""""* """""" """""""* """""" """""""*E]"*1X4_/_xBSb/_& O ~ʢ.Z2c|\lrϳBDDHsq `"= U+ATړ_ #H [%""*O*R'lXRl3lYI>aßϖ/bN>vfүSM DT "Gt4@Ԡ-.>nR>m叾x),, LjY|\H7X^WN DD=3j]nLp8X8wҭ@⪠4˹mL鸾2 nP``1ADd1;;<Ӿo4 RcyQ^)mS O_J\iʯQem"TE],"!Ş<#_|y Q XQl-̅̚ y7DZzmT&bJc(>ً>[O3)z;z]U V&9' Mxu̍Sf]Be.Dzυw&O F {~v1[Оݳ}͖-F? Vo/^kYFjO8~B` 5:3 ӣb.ͽ I떝gd|uth߰C355@w4ٹ5aa/ ZO{uhGQ?LcrVk޺O+8Tf9cæ~x'y 4EOշK0Ev>4]:[ޙ[{M]6yjС=xk}vlݼiYJ*[tzʔ_~۠Gn_soؼȩ Br6K8mb o*q"ܯ͏ T]:_YJC*5P%B{biaEgμ_!ȩ9x2c?n޴+5BM }l1sI>xw❠;jꪯbSR:Yw㑛'sdw#*\&D̨y-pVQ0cavh.p|a76iֺnC˦f]2Sg&lL -յǬȩ >evr==,>c=v52՗>zn8$Q˞-$eX`V9HEQw-ۖu#8\ 3ᲉsN9챧]8,Evk˛6QɥѤvt:3z5m Ce3SWx|w$\Cӆm]k{Zߺ32qȒs!S$!uWxh]O %jt/j乿M.JrwwWT*JERP(r  L&D)haLHiRJc.eWwfJǒ}Yxh]u=v#SC' S^oQ)4$.ŽKfJ-tWGDHnׯ_݀"}@ד谵Ry\VZ _p#Er¥{;PNFfur`:3s26W[/}}Y&,<|z6yu^kv*[4mv2{!g\5|?ͫgI-Ihby*oГ,VlҳwJ̹h"RJ_(\Y#Q.-~]ڗ?B4v勍e/?NP9H,8㰛ͦ=_Ƿ"MXfL4͒lpW-!nQg:(pa׷g'Eupӂ,'6i1UV&xڄ|WV˗flj̠Mnw쌝/Lwool`v;D)+Ԩwvگ~cF~J~eB {fq;}y{E3>=ێep.:A 1ͷ. 
2b,}NF|;5u]v$9u )Oy!=<5z}DEN#oE|`j[텕GDtl7Te;`0ZvvR!hO;>mWn;7ƴ/ *?H}ԩ+˙FB2@}IFO^_r{A{649G|/_3A'hh͐ uz,~QUN_>Du%oW.EP\15xbz z{(}0$>8JU{7^N_K|6CfZ޹ ZM)*[eMy86\-M,} "!/.q3KxW.iLztiyДKkd'*zLp{JSW;VҬB Sɲ[ Mv1>^@08)@.>SNRrwyrY/Mb²> F8*m ;s <J=17p&we]O#wrٹl/Lp|zZ:4-m? IDATT4iYNG&zb/,^ny wMlڵk׮v9c謄;Xi"胲HbCU~[;p K?ן7lC {H.Z'#*jLpnxG]^ =Tq}:`%^>炗{ߌ%-^v:.٤Ş2~/jyۛ/W˖- %˼֬}|H2c^n;x k_Ξҷ̰dCܱع,ɦ6rKc|%g5/ WMpz+VX15}s "*u丄%q:.!!AǷ6\@8 S~z,%-xEL~?%f R0t:v-ɹo/ASZFs1v"Hk|p;~|†?-_]>a5Xp3KNذ聛2ϷGqHvS'$;D~^/oKBAsnPjȯ0՜\4ˠ);u. s/e#*AT]߿J'"9RəhHSǵ*?wd7Oj@Xwo]>vzw6f__eLǵ=4y)d̠kрO4~[u3Ua?lAYokW=}bB[wO< cW/P/e4\rs:ҁmie|>v nMIÄm4[׈YFDDJƟQ?~9TMECT{Wp!`)FD6|n̐M$ƻS;-/Vle=ƽYrNkmGUДX;NB:!\Qa9/eNjz#,5.pO\ ¤Z%fĤ%nTY&[VkD*r]Մd OU\S=Vд_۲OoY>z# A Ag= ٻc26ߤ% P^&r} ADTi2RZhKZe]\c 0\/gN5Gq\MIݝG|t+6Sxx(*]RT*QJB j2L&iϿ J;mZ +R}>Ώ}z?#enǥueT!jqFo4j~&|kDDT깎|3r?;yb!,H*hJu=z:/^ѣQxta !+kJg +>jnyרU| !ͻOU+Ԓ3/N!eL<5my1$LpiZM\4QS57chNx9crIYcn$o""l݋K!n4Lpii7/KxTO|Oqb%{`ؼ#2Ȫ,T' R̢cu(DQ6Q*v{Q "*DMeMNm-ri~fs,4=X"""""""* """""" """""""* """"GGf\!҆ """""ztb6yqԨБ. .Lpѣ[W˯LuڣG_S dDT1ADDDDD߸o4`I`*'J Q)XiQuԒcذi=\ǓCBuD߻+16^([n{ܿB'?x^!b QCJ)W R """""*CӆsX>l޽{חpn@0.2'\3g5HL9i9u.*+-sX]<9묗{ DT*0ADDDDD,Ҡ]9Oy=ZPa:.oG@DacDDDDDDRiPU:7³=Ñiy"* """"  )C?=}ϤŜڽ")f7~ԍ$AB]u E8\/^v:.٤Şc4,s|!_MHPTtr#NR-,Y* TYQs%mʧpQU*JRT(*JB!A!55d2L&NQ!""""Bgj *JBDDDDDDD%DDDDDDDT1ADDDDDDD%DDDDDDDTqQ2-oHQn[*KDGpQxLpQxLpQxLpQ+)qW}X EQ """"R)q>ޢGO$] )+ZQJ4&8[:g%y cGGu0=3ڒ,?RQUKr95k:'i[U`,[*#.2LpQ!8a9:QJ+&8۽}huVOФvn~ƋQTcj *QƯ%""""Btz֞+`͢WٍfͺUoPgpPOD%GpQ!u8h_װw{ۇa*|\_' `MPa D)QDDDDDT%[~@D3nmF ᖡhJFD GpQ!x/gn0%sHMY ̪cgbHr g4 b} 1ADDDDDřҫV^M)'j8VrEF8iP?\zeoʕg=BQgnA,zxv5z.Mwxq-*D)*DDDDDTH[.#/6=n'Q: """" fq04D1-tgg#ZN:t֒3AD`6 D_3",R:V{W&jO9UQJxx(*]RT*QJB j2L&Qh4.GBDDDDDDD%DDDDDDDT1ADDDDDDD%DDDDDDDT1ADDDDDDD%DDDDDDDT1ADDDDDDD% @DуzBUr2ZG,TsZ\(*j4bQJ/[14_UuQ&8(] `&5(J8xAAǷS⍊O KұE&?g9b@DDAr_xgT(겔61a/_|(@y7#GĚlټ}#۽}hZVZkM==q6g\9왳%܋I+ oնގU5 ߺםG.F%k<ѭGAֳazMCӨ{w [!l _^~m-O\ۻs_;&šj.m[6oټcIU#w S9{ZAuΝJH3ׄm ?~%Q@==.7ΚX۷J챽G6V,[VOhڽKƕ4QiRcU*c=u M8wuH_6aMa W5)Mj~`LorNZKdgn@Αuhf@D53*r^@v*>Ε2'0cJF b e6%3O-?i~A5nζW,"wb-[Oa%0׿<Ǵ[l[,ֶkm1pvsՆfWoIРnلW5BrƑ޶wLG\d7 kbS WMBin9zH ADTJ+NX)vQ8 gZ?x[;F~5HL m(go_y3S$:( 24/۔K(T*wwwJRDQT* B. Bjjd2LAD#Tʅcd7pk=[xka]3=C# -Ghա<}%oNT"BZt%+6ןuK/Ʌ؏ǎ89ZJ~> |IEXvm*[L]̧\pF]j_~8a^(Jmb8}f^l!=Tj薛 "*R+k$xsk>-Bqu;#燵zcÂ准߮ 57xM ų_=M/INZKd% 8-,ޛs)&8*)bfڋm}R u xS9='m3TXBOOhY7nJOfl-XBi׿>1Sa}~x `uYƶN.S!CF|;5u]v$VNno{Ep^Hu˩GTE}Dց1sv?nmF e1}2j~V*xe6DxTٙo/@oK1{YuUTҳ)1EFX> k-*Op;>gK0{e Z94y5^힟lET9]/-T KCU擤Mn[&t0jRR8g}OCQI2q63xu@Hi?qJzv<>ͧcnʙ /,nXd&wRk4p kI .O ϑ1FDv|3xP)=/x̬:v&rriy)d_)9|Y=-˗/X1;=߸6^!. iW}$R IoڴQf-?rH-zs*EZ%J=17# enM ˠF. .S *^ " L%4 .=rfʕg"pa)vy8Eƺ(@]uYw~>yZjIDATCv<~z}r|m>~*)k?ܥ/3gאV@quzcpA ה'Հu{zʺX^sy7[ϼMؑqZV9h $0eN.-[lٲEg\u$? ADT$ju1wEgbZm||6esxK̪cBޏJĠ;qzw0^_/Y "zVފu/Nr4:o$>I=^Qc'l^tPݘB^ngnfh ZYL4&˶AXDA߿//@gYHf省]ȿƹG-5uͅ\ݥ>ެ>t=OleQxzb#j}5 3/X{gݦ4IXW6ĀPvT r "*ѿwl0ʧ<п |uEzXx5bG=~]L4iȦC:!=!JO.^CCMX~Į}5L]fi {y ,^Q{V4/;tLnlD Ǐ ŴN8Mi[ۓۍ?E.0{YjaVw2*dl=P~;#( yFtm֦MņuӬPgs^,aޚc? 
e:;.kZZ3\圎t`[ZY϶ [LbjY^>L8m"kܲRbC*]OSfR5QWjCpsRm[fx7Bh"1DnX=6z{Êg )*8i WASZt $>t#*BD3"۹gw.f vQHEڧ\vAD Un jOo*D{7*^@X'8L1o]Yʭ8GO>U+i zA.g}\_옣׵5Z (r*gkxJib)[7rDDT깎6avPerqvgaG"WASRwy!J)?EQTT*JRT* \.ATd28El3'_ԥRh\SIrYI448ޱbզAukfY&FZWƭJ@#T*/**;u`<4>xzA4zc.~,`%_Xsr<;[F"WASR s#z *#~< \ Wgjx|;Du!=꽋{=gΙ rI6I5M1ZLIU.VekKkmVuUs+E Q#Q5㚃>\<<|k}}y& e=/O0vu,>(pk>QIM}Rk]֤/8_}yL7͑-3)0yͪ(Q+tu$^/;f*+((d&3x_x72U9}(ʃ##wp6>pȷ]spE58ԫ[_mu=Wĸ^8EQEA҆>uu-r,gH_ `oRE Al6}@TA+eՂ?0J@U14}4#8{UWG4^ñ!b*VQAz\1oT({QћF(ګaXf^QSkXCIaYf0j~/@OK* z9Lf4a0,N4h mEGwo޶s5!ISG|pQxӹs *W_a||KR--dX"zg!q>f3&;8hZ|6M&I#8f^W(vQpk+O (*=ggjgJTI;Xiw%&OL?w߷4RL|U2339+**8NRT*8EQwd2dJ&<}DQػޛ=>&-DD߭9XId(=wErh^5)3voJ;FD=2هkLd=tХDQy>//OөjyKjf`01`0]lߚ-J`ɳ#Hz3AS>/w(UΜ;`N开?9nXy7V<ʵONĢaaa/^tUUUFQRx*f&n`ݚNTܷ!+N(]U֙JeT $"knfzw#(T*Byy\P.WtaNJm[~S 67F.,qkjjM]b.g-.w~!#oc=@Dz0cKJ PbV٤D,5N+O>N|` e h86`] a@DitFP4ܷR7õ3&ZpAEASN:V:P.#MAxܘP\j_}1f[tN`(JkUA2rCD$*~-3¢=d6lF\ ٺLm` &""7OT[ttvn>ņdDm6&ЍQ?wJ^siκMֈ_1KܥVzG7K{d y'vx}NeYu9"tRvvvll,afg-v<9WIDJƧoulf† ˣ񪯉}[zyDd4onݾ}`pi4ъBǹ: fK&[Tw%_[MD?د3,:]%Hk8@{!vL ӥdFv$$$,k ^Qn u?TLC^عy3f̰lW^<`Y%P=8aQ荴Z`4LEAt:<ϫjFRj5}XaiB n#85N2F#mgnYp_T{0  vw޾}(&eYeRRR`R(Jfƌ2VzkіԸ:6@#- +m+ʤ$nRRJ?@S2xfrU? Zt*ԯn}FMξ]i>.*QxuoA;q䄅?^*vk(p@ceG?'":aH5 >9kߦ.>nH"/̕a+8`tG>zqGg}򶷲A/fk,/pMՅiRӆ.9BD999sΕd){8!8R?%PqLԉaMyq@V]_C{ғ=XfW<6a-m{cMt&m\eС>hhh(0 uh0C5ODNds:QKrsYZGp_@OuQOOBtW[gH@[ѝv洴z"_wӜ&ĄIOM'?rchGCbfbM?yM|]ˇݖ7'|n2yR1!rd5^#<|+㇜_1LphK p=fٳgs ^+,24af ot{|"b&4ztd}_Ff9z>=.#{çY{zOuH̸O~$Bo uCrBNlɷvdq}|DD`}Z܀˙wjÅ/~|~LI7md];wTQnçDžƢg/^.7H="&6SnPNIs.mh;i+Us>iU junطA`ZbU\wk;ȗ6_I?:4EhIk煊b>KLS<_{9"l̥-Sml*勧/63hoIu/HU6SgݗNz~rD‡[9MLvjM²t"Zi6"}\zel$UhF\I[^:pC{6njӞN^{c{]9۾vqG &=#7瑩߬Kw>ҩ)̂ rc?AmNdLzdgPۥ6tÇ u7z=2L.K6`0`ԫ<#cć&B^?IC֊Vnɮ <M9wݖqGy>'"ƚ:4ODősצUUWޖ"279֤&k39GEDk4!'"|̉$2e`?iɻgpDsSD~#n5 oNىZo`Tm\W~}c\[\LBS)SEg'"jޘ@D"y˳ě=rl=%pFfW@ 2ݚlT;h3`+xQج&* O ,S3ҩ )g6dg{ZJZԎT %l6bX,zh4,+-kXol6@Fn9!Dt%cm#k,xm7^kּq޼Ucn[8G+pj+{ޔ2E30)nZry|l+;Ky'OD4u->䋟Ee'+_&"ڧ(A͊U1wg*g|^ED2*<AecĆ"[IQMkn!;VL}R(QuҨ*edlbRbO GсQbb%;of 剜"Jv+qy& $"wG _T;{nC+tWu}:"#C$cmt)N.@?< y/H++sߦaImboO b#ach0Ѩ_:v6z0fGVS}`;`} ;X'RXG",,bX? /67=_ 9#᡻wֆ3^f [ ց8m`pݖﺐUdGZ&i~19eft0n1<PWLa>$9Y#G M`ƠX`af-0āh$9x%pY a׈DGO_$E n/?/Klw1{2i?USIfVfr0JEi4QbAqIJBep/?>k/HTs3GK;ߎOϷC7vǁ""c):HIPdd3YѢͯbyl.>GG&z)\b-9N> ae<@,:X; <}0$}`?8A58 y ``L9:AX"C/$Cr @ yBP8@P!T@%!4Bߡ5AD0!i BaC#Q$D&"QE4#݈q H$RCZ"]hd*2YE !!!Ǒȟ( $a?5F٣|PQTTՀjC&P -4- -VCt}}}=^`0,*Fcqc1y*Lf3bٱX % –aOSUNj\p \ )nNCQI9JSGs :OkR| ~ H  kB!PJHO $2ňzD7b1XO"". jӺ6Ҿ]#IљҥUе=BOC/DCAD_B <$L#yRINA!!C? #Qрї14c/$I }u;)& Ʉ))e]adfcamn^!a͑q<':gg+k. W2iG\?y#˸{yXxyyyny5yxyo~0St(RJeϘ/1:?ߞ?@@ >&B4B*BB' S P٨&$juLVDK$JV(FTE4DJtP !((V!D!$$^%>$P$JHK6INHHKeHK}v.'%(*S'3*(k*!{M\syZyC4o ~ '^*-)(n*)+E+(* *{*W*0XWEꪦP֪U]R=D..]u&554j45)4ǵjk hj՞ i+{UwEOM/EKo؀ޠ!aa႑QQ1̸xĄǤdT4ŴόhfkVn\<ԢblIJ*5ʺ>{d=ltڍڋ8;94:8;:;I;8=tprp8ujzuM-m؝׽߃#=^{.{==ynxYzz6^93[;W7_?P0X8T-8:x%2>d;1B.331<$/'boPxdVxZh1P{LG,Lʼn׌_MpHaoGb $}{ۿo"E'&JNIHLJ7Jo؏ C&0ckܙ陓6eeEgR?T~|Xpߜ2%y>y)=ѓ0 h42&NYSs9_PR}"xyiG`ٱ**WTV==}:zTЩ5F5mµ%1OswFLYg7lq;ڄhkmvk<E˅܋bOPyPa#GW>VzDIǠൡ]Cj=~s_~14l?rmd˙W>>yCzSm;wƕoNOs/Wj}0-E%V?.-䬲6Tyoqmz=aQ)ymlkl;l{;+@z;ñ x1şbG` `^ .)ҁ|3G n! 
2~o/sXɪ $Y\|q7m1?>,*cqN=FZQs@~RM!@)ZeVY*QPxaLCTT^Ɨԙ;|/YS&t]~7O1^"j듔 Q/^^{M)Zx[MCB8w!s*.tuT][1K~Pr(/JӢlT/ų9^ʶ׾ _SxodIJvU+rًg40#4߷4<$<-QOl6U5[+_先HZթS2 j;7"lǸ&F+DnFU-{3*W_͜/#tNs,UH[3?$EwqYn}?*Q&_U b9!3NtF /cK]y~y,D4:V] h.z_'hQ(MCƉUA 9E@($9w6?$5\&2)8߮b!YcPdyp8CK~ P7Brv]&/=rdA|_0ԄlύQe/np^;|.m啓3(5/) y)6L)|:42 :4sѼ:>Q7 GYt]3ō񹔲Ir&F :poۋQQP &i4ݾ9*Hnx9ʿDV3UUjv'8TõK7%r('n:`?V8wWyїȦwERqN(nbfVJ#8$an)7 E*2.E|wil8E?CIu 5޳bh%^Ɂ88ӻ{)L̴•Vl^xg@(>f0_땍/RLS\4}Sr]Ժ:NzFti7U}Pߵ+Bߔ`c O?)) oyb# #E!߬ȯNyajV&(T*%qJG*n3oG J QuO;e:1Q'W@  ^#\1 X %}fMWxOPVu":jesAYw: .Ur?Hb.l{nh^]Ϟ~>i2֒#Bc) ZG糧Sw04X[=nZ2ލ^svDlR`,y!b9$b*7'je0xD sЉCڦ'h&!i6mAU閾;/c Z8A.SQLth}Vada e՗>!Z`z^Ey=@  R?Q-/³HNTZ}) *J &w3yר}O4F-}ĎNl,d*' qŐM)oi (F'Es]jmšr%%vfհM9nUGBwʭVxpJVhdO"s]QҦJ 2 IDAT+يl96%ÁˑJ,Yo+Z(R?Q`fڎZ=rE_B4q\(JKKh4,Q!J 6\9\-u,jL4VG `+d\,Q!@ P!@ @ E @ P!@ @ E @ P! LV"yB"0?He.0;YO|2S+.&W @ @ E @ P!@ @ E@ M!GfZ@  @ P @ @ E @ P!{p:6h횓/)2yXSG&-p]Y@g}V'$$&&*Ke } S}༩YfER,_CJT5.HR*gk[.K`v| ! P8VVAҒgKXPTh@X؂ }YG# $Y[ -3Q9?بۦY#E~\HYC 7yGR)CC&vK˸ɻnNПW 3K+8٥h8r%~Oߑ'fL=]$ȒbjMqp;vW߉Jɽ><0|N37VpD-T_"'Yh`Nn^^2b;^^N* Y,;rxrz>n{Df,~_/ˀ0kWи?{=~t1 KT -0/ S,ɚ۾e [h9zI&^{I*4X8 FgnBi!nW }벉-/b ?׋|od *g ĦPMcN=-ōFM &;D-\_"ܾ{EXɱYejBgk&z6#8rOȜ`9.&b?IbaH~_$ lb~ KEA  Ի ŃלHpD;dStEbNsi4_U~SժL?X1t~0{ZA~`>J}J4 (\ EӏxpʙEݪM t˶wy۽]xڷ#6 ;t,HRB%,Z<8PK.h_}8zכ1>rx98$>XNx~2Τ@ĉZsL0,?0IJW$ޝ7\6}#_ V188O\y#{x)) ^h(~xyy{ʹvW)w>\{4\LȕGt5*B[.=;yUܔWnkSɥsg=S.jWM\tU'C{Ԕ1O?K+Ui @B/+xΕG2J#WRa._;hM6]kN֮aOxp:(yM=EQ>~/z)ND<\ bVу(RDb#*Y]ZlΛ^wբ؟Zg:ex75l Uҕ/f^xXEؠw%B+'e3pGT& %= DM*!reƵX|.խ9/s&\#=ӧ^Zգi&nB%O[n`reVJkMf3vR`÷z+Go1bh@(?Z2;w6aU,ޞN=zޗA÷2L[iŹߛw)0{GߠJ4 @{cݙ{ԯ&-:tG'}@ΝGoo^򛣋W^0@@76wSjbбA-ψcl?1j)w7 9xP; Y| m JZ{U{(5„zjhm kq<8>n8`.Jl;v`7҅gk? Zaf+{cF0Νwu^)E36~.!#^4,/+OsVff_S{T3qv=gpU끍ok}K[]S,n̻h%4^=PՈJ egJ|6Yݹdq(hߕw]Q 5]R҇[R*Ese ˄̕@^~GEdgf 1rBfp%U|WeW3&0bQ|}t].k3h#/zxrtʟGxUvkkN8t#83k8L8$2ij#{m!3jol;b.n] *pN>iPDmႡ DPg^\ &Tjǟ/0s5mݭU.5!M5ا?`m5ahTqpoԎSAoJ˷8=1~1N!=-=>%%̚뿭J8۷_77gԺ+;J Vee}kijXFLS_f\J}sȤt?pP9ᙡW݌ EwCȻ`?Ӯ2ftO 9y E[|k#Stl;sSz `.KԎ7%~ Y~d-Η">K]AaY)cMVC(0T"/ok Zpֈ N!0XӲ}˲ %ݑ h O1:d͵<ӸݰbpCftUE}|O?E?ZZ ?1vqm3mvnL܋S݂)MAyB "@[%'Sk@ťU1nLJ[ _{Ӷyq6Y 8%6=h3@K+"mJ:1=Ɣ3^2SVt,kDquWeS-\iڲF_7`YbSp[yNO*l1n[+٦Z*3]oÙb]{[:u7GG4G,V.urJJPpAR'_Ĥ=OzeFxX7cJ^':4´2h-۱-OՁuˮ&'\lgSLU2&ӹwRCO2nTyLN6kcYxQg@+ڦA͚n/dyc[2 4t-(hמn܆-f3S u2:3A0JK>bllF- hF $7Ϛ$e&-aj^fCC2!B͇ )uƣd@`l'j jNR->3'<~p뢃!bx)b$jH8E2=sՔYr B!bc}0bq^866u=g1/T|ayg*׳xp:ʼɠ(ϙ?Š(gGkDOwQk./QȞ03>Ye%P◩}bӳ CD$OtRxܕM *_SA` ovy!ܽ3VR[b4*0BL;_*'PAG2G]>zlrP>0[>51F[璹(U =kJ;zQ( xM߼ߌ3{UF]F֟BJVZLHqOBnr6Twt!͢ോX xՏuռ(p| WԩXI[J9^^mx<;=io >ᱡSBjEU "_ n!@O)uYΓSRogġ 9E@(yqtjOwثReO T Og7P\V爜1h䦟Cxvj`p[)hB7|3]+-"E$~h#= ?Pj Jeo氽W|?lnO>ymrƇ`4#wLb9 T&imClS[K ^)AU%G9w,@05E+Lr2nmz3*J}UzdGW>r@ɮϙ5F( [/dP+)=MW5ttu9" ? ۶å qpN(nbe•WW@~:Y!pSlKr]tܶNK*ၡB6̑F xfrOD -p #}y'!C׌Z7|;-]\vW@Z|P_sO/76Rfp\kwk~*sK7X|{mզ4G:h:(WVsʬ}7ϓReDG{BWa"GIqڏԢ~}uUބO*JyvII , i^veԈAFja);UPs/Kǖt5$ZhryAG޾q۲J],Ŀ|+6ِen s}4=GcS(ucoz8G%T9KRg7 ? 6}G ReםI-J e}QI@=u'l]d[yO `El0NuGQ#(ow>eX͋װxGT$plí*?< J yRP)m'y *XXpCshUn+~+#O=zĨKV#ӿX`[/hIYyвB 2TE |Y_!@MCMu'U9&^2ǒ{Y}ÞfO9}=# R.J Ah:(JAЗ#FxL֬fp{ܡ[r(II\ })^2q9DžX2ϞTu-bcRw5Ǯz|nF|bg_oL;NS)$>륃~qz&f'EmuTL:M|'CΔyT;(hNLmbgμw|4uΈy2ʷ0Fo^zI{_n(=Ģ$"O0 j5W7bo\; wC%= kx tK 7,h>Ex_vn dVOkԠ2zAhOؼi`tUx\kp^N(hmglx3aԝL[o@}YFql[h\T-Z_bG@bYq}{αh\Rw"{0 p8NlV7-?Wa&r3~k䅨CLJO9Cݚs~^-fN#;f=PZaC -}PP' GF&3}|; `nRfԄ{.r7HNNSQBP C^ܖ8 S橓_9^h{ >63WO(Bf\w Y4 @/3XR8h_ɍ.a)a?6ojsaJ$Iz~':aٚ5ˈr{X.Rw??/D䨓~:|+5E*J>4oJs)d p+_=j8Rݞ}=rz*82% RIzC$2_ħ(TY&ЇCT(S5/R_bE4½ e 0v8, 6XpRiYHĥ3nX*nnR$k.CBItc_+9+LfÔHrKC66 ヽ .:2{x$:/'#O^;O[f%3`NmŁҞ܌%Vb P(.7[z gg~ZFmM;Yo=fFVj^8ŋ*ivŊ6x^ )/5}98,!X.C#}dqSDl+~9@n/o/ M˻A -dÈ |\”Hr&ɚ~=Vhߡ7۞ӢRSxZ! 
5/;5onFl[gϾm/)p7%Dq2h1.hlݠ4 . c{x3ӊkny©)aJ܆s.?'~5*MOA--P=Dbc$u{R/-1 38AG{߂}ׇMiP W?,9lacfg(le&uEUNryef_ia4wm;qP(eۧ&\mR?2rFb;I(`DckT*[nƓ߆|h=&3vNG}'3pgcs>τR7K_k5/:_bE4{õ \~}1F˱KE`fp^Ow1jx,Aaŏ G)>kw:R>[2nHs+Q&Mj-&E}T Gv .vSʧvZ(ঽ z{= :WM!m^k\4io(zeдi޾rܿQ4J*\fpppu_M򃰣/>RuZ)I*ivs&<5d=LZJ*,ce-O^ۏ)+=),mş-ʏJKN١zu=Bd  [7iQ$ky3L" jB`\瞕KgrZw⒞$K@`(|aΉ_{PmS{N hлW&.ٷ.^yrݮAZ}y~Bz߮6?-k k¥is6 vкV4;?k$22|ϮvӞ6Wzxd?Ĝ:i9q^ @3cNc/ H˄CχfŸrNOD|EO>nE* RsPt->o;xy8o0$H?p̪7PMٲIWgnւ >`)z,t 坿N3 0vQI_C5o:+Ҵ9)E3yxٚȖ6WN;)3|3x:al3xvjW?UܠF6в*=8an-\0Lk}=5<7ULvjL) 8{[lF=NSw L>Ftc)YN-\0g@0FaP?1svz̤G|JUkڶ~n*p>z7(e7o^4ḑALnIcQ%׾񴑲}7/mFU{?xe߮~CQYn5ty-}]O&ĉ&L=zGZ0't&p􂵆UYڵŹ^6j`YNW%4k㮽6|]/@#-+eq`;]ԛaX6gSJdϲ/ll|-(r.z7WulcL ֩6 +p&˺T h~DžL0,iu* ƅ|qGh?g\{`LZ(4&tbw\Ӿ>wFS!؛/{WaC5/d_AQB"GnKa( p@~*Λ81Dp/~*7I%jlY ׬YBAe]u ǐ-ol3xd:};k!k>a!VT C)|gA ցۡy`$G?mn8bQE ۰}'|]:ܒ=z _iXShߡy7~;T~ /R&wGW?=fœ~m15' ZP? v K}\ *lСs$4[[6ݜ ۤuVe Xk.i^B*׻=d_/G;iܰ&RJiKļ@pFYuOQK'%3*qb-|YO7.Tqq\M=aܜ"dYeύwXOi'ճWyب^T [j@{|Z7J['ۀ13>>didL^ܧy+,woUtWS2[zyܥfi@Ϫ %LצtvQ)b=t)yg6cGh4ng0L${D<ܑ~{vא74_aUXl慎kX pVچtq66&\onN+qAaM+]Db js&ڜ;{q};v&XA-KPfkW5bt̽+ajinYuWUJpX yoeUr nA.8oXQ䍄o>Yt+kdo_('@K!I j4{qiovк36?:7_o׌#>+j̒&ބo{ħ(YLW }Цi@# yczLM lP&oߜ!?zrTR4jV˶:YC[no ʋp?br):Y˨F9 WB5"F>60{M qˉo^vַBgwx8 glS(P/-?|Д:Y(!hvo~a:?vMPl+zD"Z7ʓn އ ?(AMo$#*uƝk <%qp%fJJɗiG+SrdK&gũGձ"B~= !Zũ%zؤuo dgj꺏4xez<Œ'9zhG{Cebu"hVt!Y,EQN]L6 4:2^\Fy@r1VU ayҀ6 Yj>I󷄟T](!2DOIŇqSI; WY>iO#9rԪA~\vPSK=bβg%nF {BEK\掆kXOmXpcE88l{S.}!pRBt" ի# XjO!{ˆάjU'oC<Jim{ZA |Zc0U0њJ{R/`g׈W/O.Bhgt*hkM8/p]!(eOM"*GNѷpz-kjɦ#.IO2{ !M`ټ$&P@Fײk9| (R4[1Uuu11TIi>olnwR]jEٺ,{Q W+[ "zJ]=U =~}ȷxRSs>s}E*jDݺE:k6k/NA>0XS&1 6WB5tp*?J/Z E(̞Xr]o؂ ~k 弛."RMz6iƵ_z @L˻6C0:{ͬ2 bh}P!"S `f@TT^֗ufa6Hm9隼.cz5eJr\nLϿ %n\~ıs*ײ4joyl:. h cqݒL7iV>zhk\Tџ,rDž2*?7QI\yyXճZFQ%ka.4 g*}C [MUWji"q8UϚl2͇&[lDޙ>2Ѻݓ'ռp| G(iE^mx<; :T!b1/3'ƜPax n¹.-uF_)_(yPJq8q(AANQ!J>kʍoK| [ǻJUޏdF'Sȓ^ ڵ|ȁZ&yzM s  idpW!KSHXUo .к0ƌ\ύ׬;JWyh8ō0GK,3C>dzLvo{F;=xar젅sm"B- x0kEfCNetMRi'FeO)ThpV2j$ X[4p)w-֦G9Y)cJCAMt+0mVsV ${=f}NXZ͋qVpim.?F58vXp%}mwp"-~ymiWdQ%PSb Lg;вJ J>^/#nŧ6'2v _Z3OJۻ?<Ѽ/@|fUbeN8#[8j ][ n֝)Ы~0c{.ECһ>En[F=;2=ߦmzd!5e2SϱZ8yDC{Yog JsN IDATѬI+U\sW_ WY̎VnԐEݲ澢l\0vwDT;7agW(Y?NJKbɩN%Cf9OLJ{IB٩}Nzhx~mү+RߠRsP$9gp9Y(7yɬmݖ/2o|&0I,?H|I WG|]Ŝl^=NthH7w{z=:| L?_s5s^wF,ȓQ1SNÀtJ(IB @ZϮr[yKW,Y/iћ |7o~jkeSFo-r+lpaKچs.?oN|s=Oq.MWxUmÝ1w"{0XQqCىi *kp*7m# K]woHm*}{ϔ7kh ')CzdPэH=t-&pIf_6kU7WgBaЅ{urE}޻!q抆1 W3g++@; jj54Ѥ> d_DH +i/piOKg%fg2I8il_~m0EdW *lƶC g4.Qvȍ#+?0Ji!W{Q^]iሦիW~gm^lbُpR>bsQhBy]y=:@l[v슒k^sU <fBRmAe8dB&eX,ZVm^VK3ׂs[BK{U*B2R%8 ~fM#8veZJZ+R7?q1S5/R_C#BHx4HD[)4qyL -WDDDBHTT)H$B!MSEQTffFh4d k📻/N.~G w')=rn%_GK$Ⱥ՛+rP5@M5pH,&NϫwU*{i:ڿ{5Zk=cwBZ( s:*w 8v_(c'8U"5$82l)ڻ-LtsT'_aC -W\ .ptXBԉte@ ܒqp @pHRm~ Q ˙\hj{7vڑ%G `d\BA %R a)ia@ X-W/=8D$wH\=@  8%d @ @ bqp@ @(@ @ bqp@ @(@ @ b9E@(QdV@BsOkwX"EpH"KBI @ P!@ @  @ P!{pY+(j2Q֬U @ d@ @ qp@ @(@ @ bك@(0o.HJA;Y >`̅)KxfrT`ի{ bRTPIB~=v]8qM8ƦRSSRRŧl2ny&P*ד*7U JEzC(\y:Uݽ|8d׺cw慍kX(f_V\! 
cǎò kT*gB!TwSX^NL>SZ$C J6}ޙAi6nu6Sxۧ {s[Uwռq| EV 6sQf$'z|o+2rn#oA3 CU0E_>//_ߍ_,hU HJ+NpZϸ9`(zNojƗ /Ȉ!~}Τp}c;Ўɏi>v+0 qPEPn\jy뫋?NoEq$4"#jT*[nmSi>Q[w qoF PwXl慌kX(fw8O񁟯e?hy9v8uF/nYa4=Db[,Ht܅VeWq'Yu3ɩק4i\pbL*چg݇ZɵX\: 1JA |ˊ<ӰS'/?LJO5Y׺܇GW7n0ݻtз~ F!ٿenӷ7QS'/?LS>uު橓nD;wIu*0 GA_E*JЋaO_{TIk9S{OՏVtJOUl\<}|o.2M-i>KܥO$<=pwruw Yk[u5Oľ+ի^ z )ƒˉ-⵩XN䷧#bϦdPP={8eGqEDp*C7+6u\ět_Aݘs1)I xtpedH]Btx٣H5u5\s)c}2tXE6םcm5/|_bD4í ^~~ѠܥtfqjN83DŽkBq̓nb]Z|_a$$y;=,(\x~}hicY5l)md/66Mg`$e+V jؠIw͜`rmVJ4Ε6ɥsaw^drnvFC+?#0 0Z0 qp%7}ƕCFm>1y4qNv2(uKh+,>]?&~P者 ~`zվ:oNS7zf̂Flyиqʁ= yK.'Tб3D%17F \pa/@͵Bg?~ ?r9|J EF*vUjoNa`0d;xq8E9ߤ9̎ۇFL٘h4Jxޘ wf|FhO־Xc& jh]ͅuƕCGз&5h,yݦsȖݍmN 3OAo [O\xrJXN]65eX\?9VaǸa Q0Z 8z:3xU `NT &p,] :=SwUռHp| E[ m8Rג:8 .?B-Q^qx{d{XF)ޗ21K< :9#dXs/mu=-,mex/S mֻ!nǮ]n?W;9c| i'>ԚnnrP\i~߆7Y֥Z@;.gaI Si~_P@6.bEG=ѓ|^ x>2eQwռp| E] m-GAr^F(W8 V8oO;$&`Z+jJ66E?LdƬdB5eju>eŋT' oZ w7Y}aCqt15?ݹj_.Y9Q9hպQ^uًUܿ8"6u\=7mUm~"Ou3n;jč> ϞY'ZpCn_|BZTFەB9ܯ~ T\gܛ?`A"f-%3X޿gnTF5'CӨ]//lm9zBڟ%dt6Y!;%wAƉŻu!>Z 啟ӽF[77gȊڋ+L?x~ݱk6r̅{RPխvw [jTykVpymtS?IQMF}w҈ jbRf+_UZ/dj}VUg-ar\<(oS% vQ)rÊH*¿4`jHo#{<Y;wءռq| G(i@mxAGac[)ͽuKAKvZsP։P"Kp+b jmZ̞P9d9BY\Gݬu2 &y-Jxb]ɽ+Jy zqQ\Tt98I >ynAK -[ -Gċ8((}PII[]+.uzMr޶&)֛alD + 4 ȓTp1({qiνphgBdߢ=v>"/iצa{6=Qn`@Z'=+#@i^9ͯlx>~qN ~>E}t 1<@y+lhivh51{K-b1ٟ9vq[O &5 h?A*j:bhJ,J0)C&OJ惂Ajf1̞UËJ0Nl)]O\_O3at֯l~߆._oW-=\[9xݙ NuV0t\8EN(ivsnH~ˏo.|4<:hZPv˯=:3m֨5HAYd̄Zܡ% Pͫ6TlhvH&d]0$~x-DOIŇq曨0w̰CƯLjO\T ϲyk7U%aYauS`xv;M@:aS,JZ~ דc^6i 4Ug6 5tyۓAQ3G,ĎFAPޭ6Ip' ڷ4メ;è5W(dOљUW۬V(>Ybhd]&aX7V%E$OtRxRJ65L~MQ3=ߟř&`brYM6.u6\8B"^}LF@c(SgOӯ ]1I01l5A&׃]}TC]oEQKOΨEs%FTI/쩢fW6qV//5ܥVsYp@#[{Żeڱ+o:ܮGSׇb|d#mJe8 bU, kx 052l~׶RXݱ.ռhq| )i6ʏegΘ񎦳NP5n [dCo=dvn 6gŷš'F _$Cf@"YsڝvlPTtRMPVU1;#]bCQZh4 &i*>̌iU*pvkO'C IDAT3sfa~Yb/vɅ[ jFKdFcVW%'ǿ|ʅ_ zwֵM-)#k&A/[(M[Πѣ{ +STXI[J9Ƭ Cσ A?G&vLzdd'[M*) hcBmRஞE2/16^S77^U a |<-0UW,?Px X3ήH>4-c%y:>P )?yv"zs{ޘd}cCዪh$3u x(ubP'mf]Vd8E8q(U {` jZ&y{{PI2jԨA}=*Ux<+(a<)C<}F(.B[sD&1hrA'3=&۵VeX" n>n>o<8} +Ȋ UܸpkuԺZgFU[w:ZV^ e I? r==]@fVe؎?dp%Chgy:Oz5- 0(y6?L>2gZ7tK~AHrXR4|;յFk4[mE#'Qm/:2˕WWwh'98LڈII^/mHZm헃*dƤD@+_h$^i/OaF,Ad2\.\+ {N[ɽX9b&٦Kb@D1޹}zͺkn3sFt.!ں.͎f}ѦwmϸYVm~2G{/]ps5'&k&M=8`Ъ/c2­zJ,9]^~:ie20-JME|9`chEnV@wn,tL~OzsiͿb$NnS:99!>'!!!=kq ؙ"> |$3pe<.;&^(e4_c[c udב?=aT9Fh\zwR3AVO^y25#}J{y ҕ.!'αIz47ktDd[Z3<_Gd00)rAOΊ~qA+]Upsh&$.8i/G]59QܑZNw٫|xzF rt7:սwA˵Y3}NY@/~I~pru)[M !^IeҥKtԴ4Vju:ZVTIII:Ϗ aJ@G y8O/V"i{QNg>"^ee:n>ʧݜ)G.ypgPNjT^W#VuuǬ!]] Ͻ:|h.`s\̭ snaŠզV8cal =WM\4._i= $97RuGFHpljc#?׵#9sf_Hݿx;|8JIguVp=M'ɶWv%kLgW睻v#pBq䍖!@,OH ]JYouDࣅaI*h~5#rW6J{&Yvf_Q_PaZW8TR$I>~XV/66 :ۻZj^^^:Nx1u~O/ NXkdRqW6(lYM^ ~шf 3Lλg8fj7[ͥˑbSv.Smݻ0j'XT"&ƿ3.Y= yIgwL|;/8 ?& u'}$ټoܼ|X)͑`m'>BI*K9b!!!!!u?1u?u /qp0]lN8ڕ M7EVTux@vL޺Ay+fw _T )r)mԗ3ӺRmIHnb999W@#l'ĖXI8IcL~6WoΉ6cWu_>>T*N#IR&$IT*%IR$1 -* ξe'([.^tN:ٸ癑@S77/d~'2bRy o@ud*;h w]7RdL~ry},IkK]O*%R[#\\r$G8+{غzkN7ř5:Q\|+butpXB H|||ZmFF=`(\| )N|zMxϸ|m0~CsrdxH$ruu%|qXSN@@@ H͍6_xZ^>oHm݊Ñ\<\(#b.^aa!d2.Vd"2BW_yt^KT%,:|' %PLZ J38$ IxxxzL38z! JRxtQȅb>).6l8cżWE0pSll߾ݻ(ֈD"(pqGq-*%q[]yt^KT%,:|' %PLZ@|(^g={gFxxR4#GXn`nQ(\r| 0*J*J$$e2`(E- \\\W^LE@@@@@@@@@$!иqC@@@@8Sׯ_oܸOQSlز%d`@@@@@@@@="99YӥRe4YeYVV$I\.wvvvqqo(&^F[nk׮(J[TJ0nnn(ITA2UZw:S-JRo tAuVtRL=/ h*MBU.)JR*EL魇mf[[wѐT4zc#/9`n{?> 5 AhS BqE-@IA(a74o'oR_ź$٣51gVVj˗1EIӱVVY4г0ATd~cj5d)5gkƧ'TT^vkHcsnwQ/MXi̮SSȀ}݊3n\=Z~!2SGHgRJΎ_@OuZVߜ"a(6'} -ܻ{XY݂JOQT:oF>ۅCv!VINEf~T){x\gIs6 >7ڣk@9ڏhk+;ws2MEӷ[Eڵ/=-{k #Oء8=@3*?$wi)6n/7[ֲՙvM;m_~b:Cun4|*ћ'!?FkuMUk)ΐ ࠹?i`܏ȵf~ϸ|\L3~ѩxPyyu˞+Dmџ9'S? 
37 {kBm큰zJN]7e޳lUQu%gezwn^QYWG?(υ6_#S%DCwaU,KˁJ{-D7EWS:*}uA.n#-^\?h6_Mq /ʒ釟@"V嗶[O~iffnJ.Q|+kLZv_'xo"y]" }>BXNSNP(Sw33[U?*Xp>h%Z_T*S5GP`7$ӡtn&Yo+ ؇Ot.=`cö7OP(|}9n Bqy]y>R27߹ FRn /'FtU!27K">CG+LSz+ EuiDb~:BXt4/*bRZ1!1>-әBw|4߂aWǛcΏ!NW3ğQ*J6YezJ{rbڌXAT:-yl )רu5*Ȝ6p^5딓*J%ۦ)NLiB-ꖫ {N"hR)wW)dgv @?w1NRRL#v8;#]TSgnLvW7vX*9K@_) EgQe|y,  V!ҕJU\A%]CXDW9ڇ=Ƅ= =>n՗p϶۱?+=hӎ2xg=\e`g᎕ϟ=]{.&KmOgPW`v Ksc?ZԢDƷޠHvvIRuɽB J>>ϜI.>#9Ž =Ù#Z8VpV%5ɇ_Wi '3i!(S'Hd|__;7݂V8E o=:lذ#F3fԨQwmevtp)O .vhqdυ:4}7hwܚ6"}vIa_�tdСn(BC c@HhZ6, ]ᅦѨc XOK_?;PqljWh3w!,*0YJxq_14$l|+ |T2JјHeƤ/7,;L CsU,9QGdskRXM}?ͦfe4,؜iD۫noNi::=ԭ pd!cwouN'5'Nr K=h" (=s-@E{MhKfD:>rw`X"ii`r;xC_c(rǔŚ{ØS}Q9WLPn>K1XMXyB)lqIDAT Ӏ]W$s5$c̩#c)* I0iF1$ɛrJmMdR`kӽ&#;yUsg \lDu[{'gl,[Ӊ{$lGFTv`$9lHVt˥! R,i!Ö,J':no-ŏj~&G0hq$2H~mYYb%#N"lN4L.Iq7aSl+*;AEbş"1 eFHNDԙ >5+$4-0j}21=NVj*`4ob59i@|+FɺwBd`]{K1FgDҭWjNV^վ~  z.R:"ZHL]%0ye2v3^*$Ndi Z EoԈAgP~\|2ziwթYCתzCd7jBK}-CDeM]6|3 ]x~ \F !;ǯ|LsǑdI͏Gy͔ k6+Wk*- *ٽ{Qj4W\IxtrS{#].9Eψ!k9w zMh, ~ĉsjY@AwYCOhתMCؤo=KָzviS_WG PM j,i? Z!sW/=SVnٸyg=(qIëWbsoز.}xǣUdvqEy5&{8>䯆wrrٳ?ٳ3q_bmĀڸj`;.;_tԓLK?ŷŬqѰJ.ӿZL'n^CBE"ot!@&X6{> 2д \*P5*5_ i@ZVͺR`xۈۃMOqU4MXYw6yh`;ߤΗ4lc5e X`4pp+HIZsΕ`$t haaP ԫAzwv1r:4I\ETVA8]Jڨ oKR9I2klewҾ $( & n,ddV:Y/Uo5^jo-nI~^kbf L>y>%E}1.z= |wY`0۱n0J оϡI]AJ8jR.>!T!f^ҳ6;%6$4 l_ڄBī탤y[u FohژXI 2p" So1H=7O:!LC収\761ԏʈ\S,1'8x'|Ffwѩwe y>s>E큑ǜb,Z{ʽGX0(+^ 季@*slp"_^%Yc Vyc8V9--T$?}k|=Jg9Z8Y6US;3-7;^(zW [:掃XרMGV" =ObO9yBopjMebB:Ca}gw^=3ٚ_:P~:~|_N+[>wM,z66?3"v}6DK]?']A^xp+0˫SKg-ڀ71.L7p[r/Ƈ,˲F={AAAiaX Ԑ&DFwI8$sF|S{)H}q>ӰԽ?y =trtKg~R? hPƍur(22|eγXi tF7"JTj9{= 0@lRe <|E\z ׿rڥ QF]9lVJrnm-6lP1[t\vj EQz}֙vӱq(ߨ+e?4Ze|:MCڴT wjҳ 72D gr=?豑 .?@OW4ô#&RPb%<@ޟvca&h^ NDqj3xiOS $sהLC5Zl7kDzU\*Ų1֝(@aȇP k|<Ƽ}A1 i.0]*bJeN J6K[%콧b31?éaaĉ#qbʕ+8\ ^(u'y;і={8Un,4uvքp%c̤ pekg܂!೬>JkÚ͗ OzTƬOVx3P^9!sc4zu;q뤎]aAZ|IsKMvQ$yR'qOwkGlM(#+7|ɕpm=;'779/F%[ :hZZ!դWn3asdtC eKPm%kgf 4v<,&wW{Y }ꦥJ׽UG9eS)H8"N2u3ʯ<^6zX$  Ie!%@JTv,_|邩]*x-*sɺz]do#>]`~p?^XLu߂sQmw,X4LG] Ӟ넍T猴N-͘U=VFWGێ1 @ r|$^!uT''N0a1ق/Ov c͐ZѫFN=d%oxLJЫW/X. ȆV3uP;rs3I#]n=q_k5Nٯ|sr$Ϳx|I'28 (ϬU>>-quUɞN7kzXP7G*[w?~}N!dK?[ Zň格M*{<0p ԚsqO8zMWߗi]{@ةsj[w'iRv\z)e )cy ,'3~ړyg |&O?39SΝg@oh2.$Tv%nl}ոvյl`K?l⑹gzdvo[VV?5Ü>׶('*+'=Y|90Pw1 iܲGM(:O.Ġ+nI *m+ʛl=ft/zs{G4\_1*gFa]w)Uy6"vo*'"yNJ`tǃӪ;"ι Rٽ)_*2Srrv'4+g.RKDȸ=DRiCޒK&жO#MnŁ<χUpXM2;-ڶmaZo?E}h(c?aGix ж]edmnmj^C{u.J5ٹ :h7Դm^Ź̈́GQe\#$O]'?xNMe׸}/W^)j-yY넾3]g׀Lj'"vD>g{J\o/; À/"00 eL`iiHW/!2 }yV?PkpuuejZ4[hXRЗnsԨS$%V jN,44MF? FŅTTۧ&GJJ 4 5)4HMARDST TBq'n0m㸔Jz‹$Fpeg3D4g$.?tR<9 w:M2%uGpqjQ,ץ,k)7n~\Č5UTP~ HՂfڥ,EZYo嚎j8Ly4rmӮV n\~}h$*zFv*nMm-Sa_ӒR(S%iQ7,RUq3!:\l Yi=f^v>Pi]XZI<ݜ@ԫf:3TFHdrOAO.-lHU1d@EeJ~"ݝD6XRFʈkFU@{* +:~bv/ZQYA[3/ҵb`V:Nx )jr}4;qY>$*;5<9yXu>k}:5 0F$ZnvֺGcpks{M[a@izW䃭ʹP?XLPsdVo?>;tbcN7#>1")&M?Wj΋']N^lJr+Q5СESɕ@؍ 'egݞrRk,F1_P9e*7`P [55Ч(rSk~N~;W"Ɍ'Oǥ$>VjoJ!ƎG K|! 29[#&YGT;hh lnO/tm-õn.ki06yHbY}hf 4G#\DZ2 8K1qAAb==~9Qqf24p= ǽ^ 0Tjedq2U 3J:2WZӎS?+aY4"P(S]YX"`0( `mfes}\?=xPiݓzY8{:]Dr2 4Mg.BYfd,I'KQ*4[#Jq+=7΄8@-˲6`bH~Ѧ8[VXUi( JKAY٭+9ŲR 4JecVֺpJԶIͩˬ!ˎ{N: >A &Sf1M* d(YSOK]" `PYެiHN1Xp(BdX女.B=e|rqn˖6_9[1}\dͧ#ÆTUZCCպ@Tkm5qfs=yaՏneՅkR@> &tZ>R+l^2hE/EʎUvDp;߻wF%4z-1[9JV&~td Ɠh[\} M?t|w*L4ˍOoQ܁㘲(gu?  Zt<~1,`yLceXa?˱쓂RJ-EVkQ~`8>m+HFލ~yzHŽU}ZZ1xhngk/F.TN8ip$||e,ZM"-8!Y+^$Z/WC1>d&:::--M4ML&#IR"$#(-MMF IENDB`doc/reports/ClientDetail.png000066400000000000000000006230341303523157100164000ustar00rootroot00000000000000PNG  IHDR/:iCCPICC ProfileXYPTK;8䜑$$D **H2`TT$E}ommnO_>}:"#C$cmt)N.@?< y/H++sߦaImboO b#ach0Ѩ_:v6z0fGVS}`;`} ;X'RXG",,bX? 
/67=_ 9#᡻wֆ3^f [ ց8m`pݖﺐUdGZ&i~19eft0n1<PWLa>$9Y#G M`ƠX`af-0āh$9x%pY a׈DGO_$E n/?/Klw1{2i?USIfVfr0JEi4QbAqIJBep/?>k/HTs3GK;ߎOϷC7vǁ""c):HIPdd3YѢͯbyl.>GG&z)\b-9N> ae<@,:X; <}0$}`?8A58 y ``L9:AX"C/$Cr @ yBP8@P!T@%!4Bߡ5AD0!i BaC#Q$D&"QE4#݈q H$RCZ"]hd*2YE !!!Ǒȟ( $a?5F٣|PQTTՀjC&P -4- -VCt}}}=^`0,*Fcqc1y*Lf3bٱX % –aOSUNj\p \ )nNCQI9JSGs :OkR| ~ H  kB!PJHO $2ňzD7b1XO"". jӺ6Ҿ]#IљҥUе=BOC/DCAD_B <$L#yRINA!!C? #Qрї14c/$I }u;)& Ʉ))e]adfcamn^!a͑q<':gg+k. W2iG\?y#˸{yXxyyyny5yxyo~0St(RJeϘ/1:?ߞ?@@ >&B4B*BB' S P٨&$juLVDK$JV(FTE4DJtP !((V!D!$$^%>$P$JHK6INHHKeHK}v.'%(*S'3*(k*!{M\syZyC4o ~ '^*-)(n*)+E+(* *{*W*0XWEꪦP֪U]R=D..]u&554j45)4ǵjk hj՞ i+{UwEOM/EKo؀ޠ!aa႑QQ1̸xĄǤdT4ŴόhfkVn\<ԢblIJ*5ʺ>{d=ltڍڋ8;94:8;:;I;8=tprp8ujzuM-m؝׽߃#=^{.{==ynxYzz6^93[;W7_?P0X8T-8:x%2>d;1B.331<$/'boPxdVxZh1P{LG,Lʼn׌_MpHaoGb $}{ۿo"E'&JNIHLJ7Jo؏ C&0ckܙ陓6eeEgR?T~|Xpߜ2%y>y)=ѓ0 h42&NYSs9_PR}"xyiG`ٱ**WTV==}:zTЩ5F5mµ%1OswFLYg7lq;ڄhkmvk<E˅܋bOPyPa#GW>VzDIǠൡ]Cj=~s_~14l?rmd˙W>>yCzSm;wƕoNOs/Wj}0-E%V?.-䬲6Tyoqmz=aQ)ymlkl;l{;+@z;ñ x1şbG` `^ .)ҁ|3G n! -jm~IIJb{q44Kb?@fēQLhf, =9G g.eQ^E/F`ZH^8@MOĆt+9>yӊʜ*vyj6ZGb4 _cLM=Z~clMg#`jgaxЩ̹ۄ=O[G?, 90/[\ؼ$p9JՁNk}o |;+;'wo_ܻew~v1 ӡg]/.|Uј177oގ;4<>?>xRvrCTiٌO>ΝLyK ߥ{:mnoOv 4#O Y6ZFFl,kezD/aoF`"G#ћaI^d̼ºNpsqGQ3 X f ݥ"EESŗ$eO7,v޷%T+Wc; :3ݽq+v[|];N'?px(q'>M{9 7M|hs/ TB>ACEO;D'MO@oKA%e5S4'f?8dy:ώ`/i+[{'~$)BŒƒBQwb QZR?ʾ{#?0أtMJjZ]424Ӵ+tt^/dLLk泴Jnyjceo0*v$x{k=wsEhGG''zD59b߭Tb{zgÁլl\ռ|kE%'yOWFTT8橛?Ps.9q퓝+oqwYݓѡ"5FO54<"e镔?m7h76g=F8x C1!шsH Y|DPhA8nB#L/VaqT}wFe8#&jbZ3stpϐHiFkd)&nRfv*VA֋lZlC~[圪\i܂܏yxEx_Qrt/ 5 R婛"wEyoK I^,}Il\)rB#ʇT&ūG  ҊN=wU`cL5176+3X䱲>`b{.A‘8Kk[{Gj6gޫ~y!FaiEbTMJVr5u9]sLY &8}W>}lPxDOdJrՑjS-u3 :MZ/^molwVB{BODDQ_PG_QG߈sh Ҭǖ")+W֤wm(ob7Zs Y[""1T@#Psh":9Ec#} C4x_0G"ޢդ{MBREap2\d4\Ȣ򙵆͕}3K#Oo _00JʼnI\R2 F1JuUj꡻4iqk>g2p5\=z<㼕}I~kA+!%a#e.Ǩv'&J''r>M:f΃Pš'G]+9_9a\zlB2jZZg54^lknop UW&MvvzsDWN^;7=5l>{<eҫQ7oߌLorX3=9v.u/_/-cQzҙN?~.(\5\}Sgᵉu ō+sΛ7"osnmپ1?BD]oY^y6 ů@Է¿|#2p^., pHYs   IDATxkeu_{}u~O{3 @H(qYɉ;q+'?\q>_RRl#Y$ZD @y =}>c Apf0[{>뜞?mmmAAAAѣI4M( C=}߽Zc(>    Zѱ1T$fn+RV%    "ZkTh zl6=SJ1Zk8AAAAqB "fVJe)AxGDJ4MEAAA^Zh4DAAA^@kkkF@q_,}    ܋(DQ4??̙jZ0뛞~[o_?|{DAAAı?ۻC  V'N!   ½HE?|=r0CwrRk'_]\\CAAA{8_z5jPΣ.zEtzCs666DAAA^su}E#UF&PB75{qDZ    ܫ,_(t({ dy=>0"fVJAAAA,G`>)V)%    .%,T"    2ll`@H VaD    ܫDʷ Eҏxyt!՘AAAA{bXXPj0bF X ϽADww(    ܊&@Mc&E+3hϷA;܅C   $Phn !NPO7!    X-!de ?3AAAA9ijHla,`Έ,R0fguR7~x_XHAAAA>ڀ )5Hc _zQ7KAAAMfEB_gR( pYc Rֺ1Hk- AAAAP`>c23![g`?WXfq"Qq%qIb5)Zkaق5)(yJi| \.BlNB t   ]%Uh*%xG0N=}f?G3CtT˕嵥fl6[QIj 3saok<43dox1`"CAAA2LcPG%"#Gp+?73"DRb=P<!je#6+g^yS/v٫r(y-4K*c "h"`l@+N 0 bX'smeD Z+!ool??30m*i"ve|;wN^t'qG   w֊AYۍ(po +Ϳz?'^5J9|R)@`"0@@C>|a\-[ EhFhЊ0hO8[*+nZTz浇¯~f'&  ?‚RHbĀAlovo Nڨ/.<K'߸ P.@+(2RAXx'{Jr+^wCeAD`26ŕFkm֏V7UT(R c2>|3R 7+7d]g/1& :NT[~;Ր.έov-U)5wGAA6 IB!ᦱqLVA;"pJkj~{s`ើ*  BM;]] ̍4q `b0.!]pAz3kKj Z@YB/ϽOKй$?K;N I~'o~et&.7cɦi[at6\B%h/- sǰ*?[ ZI&`vƓ7k'"dK8v)\AشTK>AAΑa@ѢMH>|%i'Og?%`wBȒ,63@;K߻<6 +ݾ,wuw$ٙblΕw.4lY޻UwƜ;,<:I4IvEavʝJY&]e{rAAΤ6%jW-,ُvlj#8lj8qAAAw,R[jkaH RvsGhw_g95< !e0S<Ɂb>aՌZql%)VN0DPn]CQn C1@0K@ D) _?=}+8w +'0t+/]^[_fuGQ~t{u};|YwjqgO1z}}}=cuP( \\. ](z^oZZ98rOOٲ2OD@Ȥ8ݫ x(npkvvbMNphf'8!#{uqNVmnnigX,|>oݮvVjfjYk|.|NpS0?Qb_*3}K݇¥|Ɲw  pϢ pX8Z@ LJಠMH}"vm/g^E.D2) ; &;:Ẇ#ff64ȵa #)TIAm0{d*{˰~peE~T_1w.ʅyl F 2O[߬#GӌQYvjs\&mgY{şEQN=S(qLD.Uv ؑlI9&c'(͛7 Bww||ޥ,$Iш(jYՊАR*S0tM3JVNT*aP;:Y!S˜5rۛ9:V6jX, LMM9$;"kkF8$i6Jeqq1Ib!N".wMkښ0 sW(|DlmGq;ZQitJAI!@) C(y+AAnMfE)5(H-Qa X~/C1  pM{ (Yi t! R1A1Y`d -OT򽩢V\1;Xh VQj֚Qާ{Z.&Z><ƿÿJMw/<}t`v$p.}<տZ-gڪ.VZrvf2D'dQR\'I8```dd\./jjZ׫jш8SZr\,$InfBwZ8Bhy}}}eBKPKZyf$Z8ꑅ4+ NrRjhhhxxxtt4 Ci6jl68Ύ~T* zy^WWQev d̵ᮩ+ͦmowW> b1|!'rcv;FU7Qֶz2J\X"gx+AA.Æ}ƦefB $%"J?<#{&|ozC`{O. 
h%8OYBH- 31X#IS8K(h5)V20ڀ p ͮ7 2ƴ⟌?y >!5ڠ!JOt?G^'|@ng(|&b0 {{{;X[[tJZu:Ki;9V:;f i}ݗk׮]|ƍJeg&p5rbX.3999===1˗.]Z]]rq;Ey1hEQ>rj5wIa J%"r*@Vʑ)\ΙEQroKKKǎ"ZvʕW^v͛Zͩf]-R5888>>wީ={ cn޼y(z{{R$rzxٝiJӴRlnns" ҍVQԌc b<] 垾21 enn^zu~iRYwC`EU   iRYxCчmov45}77=B( O0gOdHwZ[OM잸jR9e&2KVY"ǰVÐ =s=^Ԛ+Fe ) eِ5u愭a2Sf&Eb17|>IϽޕyT)AO1o~_|x߾}sss}}}jƍۛ B;#Z\]}zmm[ٳg{zzRm D\7~# rr@6!c)}kaaP(ٳgppϝ;wܹu׬$V[dm>mݽgϞ{=ijb-+a$L9320Il+8e{rd68{ -0@4؍w9Z*B{䑃YC޶N<3hk$y~d. Wd቉^|y~~~mm;;9 k=)I8u)#Z3::zaΝ;q[oh4\D.+ ?f3\FQT\rԩcǎ9rŋN4q;]ccW_}FFFL._宮.1cSjl6岳]?~ɓ'/^永$bsNs;D$7o\XX8w|G}tnnn޽]]].]VQrRw-Lq55NfJtssn?tA*V>:׮-=uZ 98f Fwq ハN<< P.W*avwue7Y   W ,{AJAy\q,yXX~_{Z#BA L(80چcimSlf(UHA)!uP,A1 lETsd_WX5 =c֦Ħ&MDD`#a0Q B Dqޞܟsk'|AD0 r /_uO߮fM\x}w&RwBuZvcǎŹ}c=\hV,"Mӕr|$I= /={vss ;NV\;I@>~s^ySDwwK(g6Wk YT Gr9 ٜՙ\5JPV<^]]}W_~/FQ}2 miqBGG?|fffsd[og^ IDAT3u D C'FI^Zgo\o!5nA5 `F~y;~6FCӿ~xD/_&Ioo/Z%*  p`֊4)y!8k ϭP!P=h!5(q`KG؊K "eĩB &)8f$ eWk3ؐ*h=c+,w{J1tTm@VYVL1"L`N`c{@lY[&"ZWO>i+X`0Jьz澙Vu0!ة"qr8:o+%h4/^?}iZ՜98v'tn`Jn6ccc2ƍ=ܱcZ0̚%3%oq~<͛71r9IuĎLJpbƎ5u!qBR T'mJ<̌Rɓ/ӧo޼AWdqfn(O~֗/_^XXWJEQ侕Mf$q)̼ggGGG5کlZ @[ ˰6EbŨ5{x}3\Je``*,/TAAGĬBW⡨ix@衵. .+G 9<AuQcnWM?:_0)hMa,+0 H"bfЌHu+LѮB1Ij8 H &Q`/6g{ 0k0ľUֲa)fҎKSo3*o\f x>H(Fw!j_o L=]9U}< rٕ;vluuZ~sɓ냃 :'fpD9UZ=X!T-aH+jP;X8ReknolV#MNNiϜ=[VDAAXi(B8 G(&pQ/z}n k}xS;óSkQL % d2CH 4` @ QBX1xcݹ(ި)C[02R!h104z6JBԤ8uB`"$ Qt{?M1%BU (b[,r|~kk|T*MMMϻ6Iyn(h)^zoϟ d5܌Ow.q/f& ZƾYk|M7'e0 [xPvYDvQNV͛7C=455l6zg}vuu5ntdƮdͶ̦*1s\6ƬӛO>/AP}?ےՌh8HgU:|n?K//}MZvZEaFQ͋uƦ7F#N;Ryq2 J9>B)(f N[F Fv{{zXW/򩏎U*==NdCAA>lR`YnB" h 6$"Z__E7[hg>+5u_)a0X3l D3h@aHJNyMc `d^!7kV]M8l & 0+!6PVD(R3h%"`ui;z~*NKbpy/: [R===.yɮgj\'Xd<$I aqĉxࡇ~zq:Bgp͛7{zzfgg=я~O^p!|SK}/JbT*uww nVZ:_Fvg;(իW~bcMOO[Jehhi".83.GQV3IWO]lQ۷ovvh<766z{{ p5,M486v\1v{kkksshdmS+'Z-Q.}o4/b>򗿼o߾VuU4z%1yO܏ Ks{&'? ޸c kmV0qPb߃P 6EX&*%)BWQ?:;T*NMۭJu-   bXXPj)WC1HRG_gϟC# bF_ *EQb X)0\Ye edH"@D`k'3]eۑrAY<=ޝ[[kYVe-``]3G\VȂ Hl@{jftZly.#]o}>znlAwU>0Y##r~xnn.3btv$I͛7WWW]jՉd҃r\Z}7>2Fᦺ nRֺRi:99}ԩzŋ.IΎ2]>;==o߾ɡr\*2%KKKW\pµkתժR}gfV#iy.\O _<\.flghx'pټerJSoo3diy^Ekkk??|,Jaed۷o|||``; 켢(\ZZzŋ/_\Nr瞦k+ ~3v WTcfc|VJ9?qWo$IRYZZz뭷xׯc BgGf')q_|Rvww;KHYECFQCFGGoܸO_|9 B@DeC]lFQ߿#G͍FQ pa8666>>裏޸qN:k jv;8y{ppng~j5'&&<{_~nwr^:t}+83:::11cݸqٳO>w\^nl䔬R /LMM۷СC/^lZNqD.ިjiԵKX@wZm Z D7~|vˣCC2O/|un"JPAkXx|aq '.3tEnIv)   =4 (Z4ע  q֎ ]x͋ (R<>)О|.1\1xeb 7RVSLd,%LL0*~fX?JSfV̖-Y ZDZ) LX^Sal" lT]e [K "^}zl-4۰d%(zWO?f'>X)m5o8RT*iFlع{߿gϞ?p-\ޞn'.\__VFQZ4<<<==j^z7|S)U..%nD455#<###ƘZEFp-9|/N阘^x'NEQUŻ7Zرc'>qcǎmnnfԍ8\$Ir܁>nw Zv=== 7P(r9t8R8c=O|P(q\ݡ^Q]]]Bsnn?'ժOr'ilnn^t^s_YXXXZZu5Jqp8Duk+ 7_uhOt$MZxfFّ}0IULS!'ag}#_"d cAcf+>{C3s'fg/]4'e]EDAA fcc’lqxco܏KKKǏrnk&t6蜊599ww]svᾸ׻ 'sݓ;v lA;}.ggg/9r$onn;wneeutEwVuʕ~GFF:499|ebb駟~+q5/ٱ⳵ow}a޸qcppi.Ƌ욢id!^?z:-C0CPEOz!Ͻ}˭vWd-3ӳ~xXW_C秧ƩS8. F-;SiZ=ztzzz޽O<{je:]?3gZZ:)F^=o|rH RFW$3Q {#?77ѮN}fҊ{nכ ׯW[oGѯma~QwNa(|ͳ0ܛ ~0wϨ   |h60 e&P2ShwHpI1kgPx SR0@)=Bel*Z8E``@Er:C":Qþ@ZM2^-[KZQ>IDNb& D CR %0,[X`Kl fTŚJ_9+A' ,kϿƑCÃؙuOm/ߵD}Wk7 N&yօ <=իW=l.SΑ.c4h]={XkO:uuA)[W/| Gi4gΜqFwwP數U R$VF(wSVݱvmJrܹ~xddR\pk״#pdkl6!iǏxgfkfLtkkkfffrr^nLlU7<>zLLLw'Vz|+++NZOLL|tsdUKP憆nܸ네,n7Wխ+0ޞkъS&yzqqqtdsTISftjxc}᾽b^En\Lk VίpO13yY!h  M{:l}{sֽ~EG{zG4z [ ``$ #@p~ $?ȶd,<{"DѸA5Ej! 
km!4&j v6Z""QI㶷h.@ʻϜ TRˆ~\Mr@y?`C]F$ Sb R@R%@BH`Q^L7q σ&h AFο~_ gGpHM{p,K":p#8$ 6a+++oy=w\,.aTq̙V[o}Q%89@KKK??-n;77'裋/V*^O5,`^){{E  X嗟 -$l IDAT퍻333J)?e_^wܵM gRE; L5jřS>vw (P@ (G )MEpWu:߿uw)yc^ DhTn# 4n؀YPDKG&@1Sb̜00]T"h= eERq$|DM"6 HwH_imj9MY3gQ[fS/!lS"l1vw0 ڎW߼>55؍'$D(#pSnzgT*)TA΢3 xA:(.\@Dkkk;H+Ye&r|Zr/LDnߟVµb}?OpD)2%)}ܹ355uٯ~ݻ~|2j:޽{ڵJ"‡ZrIpSԐNnehp;W*!.ڄ (; Ws{~QhlQ BYC{HꐔFaW!՘ R?D@#`wH=e-, K̀@l{EY %$@qVL& K}`ʀ8d554j8m`5x%ugw.^XZt9pBAH<%rG*JR""+O4o޼y=P;H /\t߿E(P\Y9 ''@,3V۷nҗtwywޑ5cputt099tNkb aNN"۔gr9WZ;w۷OGĈd~~ _B^_____m4jZ\G#T^1Fv|ߗ. vȍ)I"+S^q;1b*@c`g C;JMTvCV@)0$**a +A9{p(P@ (i0A*Q-ኙ7XJ=n|h PZy{d kk1I-$[ԒwAa[0+Cd  ˷ކ+ . 2,"tamKH3.PgR`dH 0 s2`@Z0fc-']`jy7\aFZGkX0|U/|sgȮ=>Nۏ ob8!p) { B^b棣#QRıulͥi~xx(ucdb|DK ed.58ÕRr'IԔeY\\Wl6Q^t)~_=bT0usGOT z%d.oݻwEkdj9瞻|2mnnFI:8'W ' 3'I;N׿>??/yEQRjnnn81E2h (mw@R&DP humPGa%䡊oA.Dl&i☙%٥`ZKɭ:2Z7 (P@ QbM PP"XaowAb$ 0Z5%^y b.3$C:*)PIqd aq]b1L +!pLOiE)ȲdHBϊD$@4>AJrܹ^G"I xqbY aPib,OHH(P@ (Pك#bV%x)xPU;i m!)n Nȹszr_?Zx]4nn]i$Ѩj%N 49|ZZN[|:r<vvvv7j8T*B9 ',BEk=33sYnݲNNN2cNPERK*!,̼`0ǭ2qK "#E  #R07C:~ـ5ǽ. Ux>DJs j%/Iao(P@ (PGXi(B (gKp`pkk,PCk%TJUKڬg<)(=)"X@R1i&DS%˞Ğ؉-`Ҁ&N,H1X D36 qGl@^L̊l5amlmjkĤncq 1Ű)j7:UD<ࠇۯ@$^VNJDva\zU)KU,{'ʊ :yFucF CWZ\ː^O)h4Tk>+yt~\­d^@Y9T֍qIOPp8F.㋔#;OGb%B$!aNLLLLL$I8MhT*fffzeAɯL;DK瑤`ax}XvtRY[TK?Lj# ċi|ttD"!q[NǙʌDSTK>P (@Vj|ӋQT $Q:)P@ (Pڃ_+ 4 M}6ǸZzð3Qť,OVKڷfRajMJ3֬qAJdRP`2J,HCpH$E|cG5IdX*[K0$ق)PD MA`3ҽE[Rx&50Jݬ J`-A |Z&=3"r y-:;A0_19!,..+jovӑ WƏF#fp+233=113DQrpp ~Ťɩpx||\E/FSAITU$ /*< R*DƱǣ z*r 8#?yw"`0i4n&_ݩT*R}o<-r|\t/D @Y;>%$NU44.% ?j<(N/JB婙>HAj*ٙ;<ѭ\VrFl#)$.$'<9dT@ z~3OS:*qEQ} ̵Zmvv@$D$7\Ĭ{g:B '}8qA$N!yK2,5^oHD (J.9A""}[.<qzay7`Z @$Z֢Yͺbt(7[ (P@*aaAe|ȘeĀAL Wdi?j0Aɇ@{P Rf+=EEhjoT,U$FT,RԽz*&h}_,yMO@wtI A h$b$x,#_)S[pח]W*%%0]<`l֎[o R[܀ tmbxHS$0;|7zVnG^2'Yծ\DZ"޸qΝ;IH-e1R\rW_}饗>88G\&# 8JRPy2I8jvMDGGGP)y|ӱRKAYwMnV}Gp|Y7" i#')#(*an'擟URF|DK'ױ̅|)BuNbPDEZ<]0KJ{R|{zkaY[ɋ p4)g[GjARvNtgflRw~*P@ (P3&e Ʃ2)gMG>qt꣛w?C 5)<JP/t}JCkh RPdG0e̵&fS&GS (P@ a@ѦsCFg2GhPg}ý` HH2~%qE0V4$ĜU?plP"1P fI |J*s֌i Q 0r" #j㶲V4>2w{oη0;ubʮ崂@T"ׯߺuU+P "("@\\\xoY|FJ%_@D0*0/NBYXU*AuiYB9私[w"{s4%tPp<ժ1Ν;\.?ya.//?siaaZHWa8 ]Dj`ժ "==+vNE{}JI jnjZrG+q_.)Z +'8ܨ0"T[[KK ]|[Vrщڵ/^83[\JAm(P@ (1X[Bja6FhLCFݣͽ[hB{S)2ƞr (  ʃ J4ȄE6A̴8̻ZP<J)˜K9R M*$ l R i6:'iN]%D*XCU%G鍻;}<*.z|(3~_x|xd)b.`fqF,..^|ŋv[rxxxĄtuRVE70"ZiۭVK2wL~<1-lXJRRV< H3Pd1Y |3`wd E6f $~8:qQ'C׮];<3Fj-wDϏ,#h4-IJzauUy{oJޘQٗVb vaΞb*P@ (PB A nhTS̨xF}t @rLGs! PٯVjJA?[MU4uy0`*$b&~c͒;$+jԪ͒x"4aK{?1t&cfAXq€ąj:>Ϸa?`IaqKZeqZY5.< r?3ϗ|qFHe+<=\:z"+(v;;;.\|RRY[[;::**1du1hw]yV~e+jg5890XID,V3/{A.^$nNln'p'}뭷(M|UjeV"\=j2rG`ggX{ٙ!{w~]@+!M1Q ~K}p䃈s(P@ (yG )ME>;C`?W2NN/gA~4#:vQ3J<3y%_u8Bv- X#WE1xԝȫwZ;o"w# <"$B%I2A+ ^`zƯ}:vP+ HRDj%p &{0kBF (P@JMU  U )z~(e<eq-pɡ~ـv*ˋZjE  D?7xyuި2ljDjmZj-l̯d="DŢ245Aijzk[H ,<9ٰ{a;8:tOT;i|de~6I`(vegQҹs|+/_^]] ^vAxH&HE@q2# g8Qo;'#`mW>%x2:iNʟo(  F4 573_~ea_KjL#򾋱|dwG^3!O1| ^D bk"yz>y|9pB gRRVG#i 'svnaOJƤF{כ׮]=ԛf7z[ Zx1JЮ}ڏ_{eqjo{7͉MƱ^T@ (P@g+=J fj1QcJZMـa=T',DOuLۿOF' ˘oW.^|~vAwrTbfYQ@ (P@`H[F373=|?Sp*ʼn6MM ,1 "O= {u K~E&MlS0lj5TLulFY1aQ=Ra5&Z6FFkV,OOۼto`4CmOv$CA)'N)vww~F!)%nl.///..^zunn?{qc"?岴x]ti0ܹsGƆaHDjT* -e=S4 NKZN@ .@ ~]SR 7u0ǁ\x.?V~^)&o @)i\jQaEQR)GGG. 
b^(uʯPH:)‡~8 CcT ҢE\O$\̡%!IE&p8WNgp8VkyyYN'N~ܹfDGQzrY)U.EDZJ0iH!FQF^oooiy0(gZ I^#w$o9섂9#O^ʑ8[h1[T?8X?w߃Q*A8([̴KsK0GܸiYZZrFT"}@ (P@Rb-p@zaQZιQĥהA%JQWggg_ ̬H12I1L؎=\3@qr1(T ACy2 N)%1ZcIIW$NJV//Du lO# y3(J~MNV d8VU4' q hii饗^:>>':p1??h4J`00 $)JJ%qUEQP\]O/%t~~~vvqJn^yRu4o_}:l5x XA9\/|׿tEw{ݍ{n߾ݨ֖dJ%n (P@>W`%j^QlkMرPqpFPBj827)8 ]ڔ0(VmΘ@8'F[Us%U/yZc%<Rb<[yȢ@Aˈz&5@ icj4'^|e-JCJV NH }ɯ# Ν;v[ [y„#ܖ5Mq0s^?::#O٬j{{{y]qw:ɛX?qaAJ%M X]]}:Jv](.*ց4Vc s}|3KAݍ;o;AxR "sǮ~yy驻>uX;39U._h7 (P@ |"  ۚ‚ۆIt),Ykv9Mp@}y?;_mM:vic #mhuI>v/Z)wC$ΗbȟݻFC"K.,CF(JBgf9==AODkh4  :v `l6'''(8d>OY$J@z}rr2ׯCY z؛CDϟj]ܩss˩YCr=,ƔǬZ6ƚԤ006KR$>c &'ZJF흿8!U/v|F L4.'DŽR&8=Q $s||̓f8JsGߟj;;;z='''ŷ22+5'&&0f3I)Ee#1LOOAp~nZ<1ԼKgϞLap0zb1Fr JmgmeFRJNl6eO9&ch4T*SSSQn Lֺ;*$BIVWW'&&ͦprZ~ߍ+SH;Xi9|tt4ќzZw|ݓdWo1==:>>#2'Vv•kQɯ<,8[W<"<37ٍ LDK.^xtttpp ItljY$I$>CDtxx( 5gRh4$xrLY^+lQqSljIkѐlZӻD$JC[[[sss7oޔ>"[U2i C1'&'G}BnDQd ϯ\ruo-qi J ^OŕvoܽGaaaAt7B-8n*嗫@ (P@x)- 8`L[)@&;왜Lk]|`zi| ?\wnv9Ѵp`#(&3+y ͡ r1> fKDuGa<#"Up Q0i$ID0GLJynw_&8  Mm )JHpA]:TҢiѬDNpȳ۷o?ϟO(N ~? Ãg}hlnnnmm S#Bn?ji4x*ErgR[Ǽ(ql{{{on4333VK2A)1a~tYJדM9rʭ,jrR>~yU,aQM2$ @E DΌ D4y||n;ibR;Z:ujaa֭[nwĥ";3`0XYY<rv߿ҥ[o<cf$%AW^͟7q>1xt}G=}ON/ݺ}R.AK$ںc 'Ƒ+X<<<<<<<<<<8}ge_kKNI lErYZmDś i8+4h\5xR`2c@yBj2e!88PW'\YnC b m)h ,+o$ӃG\]ǥa Sd7Dġ,S[c:/L% JCKϓ4w4-aFbel &J`8LG80P 1hHZ0 0aad1~RG4qE[[[ƘJ2ԃ B^xZ^~])T!Q 9a^'!ccc^osssjjԩSے!Z{=/_z~ۍ)P; $1fvv_T*[[[oΎB3o *8oj̻r3ao\_ hV g͆)8?~O=wu㭷&&Z~JR)$DT`2gQADQ7r,g.';<<<<<<<<<<>HQQH(RP!B(V2V'&ZCd J}RC'Y$J*W>f竽V5r"6P&ܰ%wJG[?n?.PQ:Z2XW/ I`t=(a2e;; _g0La5(i4ܹ"k/eDABϞ=n7Q,FT\n6F# r,nW޿%Jr&"T*MOOA0 RT*I=55uԋ/x僃Ufv-N݀Τi$O:s]p޽{ۇ'ŝ.xԔ䒮]zukk~RZvvv$ N&X;#c!eZVkf̙3bZB"#^ׯw݋/NNNK.\ۓw,QŹJR*ܼy͛"qrWn@fZv/?7nӃRP*Me7=/-^~R1["2"&#8.ue"5M^ޗ1 C p( %n1 Q Q"Xzrf3c+k0 )5AL0VP`1P%/ 8QL򨡐g!3^,S >ʟcsl攭€]V[&JC s+; *#VڝtN?c7}bfȐdu؜q`8q mgs P0CQឨh6/_P #n8BYnݻkkk"#@]E]H6Mb^ommLNN;wm~ OturjRժT*cccTE8n6z߿ \.;A٢8^\\ o&3jnHQUH9Q{& X@zɂB`a5(tX(fXgD/\Hd½Ky G*<^3+r `6CI ր-]9&k~aG篘O3-IoبDzxՍŲjĢ6NPppj,+sx fff.\011s8$bƍccc.]RJ}@'Iȫk)GNY'd" THjH߿yp87Iy@z!NRjzzzkkڵkD477}( ԑ9 ݽ|ہ#w^ryaaaaaViDNʵqOLLĥw6(G"#7QN2\B9'xZz)""f荇*aaA242f47, 8})J,.}7CDVbD!Q8DdD* A05!LA B+HnR2r)bI*## *Nǡ(O3e(BrS K,*aj8PF)bQ'fNgg~GORpvY|ky=" @4 .LMMu:;wticLTr=8]+7xCvvvկ.]*w988{rR4N^3s<0lKO!O͐(P@$Ь0wBA&.d顩L4VhXBkZ#1 W_|מ+ő6vȐqjSJwx ۣw$na811133TVnܸ$T:iHA̓kkkcii… FΝ;II(`DHrtp4HsVu̙p(Md[V^+rW#7Ȕ\J!<En+ivJ7KKKW\i4r9ܬFR?%3MSuϟ3q\VESXE24`j|*8 j@_O N*(yNJ=yxxxxxxxxxxc`Pfx٢Iv{GF`lk~lTT4Qdˆ`ADj tBÀ@FjT%jOBy2mkP cDA'-5Bp#TCr@D0Rtx&dmk%kk+۪ћlcj$֔R[5:ƦIҩe;ÒΌ{vy^%6/HP :yy0z:gϓ^ IʳV)O288rZjz]vv;;;fffK"ST7WBjf!*JcccHe._"ℋܐ)˵ZlNNNNLLڒ&#)N X$8Xk(VK 8#G$bА^+nF)$FD)upp믏z4M4i (F1999;;+[YYt:iv]z}rrR]۝Z&%6&{q P+g)) ^Y|,+DX&I>P7(iLdAQN4Ǟ{a)@0 J1B(3"TR%C!̈BUT^¯[~ւAtV7&,]*pGpH FhA!,`.q,GØj:{q"Ma4R4E ) Kac\\Db%8PޱU+ ȫHp1\Ua+Hk$I$wގhaaA&DT.~~ 1dB.i* M_VKֺn~iQ.\ .F\n4JL۫SSS:u{AN*I Y .//OMMZkTrppv`0z{…zl6em:NM1IHr]YYqFN.rAKk=;;X0b fij.Ls= )䂭'l@&C Ӊj9IF=f s9Fˠ-Qd )|g;Ouh; \IAz^`NgooUT*MOOWUp4+6G8D$Zfa7nܸ~4M$Rk]T* QclnnjK|(L>(6q(ⵑ5j1p#6.O#a%pv_:MOO/--9ƇZK+GUJon߾gZ;;;EZrg68e1{Gf>>,! b 61-34p"b$W\kޟ9Y@z"d9@?ݏԡ q‚-s޺ KKکo]ܗ§듂L'-4!w`n028]q'؋p%@5~gΝ[|ۘdeC~myTNy8 i`$p8l6R$;uIQ|qWZﯭ3ޞBr^,v8z$I1NG 333Fy{]n4N "J~oD8p<;ӝ|p8V*[=J7\J"-RWj*: i?<5?8G/]T$YYY3yJqWI,ju||$Uk-˛plc6\/hiE3۷o0 ,F1C`0vNG\-@ VkjjJH"G8'4ֺRĤKqNydy9_wGR퉵n_z#$cccFh G[^nw]5fggeZ>e}\,.r=(kda޽s'vgYboL8(HuyvFIp"4L'O( ~|Mk-R0 ` Vڠddj@3l6ER (}sjzZ%kx0aaH)AT _v珶;VI{$,F\932G81#0)LejD_|mbi}a#8Dpb  ǬG**2xWA.BpM=%q̔T ńQ!! 
"mrw:EdpJ9v wp>[q $)8Sm>;yaG<ֿCVՉ w7:Q7!zND}5y-`#E9(D@)'LpP sff'?_[~TĀ `Pڀ q6g71C8`hGb  QϾͥZY`s`Q  :h/ä !25O9FNmXs͊$jib ctq̂h"qt'lWe!:Q6ieXRK+JZ;jMɉJjHA._Ý`q28q]hw+>>Lӏbʊ e qV( 4Y &tղ 0 :@ 9g]_R9נw3Pe@:(7vcdRҫ)oNq=QPRȝ!)(P'n&8v~zw]6FrX<<<<<<<<<<<>HD̪$50@BRTGn]— @ R0""!@y$G %9I0yاS$ #"1{}9^Va^ƒ0,%o/;XC@BF',Zse.(5枔|#E-MXtR|W.˴QEG yuu|MEW}R5!rF$A W?¿D((ɿ|":LJqe{dr}9 p AmQH'( :8O}4 ~<<<<<<<<<<<XPPqpLa<%Փ~Ϭްe((HrY h ىP#Ee l}a·R "710KkY"aI/lH(rd1GcM"A)#^.Kx'F*F:n}ıLF Ǚ#PA-"8Ю<>Y8#'y 'y.Yq(4PX<+& (N՝ȩG.ɓm{>{-ej<("5(1)8\˗C`h @Wg]]$ Gc9c Sq$TGnMk1<\E;_+Ct4 r,6;ˉPp#ɆًAS4{+to]<=ˏ a)ݍT\oZfAJ5mRuwm.j |_ѣ(V)@cLGnHX6cO|Jv@a~| Vqa x3x#xwW`ۢ *t"~yאg<]yNyg[-a>mq$]=3 y_Sߛ(iм˘a$`alEp1Q}_/E3HeQj f'JIDdOy@:W{+LAuTMlikNAm`tvtPkl˖`E`ѣm<&6k+A@{O;LR(rLJ;MOdkQ<Gg'@>fi}QkpL`:qx7qtw~c?|Ooǿ5l4[(36hS 뭧KpHlҧ?~K Q/D)0Py73`īYGXrZ60>>Sfc|l wΝn}°3afF1yt(fnġ5 c,O~K]{Y{ZYՖCEƲ"'ji@i/ϕJ|0|ݵH)>0~l D4[)[|ͻ5aZT%fQ4<3?W.g~[wQcTf,XZĈM'(coV(CB( P2ZDPi*"fGD(f!,E #Bye΅U28~+,ȃ@ dD RV(Tmj$QǖS?`bPD<wztu'w^Cj z~À5=FHRs%8\;^{vׯ߾45YFjs#"܍"8)86vKo+Q=7Ƣ0 B9+AJp}xCJqQ%n-vG`p ș"qHo?yARm :GwmyDpq3ɒ%vRλ9xrGQRGlϬ?ڦ\B!,Q )"(@@(r &9FL IIxoL(UҾڂ" HW0tĄJeFaC@VTdIYJ)boR"T&c~g)V̙!ۄ2͈xxxxxxxxxxN4ͺv$I%~E>uC e|5濥 AD@;FC ȊO hX}hM HW-=',at\Rxua{wsݶFGcfI ' c`MjUϪ 0yG$:,* _я~:rrM8  <[EY< Fʥ3y0ř!E.:!c;%`;cʘa&'G2!"H|LP Dža(8X$?^OyK ܋>hC$r)Zus:@,Nl|tH 2[ByX^9yϸ22DL O&)9 }ሱo@;Cȥ~7s= @9/󇺲A34!5 O܃JR[ TO?XcM6>!&gS$Y (XQٱR - ɚf  k 5$HgfɟOQ(zWjiT =:dOqI"Dx~nJzCqYrh8tq#:nQqQurZx%pnpR\ b`HD@Ad)(@: XHx"aḥ'Ep0 !`` XMc2H)((M0Go c;<6= -!BBӄ(D-OCgPt,g0A6HA>0P؇T,*Hthr|p) (ۻ¡$'35%NufW#MaH Ņ_~'BrxxxxxxxxxxxxxxL\f ʞ֞'M(S3FQzŲMOu{?A=BtfAy`IR Na2Et6YIn-'*ٽh+o}?AFC a;2khDjQ"Ia4R wqRiOuӒ$#HTP@I bw愊L0btz )"TlޝU=;qfY{=UݳUOـB Jnۜ7aؓض][6xnˈهIu~H~P{WG $8EDnShL1V&| -])za1cXI g?J tiB.`9s}>%uvD+La wq%H2`Yyޫ'7kK >՞=< DnL Pg (wGۂm ;/ô!m7nNckȀmv3_x)[zͽn!J<53qpc1=64ASxCpx4qKEM?|CrUH [.kP $!-&,nG;yP!'`Rz7e)98l 4ad V)KbYPF5joϔ 1c1c"H)pH5@@G7>4J)gG߾]| S>~Ѷv #q>u7[|n4A Jlö(Ts3F8P %6(l(ъ2 L `0MXf)a*Jbfr!Θ8ADmpfi#) [rΥX gTSJY0Q5B׿oPJRTuPJ^A+)u 4ʶ*|~(3g" rnE[arz/rSh:i]I~ٚ$JHD-R^/eTkȩ$"Hg ^a8'nXT-ps(/政r1%(L-c1c* tҠ$@!|0ZBJծE׫i `ZkPI5H`òa0$IDJPTF(BAn w"Ro8/=DM LXVF0M&,N31-%ݼ i#h%$N? '>wW2%I ҍ8a rp(  !țQŸy= K qbfIiB@sVԠ QZR"#;¾2mz mq5ϐ"h $A4@wO9DW H,(%Rw4)9A()Ƚ@D @E(BAS, h^tChRBPa®4'"!@׈b(ȋl"SnÕm-,B B )4SpvhN=@%) ~8AB/s"d1c}X T:h"! 'h8eۚe4lpɃ7iOׯK!N C2!4wið`X²aY0KdgT:_!ҝ _:C´, ')4aZP^PY"m lҼG AӴ@B %e%l;)9y|`gnad;\Aa 'i,bT-.|Bx#C[iAph/oq"tΦ3Je‰}@BB.P)9E4"]~ 8|Dмc^Ci! "@wBޙ;!M.B o93}K#hI"^l‰27 BS3.Zw8kX/RRNCBc1cnlKIȖ%l &tU^8{nܰac;G_p+W=D"Ai,H x Bր!yFzmC9) %*%FI Jk00$ eF ?f>=m$#t:uvl=ptllYqgjiz*Y| <$iiaOKhi (#ڼfjDOΛBKCqj%"[ dX0Paii VJx#OY)RH9/%(;D(/ɭ3lqlgѸQ 7(|H9{ ʝ! grqTU(ZnB9a7~!,*kID gwPn/RVA 3{:.!hD$ow`Gتw&>0`1c? 
ga+@N[( bwqB} p8#X)%^=:6oѪY_joVx(+@ DЀf/a{3{$oa3t /a{+S耔 2%*PPEvX alңcrLZ;Uz]kMܢt%ĉ,G+"Q)|fD%𾀄Xm2R7H.ݢ*ڨ{IPE ɰ 0egҎy7Th[Bи{C"^PCpBT$H HAq s$ݰԏ%q+DJ혢) MfZi:QAҳPTzB)qZRi^؈0bv磔Fǔs}ZץA7«&YI (sJ/ 5Э w1czL)[*e@i`IHi؎،8r48CC78aÆU|tźy$L$"X` Ms7 @²D^HpN (1l"~ ׭}G׶MKxKl@>4)*ܰtVϹW܋+Q eTĐgʿY@ Ey C`sJKaT{gU{9c1XKX JR-Y %,@WvR+4HyB;lظ]/_}&ơoۆ;/wH =tʻAPdaw.r :p@}:ѡaFjzzݚUv1c1c5P2,҆`نI6M1q= p8N΄mZmӢzun?~sR(!Ae)>oBvq&(>ew%@*J @E0Ld$eVMI޻[Gt H)m[j9TBYS."ٵO啇=0rھc:E*G%:Vf(:mc1!QН-mUH`P yCsVQffc޴k4Ԕ˂$ٱ"B %*JzS6lo;)Xt-2Scw8AGm(=Y"t}:yȅ6hGZ=+쫿2c1jO!G*`ڰ,t$ :ҦYcjgMos-c-nߓAxw::Pf?MlNhԬQZ۵h)IfVh٪ix;l9hc˺1c1K[ ( :t$t$ bG^tYt]ߧwlޡYFqn;v޶m=rݟ?()i٦m[B* tOZ|?!ONHOi8afM2Z4MkݺYbjNl!m;7C 1c1_H)( @B<6G $7:thӡC;9ys iTIkZ Η %9-=9%%1lRJ)H!mϖ1c1'}4']!t.hpdiZmYvh%!Hu]t],tᮎ9c1cJi$ CRx?f { vV|IUm[)(wH<,c1c1vDJDEGA:5a hZ* O*fc1c")A( A ICH31c1Ah*ȯ~hI%1-YD 1c1ءb6R "蔢iP^`eǁ1c1X݋ dIKf+  ۆeN҃ c1c1-mI TE4,M(wu21c1ءbD6A;mU QD;sp0c1coL)[*e@i`IHidi~c1c1t KAI*$ lXm8c1cKleXʖ6,6  iK2KTc1c1V"@mB RH38c1c1V)[mT-a!K$1c1,!l$Nђӑ胈sq1c1c"P T Q!\%*1c1OF|:|B4C`1c1XeJH @ #@[1c1Ljt"?DY#Q@נR`1c1XeA! OHt@"/Qa1c1X4AC' k~-Q]Tc1c1VY T:h"! 'h1c1⅂YR Han1p0c1c-mI TE4,M(Rn1p0c1c2m"*(larqc1c_JR)JKB H6 K c1c_$RJX6,b`1c1XP2,eKeӆP%%*1c1@R J瀶U!FKB)$1c1ϔ| R)XiBB!(IFc1c1VYB%AI%#c1c1/E*ND' @BYKTc1c1VI tiB6~?d1c1б@P  ~:I0mz¾2Ʀm^?1c1Vt"DY#Q@נŚ[0c1cH '$ lp1c1cAh*ȯ~hxc1c_` N)hH 6xc1c1x !ATd`mX[1c1t e[R0U- bJ[1c1L h $`(b1c1cRTʀ$d@ $<1c1c.a)(I$ m8c1c1Vi6 K҆`نb!-xIFy c1c1/?P9mUH`P xc1c13e+BTJ`ڰP@JsQc1cF @Pt@'hIHAĹ8c1cKS(Q tB*GpVc1c_'H#h>>@С{ "+{[қwL=ao(c1  H@ #@8ںz(&tҥW#og @cfkW}ukB*&ԤԤCػUvCc1; ҉gD]tkn)p0vūܽ}ʽ+~ dڵϺxҠIq4`L_|7Ba89>,C/9c kY7 K*|c xÏʨaWT e1co"%4]4(?! }B 8;y矓/GэRJ̜“o&EU?PFo7Z S8c ,Ny+[T]{q)EA9f1c4ACHO4?4q' IDAT qNeC_'=ӏ!ß]x{i. `5hi`Jp"@|ڙ0rщ.`Ο2Chk.=oϝи6Zԓc1X]tJD@C@O4(/Qgmk%!а!+3PW]g@ Wlv(i3Vƪ)L#wҭ;ݴy^G4g6=3ڮWi5V u3y(E3XZ}7hjmP;Wnc1c_P K*X*[HzvY'tGс-[-?eC[E7j&{ݜ9sX']Vfӏ$cf\~7덳޿۬Ը-[ˀ?hӚ5?aԳ ڲ?an*b@*WD^Rn&>j-MW=1䋛W '(H!>rZSFܽٹ?19#qZo1cA@P$LUdKbyq~#Oz1+wlbgoj}ֳ~ݗG<=wkXu%-ͿdWAV`mՍģ[PѺm:)j}e˖-[6oO7wUzY@yp#/lA>%rn|.ٕ詌aD7o|M>Qz`ia0gK^gyGy;{V߽te-_xU7B|k]D7X 1cCF)[*e@jR²!bEn`Gש?=&=o޾"W~>G VO^.7Ǿou.QQ# =uW>ͅNp)C;>,sg?"˘%VٰeD\Ԝܨ2\<\$w1crx*6v,C.L2fxHaܖ5oy׍dAj?6;9#-s-25ǯy+TZ5!ݸ;4W4]Q!|_π[:MKGUUZ1Z;W{C~`TnPEn|^,,ӣ{4I*X/$Ʉj30c1vhTh H , +vP;?8/ 4~%D y{‡@Ğ/j^Z NPC]97v ٟ\ٔW5{;e|7S~m f3.-n guرdڦ 7M}!=;[}q>[Qk㜩`1`YyA2-; g=<=G{G?O'Ok/^v'ZГyAKg1ba2 KcV-}ֵ(Zf2ivT1Cs<M\ҮUJy3\ @;os9kvR g9 {lC:2iT{~ɻ_|ge_`ƟݧY%Wr ^aV2GJ&vHߖV]U@'׏\2i-O1k"Vִ0ҝ 5=万؞e:STە(n|Fitos}yyw. 
c1v(ټy΂Cݏ0K"@&k[lZȑ($ULhtdg7XjիS>Ϝ Аqgj3tȌ9]沙zMu#'=9rvld?yH 6k: ᙜc/=ch[kW(9$ @vO ̆@N5gKUG{\ J<39^Ze'l6x||@ZL TJRotCO<B(,"aPR]|%w8i"6G>Vc46O&4G{3sMmoNpJxݫ']2 viUK6l=;KQnj}*}w|5iW\0|bR+;퇝?GgkѨŚLWd?+t}Gdy?~O3#z4db/bQ)HLNDrL<-H&UKH | @o<54u{A#ȌkG䭝 HNk1nW`L@x㸧<;" ʋ}N^RKڅ͠vPW}C6-qaUra kWn]6;7E7o!:PE^n+zY9lFܖNܽy|bX\6w59ЩcӆI5c>9^g׿=@+ wo@\eHw\7zR;nzR~,{5޺ߙg0c[`].@|aʜsN [Qۣ̪6)GӺ棽jˢO5nٲmOe8>ړNu(J- >>`8|SdW= +FfM^԰ƙo??zO0&0jb&=$MPw X+kb4T^M;|a/Ѭі%ȸnj_>ymeБq~[`=ӡR᭡Pu1Rcw%r}݊ 57IҪ/TGЫ޿AOwwB"[C'i ֩0;|ZJA0 Pvu+di{IU9]oЌ>ܭGz<)O z<_ԁ\VfܾEQ%?lWG_ Ѵ^[oj$58A䔲4YS.#(NW\5$x]cݿ̾ VER-E*ND' @BYrPdtl[_ B|V͝xD\ nǢgUܷ{эųn[> @-#,Hݙr }Ӿv.%wc(xx/>k‰/-~}u1ۆ4;ȺH>u@mppZf;`e?L3VnP11ZyK&_6װ?7}whѨYʚ{^F~[[^y;" X9o|_~-7;Q1R^`g~y}3&s0b4ky]]9chASN>y+''L;Y3!}VM|W٭_~'Fzm1UjN*?nkW9|tPgYtcG ~3ffXrC0F{_jź#}e  ] 2.*+?5Lql+^@a>B?>̟y e}jhئ ߍ|4gj^=Sbh:AW6oBO9-/$nfoӺ)@>ӎ輞bZ nr^wG֬F!K2L;LY_9/79x'n^tCo=y^Ǝᴗ[c}ܗyWV.X+M<璛3QMc1vaR^7fڼ}"puW>ͅNp)C;>,,sg?"ӷ|w^fc~ڟ8b[o ۴:}D~*(Dt1;K^an#nCU 1w?V.Yz[5ܺѧwtwe_~d.<={NiD!#fT>W=ؠuz.~p5%?V/^츰$Ma?Uq߾kq>Ig@asCoYP֫e1\lW"gu[)~CÒڝ|?>l%/X'FFE{_bn>  H@ #@TM_{9oREY-,vy-T{b[կГ?cKZHNN'HTPy4ݢ0W_[^0-8/<yA@2QO1#~`ѬW`.UP Ͽ@j4Q۳;D>[qNw*sG]icUQX2S -lϦtٓܿw3g#7['&o>4p2s7biZ۶3C;*\~]QS*d~iMl?,ތk?Hg4,ubY*Q v9w(莑q;9ȴ9~wIƞZ.4Ģ|Ջ#QoM;ѶenZS"|hv2} 2g-i1G iN~..TI/X/DJDEGA:xA&iGSg^;Mw/nwMu܀mHh%n(䏟VG5bh;o~6r)-?}_`ߞA&vkyw.}[z<.oٿ5߽ykTEˈQbdmzlˡn īe3+ jZGuyvs2iG(@^y9:@:N! E~&o&{S;= %l=jmӧw5 0@lMo?nXXTȌg]yӧOQYO^n@JNo9j„ &L8{tN0x_tѼi1S 6T*h=ѷXjz5vD X{mRbW!>V, ?r^G!)_bn>H '$ l|n.]zq?xcV`07wsvtznA~b+Ffn%#q/~yZj[  vd}s 9UPy4k>7.2!.)t m?jT7;Z^t1Ј_0wܽW|_kj*Kߎp 7]EdǢ%5 Dj2"}kJaw]{JrW$_hΊ';?^)pc(u%cÁ+\< `s:dƍo}y'tnӦ}.9~O.~z\℆n؞E;{66vĈOߟܱLbm@{%˖-[9;EYC].~ם3vĈ#Fr]X)K~ī]<i'OϽp;>F``ҹ nXG]ccEʛqmlW9ucRCE^$wmEшpX0d1f[.ݼts0g ,muL'(Y ~#/7鈅0T}U w q͚J l 'Mb-Y@#0ݐLI컨X+^wRT0jᒝ;*|iJZG-5 # үԒ*[<@ {Y4{DV\Ao*%ٿFx@u+/1XX T:h"! 'hsSIFۏ{Ӯ}|+#v7M!,p)Kf<ޜPI|KIz.xvi΍3Tܳ͟!,E5Mu W}r,k4h^mkq{ǪC/$󞼭\|c]][%rƝ' (w'^]9w-dٿ/{[aͷOm_? SOѯ}Saۯ]UJs1>Msl ?,1k|8z &fs @r/Xzgѐ?+{{qIUëQ]jTîhT%f~_#jl Ԗ]zʼidֳv̲EC4#jk|5L%@^$ڜw;;??-_Rx !ATTBcmE@' T(URx]ي,צ,(?OJH {xL-]]jH49iٲjtNO]:Xx~{Ot}vEPy/"6n E;=W}vFulrDoteT,4zߛ}݃JBXжī1?-} +R+1rBr#+X[0D%( .i'El  o[lD+W0J5Dߥpy3JhЦun 8+k墧&|yeuNyFnT:݁팽k).8|Tbƾn~Ϋ IDATP'TuFbjE}GoyfIi!Iミ `}IŪh`-m) SҰm؄@@ YӪ5KNn\s9ڢ6}{D]y͘QNge$aj+Ď7덉K?_6}ɴ ƌQ+}2-_.SI,QWoIk".ԧOj"/x4vtxߗm1Z>@|d4򒇿k/|a]U'ug;3]7ԖFMK4>Bئ ߍ|4gj=& t5xR @i6s7iɨ=GXM@ZҘ_t^_¢3{r[1atpUza,Vkkr5r~|v~@CujV!XvnI~i|{n[H(OE-XL`+@N[( b-VAѠȫy-0/iJæ*7=c8jи{!{YDE *Ymm']4w['eoݕO''4lZqbO+ZtZūӉ;S(:qXOOOm٬cE#+3O=vbgW^vQ1Ԅ 81cJy/xsZ}KZHNN'ǘ"gVŚ?fD/5m-4e w2掅y'G"#ٺhij{㻯{Jvrcwmu:7 {#kj]myeM/\OyvDnx[qɡZ^Pz7K-X7R& $m@΃>Ȼ0?qһgνڷ$qһgݺֺA7hٶ[G4kStī?M#{=tk LNl:`ٹ`1-~x Y7^FZɫ}S?.d296@7_!"k_O*S򀛎TMe.OX8X#q߬lۤȺ_"zD.Sՠ ,Ѷ^O\>(|ƔE\g_Pfڈakk}[ݗ*Zt KAI*$ lXWc1cROz 侳6@\t}aw-\-;[2 ι{V)P嗏f~7a3 /yMGYC]LꝻX57,ӂGq3`,3W*0wӆ*{\w:Oc|+"mEҞ(?4ioLmʠ()+?`g_~7 ֤Eb%+U3CE}R1eeŲ`Ζ_;̉SPהݴ寕=>-˶b}P2,eKeӆPPIFc1c\en?~?'ogFgoN}1O:3a7|@}SUvE~xb;FR)f#1potʂX; @(jKj<'?bCd naMK$U5ʴIGFhh2lOj0G=ܲKVގbjѵ[*p5?_yc/]ۧ&+ch 혛zfT𦝜(z@e+BTJ`ڰP@3{3M@ *X+8TŽ֢UqkqkUZEދ@*ZY*,ɼpa(ڞM=9'7EO?b9uTI.Dj5RX5NpF.ԭ[7B @ ڹvGe*8|¡P"La pغ~,>j1LX+f!c! Q=[B\T@ @ |0QQ C@xi|𵊍/ނ]̕_z$孓#KK*4Q~nUiG @ QShGaCK;%+8 D#)! [s'42@ @((OQ,K8>,iy">yZ__lMaErзsҗ;|9W%U'H/1;>E @ ώ<,4@@42|7 z/>:.p~߿y E) H IB @ &9GS Ӕ!(M/UA7Y?>GAdS |~nPYfggڣS;ɓaaWn=gu'Bţ֦F? 
nlaћC bMHI}e,UųYϖ\kj eI1b^);ؾOIDU5?6e9}],@ @ |JT()-A@HUlP,˖Eݡ@?|K& x_r]0r$*שK0?R/VZ a+ n>:Gn-Xb~:\}$M U<)hߑJYr-7\agZB@ @ >'N:adjn"TejNp LqPnJ3MlDDDXX؅ ?nwe_PjEA\mϘ91k圅I>S~]d J+I>~ 5֟ ];j+(=t wa-nڡd_e'*%p n@ ዂVbJQV![ 5(5!CUVҥK;w -TsG'(G 8 ~=}ǧx-%H*9U\ΡJ7$(b#Ftjn=kA{6<`NP@yAm @ @X%g^" {5ʨbP*>>>Ϟ=;}III-[4eq@ =$jf@Wi"|>(_MߚN ,ϘoG~ceG% >,1#J' m۶q>)>>>n]Ń{ʭL\%}y' xNZf\>t댌-רς%:b"&gI_:Ѓꡪ[Gd%@ — Kb<)SV,K1VZ-{qۧ (~uҔX6eYyi5Tn\!@j4 ?n )?*hb,M@ @Ra4ee'MO GQjCW}Mظ.Moqh<6Y۸f<œ 6ں,(%cY99_J\vXmGa֭AMP6>u%8'W2Y@ @ (<<X6 >DDZ eL.- +,#"KmsM.^ @`XQRDӻ͎K=\JN@ @ qԩ$×Ws+'T2t ˟2p;I -1{PnݾHd-Y5j KIKGX;X;{G@,Ȓ& @ @ |ɨ(f v`"YB]T7- M @ g ˣ)MQiJZ~^TF1v- @ @ BAai S6,(JpDjQAŀea Vp( ^ˣL\RNݶw2\£|58\s>g`a$.60,, H @ 7xϰ,KA(P,)\+8 0<_-뉉[9r.,٤*}6 \g72EyuFE=ni[HVhý KC"@ @ |:T4Qe9C)QKh羹-6y#Oqʄh@giJf\lFO!)=СYDU;o\ YBOIYڡV |UO3w`ѩ}•{D>xN 3yP_],fĵ 1|а*v h9|7`A $5wʒb&RaU׷eM?/>>%֩S}Y^t;jciURzb;eVY- $5WV_Z&la"}}5OJ6r\3xWnN'8~@F%Ө:oE [9l8DHZע^91*rkGk}wyAMך;ӱԍ㼢}ϹN\"AkcyTѣlu2ܙ~ @oۥD{P(Qlq~^Ё(y^iJ#{ څ蛉) rU[4RYYԒDxrA5k |IӘO ܼj^x}@ JS"EK|le)8 VZK.ܹ344h(Dzg垛YɥE|0zh\F>(IGBj nc_u(AKA#;cav?:8h ޽+\f۫C)+f+l&st=ZTBd;;- E]wy'նj~B/"<W^ܬ,𮻼"Qa:ͅ9KMxh !Qs&'TjM$'8OE4ojĹ/4_v>P&s=_EDaZ] Nd3DSܹvfߪJ$]"@BQQ,͂O )X|DM#@pC ߿ٳ{1lذutիW^=o/^s\_nX҄<ƟN7(Տ,h ytsW(5)h$ 9 IDAT]>-zq?ō;uU]@dקv hBی8m]*+jغ=1G\6/5Ӗ/ԑM&3FҪߠ>u],(ٻQwq}/L@ 7*@ KS  b JA+777B77ϟ7J=~WL~ B] \,t)3`B:e~` ?Jþ漕8TqȜ;{]Z]$Mf)R {=m0nsBӠmkj_dˋns:L(X70)V!V?`] !=_ɟ8ˣlu Zo }sy~>{v\ jޟ??+-SW2Sr])B? &{@ |&X,0T ¢, j5TJMRxsvv4*|iBZϞ2[%Z;uDO]: Ep}D'|k>ԭfi|є#FtjӜ_7^]$V$TE Josw/\xիghB\_4t&dYZܫ)PP(+s-_p y䔷,+ mS2隳2;/RifRZ;W@0,i bKGK'kZmeec_(=Oqj)FW].귴+爧økX鯓f* E U]̸%,Ktp".!E.lx]F R%ۺs'+;<=1!]%)d.WDB7(yƥ緡XB꭮B)ĥ_JRMC>P.Hs6my>d ^e_y?w;~ĵ, eG\Y݋6UZlHy !?l`TN@'UJgD 7BBF%k8 eWS ~^uW5g^5X%&ǁKN: aRm3g[e܀4دN*^DnXo$6Ɲe9Iz&CP~Ȟ=e֮uӞ/97kqWʗQES29;䩎4A3~HV*LRy/,K./m*jPAV+Wʻj(^DnX<}oQ1sJ}Uob]9 Z5#Z s 2;qəE\P>ut,1S?&H>WZs/HOбve˖͜9ŋ˲Eg9fuHQT>S8?>}ZF >R/LBwΝunٲSNfveT@A4Rͺ0XRP(]ұ8غuСC_Ӥ͛7[~< 3+] ij @Ri΋*.gs멃g={HZd^=JFmU׼~:(Q9ޗ+`kZL̸+7s Yj{8)dn*钎U\O5Pv-T{SucLE"kwf[6}X>S"3 zz3MrrHlk`_zmwRYw4K'OhDgw.>4!36?g&yR_nr5+SR@#pK}M ~vnLWUC!p[. XCd }xRR@MnAqnYsUdJ[G)FC;KJpvןL@.+X.l}a?3\$MI^Q.NʍJfc /y9Cfֺ n8<ď0_JګqK)B:7T@( D jG36x^KK߮|58\R^'v#,,^,S:7EӛW?_+SʯoaibŊão߾ji>&bY1^.ym{vRgJ1LhKt 6^lkt'q^uÊ9ry0Se96دo߾_72G*#ͼ'͠oibH61 `B-KF%3̞N_)!Ścs:UqSkZHRdƦI33[lx3cs-?kYq'Xn~&]Nj$n=+K8ʲ^T>[j7,js|}JgTUYU^<8h9)uyď3_P Yrrx^?cӘG0ɏ_pO*<=~̣WTҸR#^aS{֤۷[cKxhYrɓmllhi½sT̫sv,"R~ɓ/^)5 > 4J J3fDV &m}'f?^xךut{U;ZpۜpsڠO[cO~hq}\,(31AeYG8ʕ =^/dQf%k\Y' *Wb[9 MZ0<885EA]q{V0=۳`s!LzKuSE7QrH.u*[ >?˰Xjq𒽣}-:v0*XWH#z99OyiWٻ΃Lz1!u{NZ1SSM4(gW2Ho0v#wi֕. GeN|CQ->'ӣ! %bך:Ճ0߸-S{ީDŽٲw1Q;L3>.[wfq-(|_Ui.ĬV:eYnj>/ScЦ!>5Pۈg?nGsǏVv.q /X4u<|#w5,IЅc{4h*js3{/TaZ<nԛmmܝ/}cWr#|agoNvٵЭk¹?tog~Э{o$w ˋ/^?6U<5ȽKoj.ٱeI.M%&uu/R'gny)=ɞӺBңd¾m>S[W)Gznj[l=sVUvqan|BŨԋ'N_L< %3%oƩޜ3buQc0q~ҹlUr&귬)2#ӪgVXq9ea ow'%V!kו͘1cSU Ԍ**BeX&btZwq҄5nϝv6rUwWF}3yzÆsu՜B;""v^̋9Uwܢ# t;LXBgvX-}u% [y!C-}]lry irF5;To9[Bg]"Aً&<:kk=uُ]ISbLkI9{;+WxʭS\Џc +qoChX_;N>s)^Ds2h΍̵[[[sq^SsC(~[C"kNZZڃM*St+T)B˂ WKdaFT XUp͈1#SOݚjX&vi2j١230B$w z:Kfe`m#*T?TQQr EѸTXƒgfĀP`_<^sZ,xi&cEyjgzoW QH/P.{qp.BR"iu'Z'i%bt0j㾯:@أ圂C9ߘ zkQηi@W5q24W`rkrϜo.r. 
{wyo پq.9N7kⰷlz.DҴTpHcKےq9/1O'z607#̷@ ~I~{ V ">aˮ:@?l*{ ?ݽ9㈗++bNV5bq1I]$kqӱ?|cmfݻ9]l;M,]Ɠ+?QT6:+G+u<#P&k7#2yzV%|SO\%##O!BHd,"1eFFBVXzu9h/PlYo9W`z)){W%~(;wQsSRvO{̖53L4vF)\Sǽ5`N=_?JzSv@Z|=m\7S}}ʙa=˄Uu՛s+lfk܎iȟXϲ/_|Y[[}`լ{eY *@/eaO /]QT&4/Ubڱjf Tsq)D|E_d4<eok{'lsy3"UGh.Ľ,;CQc}ٽ)`&S(KWlKU0̒%,pi6ѰH4B?ԢKu += )uuhˑ3k&ERdQ> ~͕#0ʟ2Y 8Yla.Sf|:..;|igXzǣ!$ k}o__ }CM(?n圅80 Q]Q{ٚlcw M4WϦnN'8~@F%Ө:= x;s52yG6[.J@Ӵ.$[yX~*{P{ t;^tӴ2 %NT/BV~J/I^k.۶m6mѸa8*MyT@P| |T RZSp9tI6,ȄUF@mr]Hy5a p\HZ-^)9*-ӫVj| K c qY^~i>$??2u\͚y6=/#\ڽɚtP]UfE7O8ϱwP A*vDb<*K<2b)p\w 9WOeYùU5R~\-rK_ Z(tQNt^$ʇv*p8=ODw IDATE޽lUM!Ig~ QQOCdž x;6:~י-KRfnu^HWjjX6|zM ə;ݚU{D_}FqdY> 75 C 8U(yF|7,ox?)Q^+5tXw=P:Efbc~8y #~En@>wvJ&稳d))[AGrntH`ո{(c"3>*Kquˣ/n][/4e 76\Ln_8֗+m]7y7jU:;Tk0r?qg-dEnZj@%{}9Lr4HI)ʗ6ƥ݅JhenӉx t ^xs=“zȉsڭ^HcT;dk[8&sĤ((E„~/qsAYl]J,s bPc#v<ʴfS>V5 %vFY OZ&k米zR( dv3@A!]>1PGAwvl~VveJ=R_w.>Y]:bP] #n?*Fu.]ŋCCCGݮ&#&(TH>W`E-%@zmwtJX[>V~Sg0&Yӯ/WsPXq.*|XD|dkc@oaZ`vys5V/O 1pr̔k4jҸ f5kf"~>waXi9BV_{ւžU4Ht0L \s[$ka ~anݠe%Fۚٻ= w,[W!LXS2BZ>>V5_qۿ=w۵r/O[*= #ugzV Hjkv _egW66`b@/jϏܾHd<4C󀞕_))-[psj)KHм +WG$(:gGFSp7ͷ>quleeae,Ry/ˣ\?ƸyzC g$fXTí #f`?"vz8ΞeO.<ߟ9^ԞGLP^Ze깴JE]fe{HJُf;5߲ Hs5׶"Ή#ß0:zշeSkIDQ kޖ[kWGŊ-`Q_U[,MYN/"m` "!}reI',ަQ2M6D|ݹK*%0pLmgynn?-uMN;Rd7s93TэK!}A;K[eS=M[/kR/b!xuϚ*6@K;Awklgceo+pC7yo|vf<ì*^!)9=8vʲ\ =̕OTu0&Qk)|ĀwQ}EV@FRӶ⃵jSK={y{y%(NgiP{d<@ZN/Vzв؋v}%ǥMf- զ 7h4jOZ0<8k}dT#LiW^seS7,㴦En@gfp4׈մ̺ujɛ:>K OD.ɅyNkwPӚg玟{CQ(Svmwg1V+C:z|FvǥqziA_Sw=>vt^2mZұM+%bac(Q <3ٱȆL]UK_x_ljtknhg;)۬YG7L k;=bu@ǓV mVF$OINq~׊=(y .&7LvO^Ԟ</AFk@OM 7~DOSꕁ~ pk͝,RܹAEBygWK-衳 )5%B 3} g0ځ]đ'/o1x~vVyeip"[~1![6Vd-{}jXP%?{? V[ĮQň%*hlX%bSTL&Flި1*(G~أ;i-[(ĉ f1ZDQF@,ҽӝ]95p.u}AC1q^d Dt-=n>#ۖ ? a^fChMa3Bqy:Lz%5I2xG)2LP1)gf iJO6wɮXgϒ.]_ܕHFgafSJ pn8I3?6iUm {κ'O?V ZpQ} |i&$V*0nIXOv(\֮SŋOTݓ0z-M`9FIn:KQAhrOkA[q&I6u\"RҦQY{1]7 s=Puwmyr,1oV#3_ﬞ("wcj~7"ec[TT_ڔӆՆLKz=1}w쳜ɫ"~׈. ouG*VWBw[t.Ro]V'K[4wnb""HRZv4)t!zz<3Q8A'z`qBQG|YKVVj~GrerǪ>y2hxzzN6mܹk &m+_۝a Z̢&1"͜ŌDd"Xҟ;xU?2.0G< jurw9leK.[8[{jgt_n_`n%wu*#pI5V)2l\@'"˝']glvX@ mk]عv،VxV׸XI֜?#MBskɖ5I'y#KgkU.`mӱ3}.7Ln "nrR:i]&ץVG+S;Xt遌M>nHfRІR^}/8~XgXcJ5]L89שDc*ϦJroj6sh/;sQdxB JI>3n" [vn1~r ݡ%τžjo_3۶~_kmQ0HU\&_݂ӝZwF:pݩw8[ɧʤ}=e1pQ.E1f=t("/ѩыi%>R"aAlhY$@sf9DEg!j_3f\8)2>^e3tM=,șGRƮiyo76ߵݗ릶+҇WL=X@ZnGơ+xsk˶gTө$gP֑ \.N8NTV t}7~=k>Tγݱ*O>]'Oݻ'\`L۞6zs,,"p! pd0h˃n9Z &"⅁JjτFGMGFd91mvuލfեD193ŅGދxjd}o|TӤr){5JO{/F\պ5yu xjc7mNٹya0(J WW2Q?\Q hA 'YәA*LruS%>*<8sτ'q,M4`l h4:os[T9QBD/Ng42Sa&Oͤ=T> H/Dcի*J&TB ~^2eXD~ ȑcsR[Лlh͛7+Wߌ?ydvL>}ʔ)67Lz>11l65jyڿe+JD-WcYI54.(7??7x/JO:.nܑ٥_h_BB ܩD%@)_ǫ|ngly]։SSj܋k܋6*L&g};4I+0groFM˚KkgIOꌎv\ɝJT/~E&ߏXU+Dܽ/\3MH=Ȣ.ZnӨTبjd}Lb`ȴͽa}nOT*m";Y8dz0m$W32b9+RWlrpftElH.PH?}۟.G7k֬G>|xsRJ!}Ep&l`bȼbg>nt'xPWkƽM籟U*`|v;Q^StBSm ~[?;~@s _GnٛՎm]!֨d[Z= :)?3_ɫh}ױ; kVJ#gLgMUwN|SK]Y*&S:Ây|ا/ hki^Y|'t˖12?|44qv:޿B zTMW[*Sȧ1a.L(A}8o(eBlaa!f[WB߉Ndѽ'7aޛ%W-[KKϷ2[uiKXVm8M"O.u[uFA1훖MtQ5`e)ϲ \n,!(#?۰^TM8… ۷|VC lyŸYe?2\7,q7nXhQzQ. 
"0Y`EpaPBrN8?[շA^;Y39wPkw7wdbJAo(4*cUB;7 8(oFϖ:euVtޯk|QBV[n\0:OC^C=zR /4 :WjZuɿVp*yoVTeD$Rjzyt#rco֬qRnyͥ1:Nɒm{}93 0 28%=(G{ݦM8p ^q3fMVW|4MkLx-S9\+λOU.# *rHv郠@Qy]sJ2@tPd!,zw3_^ns oMw`0pF\'kK!ŋ/^}sTR3zUKjk~E{OagggD&DE930E+8zh哣|||Ο?>UDo4fr1d8Ը*G` ?trkd#|T*T>̕wm !գGv}嗟|IA;3okt 瘔R29^ƃ )Gddd ҭPÇ_}c2Udh>!B!3ggn!$A93h dP2(AewI%K|)FB!B!؈1c pJi dc=F4vB!B!u1Q9x + pga}U"=m7gٗm KE~H~8L4,Gm9˃2B!B!3908CT0829AU8Lw5Ue OTn߾Qgx́û>a \i[9æB#-(RwĴ{ B!$*&gDc/'0ȹS <𪆨jU@M#+/iaԴ[G{V2~~@5t !#By-10"dLsJJ 8x7Bw>3뚊&n'$ӝ]_@]M y^<e=z{)c[j]Nmڶ5m],p=R{珞9vMX.LsgEJ]NaG>rztd$%jhyv!B!BYaEabd-;WJ ,!ĵYv2`*+6X2좦#v(S<`'Tu|]/DZ7i*<sy991|JP/l-սbZ9LwO\<3Ȉ B!BH> Z̢&1"͜ŌDd"XRw| p>_rtܰyӛs0E]1eәR׾k?+PR^-J[L?߽9Tݫ{Еj73 VU+[UETpdh wNܴ5 c B!B+a``-hBZ,_ K0:xdL ;O-ءAN bi14pۍgtYF%S%xPן3z@M; zǛFx*( ;Ei?$tfci wu]%B!B^Qh f@HI2cLawɫ[ t/Ul0k0ktPKͻIu10FxfH^Ө4g-!B!BҒ 0o D-0KR|l:N輩h}`QwDk ީc賘58)G*+\gjӠae=݋Vf!B!BohfX "Q8Y4#@>QVLĕ-<{aɫc+J^3{Cq!r[/*P?`2*y.B!BɎHE0f}.8a x,* !Q/]ny@ ΐI:+/;~ӧމBB8;oP?W7/WoD!B!Q. "0Y`EpaU8T;)pQ/\S'rXZcCG9DǗT +yW*4}ky\Avpskw̾ k—s?n=WCgY_N-*]~;evy h,l:1b&Xo2]lRkϺ2l=ϮmMgo¡K΄BțLdJ^< CaQA@IKf~[aXI{vhВNͺg|RI6UJKktF yl׼M 12!t49bDI{t-;K&kAGTg!s8:waB¢,"PD B1b؈#FT;ރ- DWF(U7̘tQ1+gg +mKӮ~G ջ Z,Ƈfno3^ZnR~b/It;naB!o2A1g Ar0NYR`#{pp?y7}e1Ӽ ;>; aҼP*;E-5.i]uG*VŋSSOy ̞U{( ^墂O89QSWSnE iѷ3J*h跖MT>cӁ[*]j[\kM=II /_ *Q5ʖ-?~jÈERf脿{R'XOiL 177\CvѾ/D=x c:xY|>\ޱtقDJ9اN2HgMg<[:G!wA93h dP20$Ro-{5KZ R-tQX(z_ߴhҼK^TPH+i~eZagF<F㤀y֭kpn1~jO BT* <,U=/~݊fx5{YrĨ||lҿ.yM]kuKwoO{X]ǧp6a~㏛{8&ҠL"ݭ?3"_]+v듶锎QDvKC1ݰ/@x9}&|QѧcbkCW`9g^ ON?VBy{1&cL94DEG22@)CbW}zQF#M&+>4=]#Qyl:ݳ8B-*+I32PQ-& mz/j`%DIP\ ݤ[LOH؏?^K7ZX>C: ^~YPPZ+#5vWo_~۹`t!g|׭f%2&ޣ{-.ع hThؠm3 =\ݷ6hm?[8T-wƣ?}]/5US|:whoJZJ%K?I(5yl? [|p/S-mYt7lz3ЇqϐN2ܣ9\kyj!v)O#?|R{dcͮB"բѦ嵎 mgMQE'WvsO #hFB. (\5LkN.qcǑ֣o/ EdNZz~PKԯΥ/Gˋ0!kf&r<82`p`pdr(8-W< r:y_J=ޅ%ӳų𢝺IY۠9Қ׏lK> eK_jiO9&1~蚩ӫ}~|}ãN?W5Eq)jǥ;VϦ##*:Dvk.VT(Y6aҷ{o[Qbۻ$!ɮe]SwXopl:^HeI-g 6 0ء:2s\}uHkiPPGWݘCS={/m`|`` u:Z oZ;omZ,]aY٦E?0vxۖJ:JV=#]Îf? { kww XuQS[B<n陪!SOCן h˪s쾡r<B!/E92p+1(I>;(!ت 14m=G(nkEO3?2MfZc) uZe˖GUJ]_Rn?FYfOIs\:w:%j֑" 0^ҡՇu빧ohJ7Qڮkrh]~u*~f0}6iRlc&@Ӿvm6NbCss9F\!)ĉ f1ZDQF@,Mbԃx{ZXc^`~:D.}JϐJ hfй܈CSGC.]( (Q) 'com/ӓҮ Z몈4,d}2({0pf <> ATڦ IDATqt_@gZxw4)1cvJ˭~o׷ɓš=YR?9W$JϋfP9jKqj2Ì17|qk rRB!1Y00XDcC@`4! sd IхS N_c0|nJS眠ē<9Į導*e"U  Ic+NkSd6`MnK Ҳۨ4cHt\\5#atn2/IK;u&RX+-ҙ>;uۖt޷]/w(yh1gx9ʆW*B!&hDf#-dM{ o}^Ljx&r_[5K&M}?Ye_33Ub=7*v¹VMzVw x7vF)E!BfOu:?P"5<|:kˣ;XV3f;?l\so'}膟 7"/Ӧ^;&m^~j=|\v'3.w@3% {I1?YS{}klq(/4Jdw(/!Bғ 0o %M3,I %}$DU:mUe[]{￷N:/W^zPBЎ_5Rj*Q,GQ׏9:/ Q F%eTDKԉq~ɖ2A4 b_]pwӮh%ScyDI-똷ӲE~wHufMe\܌<5aK3"E&80rD!,"ͰE-F,p@iF 8d|8.f|=K~ah9ŧ{]驕=lByQ."QE0& ̈`{[oߴS {R>sꕽ8gqi.ϦBYxR*<<.&e_MۡkZ}BUոnU9ffh舰sG~Y3]2/Lgw]Sר֬P\ț'*SVk@^qWoG-)>WeY\Sŧ.KU%>y}˂YǞٲѓ#tYIv~9!rlt۱2.;yy׌/m=~!r̯}v9n )S3`Rpl&eL/ [}=fݫ] eY: " ( I^3eMM>盱mbZO[} YL_m:z#S?w?9hNt9)>ٽr|wqn15!UfגH3JO6cAbR@Vk{Iϋ,պv`i}-|?JQi63_w5Y KKVv~++R@? ml:gr.LձH{̃ZVxjLfd!m5̔%s#M"؇ǡL>kZ&3nse(s,T9 !c<D.`<8 33Diy܍}wn\*++]i4ZjmZWT-:{^ѣt6U6^SkR;-LLh/DΟnDLw2obΝ7PۣGߏW(yϾwۺxzrωM)?=C]4Ź:Rpuf2`APj]c7FDCT82:U_ FuQkmD?"[ʤ=pj <*!czzSD-OKe\-Bߛ_%?Lf:=Uٖ&'dUxYK+6<^=!IA/yr]aB??W/%\CvUNfҀؑ:={N>^,^6Io8w*o,@@# pw=Jm(5^d51?Fr]MaYT(Mzw~G2+|J{~՗#B!$_0&cL94DEG22@)CbRrѠcǎٿil W`]+n%*ƣe>c0={ޤ}Z״L=)_zԬi'n_{LQږ%G7 -A?. @kߵԜ#`o&c?t)yë &j#VoW;B[D g '@&pL lX!!! 
8qbvniYãL:Cw-QpYfwv&MhϏŒW8'-@d{oUG[FJw,rdAuƘy5&!y cs9x$.!*ݻw6m۷o6lu{SsP92蔧Gdb-(Py%F@4$k5Swɘd+n=.30)y~lhl4B_tpL^ՠp 3>6%t6!"9).[?n^GTWlo ōi֛}tYLSS#)>N9s?nMMoQY ԪCI6ụih}`QwDk 61 G{ckhGYb+{ؼ*fٳC%#JvnMׂny)<< MB!-U!@3 "b"X,0'M'XCT"##+Tne >|hsj [i\Z $ixfDL'\ t<7+@MKs(WH{/,:^t̽Lr>u: Mp{uT0G-=o\xwgT !B!qL XE82MHLJzY=8ܹs5{yAӦMmCUS7iiĬ, rwrԁtXc4:)w<ѿY)䚪f% uGj@nt#}=dEYcꔱB!BHE F l,`D Ĥ3냂̙uֹsk֬M=&vIHL&ETJm:e6=K^oq;R::x *}7)Z}ۇp+!ë.\Ke^3XEZ_3 cdPV%;[N!B!<Q` JA3`X47nƤx{{hiۄ~m]`0GE=ǒŗ KvWS_m]<0G1]KVEg^p ?[iiMV_h0tq7ףI_HN$"/ѩ i>dvo-v)o9PP8S_+NȤPT )wߕ:wA]۱-JOG]m5>; aҼn{UY8)-&]9**B!BD A:N/3D Gp8cs)Ky^H+DZZpIyGK33y1YˈX Xym:5cقQ"[J s?*4J@w/JA!B!$ZDDAE ,0#JDA.)(E1z&Y\`P*2'mu Hxb4@T(^ yD5B-fƴ{zxt+!BH~{䭱~1=\J)-Ęؚ[8;Yjs>+!*yKVy^C w&":\E &&V̡f4/JZG ='}L:Ph׭Q;66shvWcŚF#DuayaB!3A1?{{@"8nUpցhuYwUg:.*NDZ}UAb% @@m'\rg$bHX, V 1 a} pB&́+W0ޑ6e.Z( ]ͨ{ZwVK%ŵN pa}xj{lyIN~hUtp w;7m;qeӆtQU !>3`XFİ@H(%PR:㮈AMԟ8'~Pw_cǒ[M!iK&?"y*@(wW$Y2/&xl!)MZuZeAH,XM7=i.tIC!{1ax CTİa!A x#2BH>{]tMX|i)RӞ?;#GXWԧҫT/XFMaQ\|~-t;HxqZrv.+W=yq˨B%"~ϱN!YXb``^K X(A)>AdbR|1 R\u~>\7|طj3;> -lD)!mɺxyȾy}>qq^.8N= />9tBȿ/b@$1XF* pBK CNhlQ3%&_=iQB)NFYݤ7;vD%Գ]zw2s7p[-EjáksP*)Qwb?y m/jEQHoټfHaji~f#pzAث;w)#SFnݻz9aa0[WﯻUJٴyUPy~CFh(ɘ3"%\/WTi׳iy39WM8wƽ$(*{\8[͂ȷJˎ?O\KdLM{vέqKX~rusǎM*oеvHc_NV 2SQ=&yYdm vS=gu^o!nن=SOBy+:@2 ČBDq2 PpB[ٺqA{Pv/}*[gggzJ'u=sCD{kP(kd|mb̻sEof]Z%JC=,n0f.(IFop"׷r`lǾp@_wQѾ7'mߣݚ/S:nqӅ~O=7X1qwGsؘW@Y0M(Rs4xs{IyG`OGҹO ՙͦu?4dEu탭]?ߔjM[7uc2W_`ע?!wvGHq;~X580Z,zW/c]4[ HlMC/|}}ْ*>7EGҊ*?BCZ[go .l)u ٳgcz@# IDAT˫nT;_dzಢ47*\}*>[)g2ֹrKT]q[3'󉛽lܥ8/ 0Ns*`d6brjăKQŽqfOplM` #]5l۷>4N-D7;w9»ay!]&99v޸K1{g43C<,Ȏg0& P8#Q%KL vҥcՅofD:{#{e:F U1K J9йXe} X0dgshBX^q|<xQI|.t]^*+ʻf(W%czSkW q |M渐:vx _=͹su zpOCľ},rgkSa!+=:F @?(#mݷOZEy;: v\yOe e./$hl>]yz=?⇷{Էe+s[Xy4˂3_Yn ?>02O fKo"֙2?SI0]ŀ SLytEg@[_&5ԼN\f =O! i0y o>dpܩF;懋 ͚~Tɷ.fXkwIiT8l(II eߎO_894Dl6FlvYu8⩬MI{ݩQT&(+KB>gfŲ Z_?ia&A^H]BqC-^wYbݼWwa&/mXrfԃ.LPj=O1I 0^۶_`nGŁ{ TmJn/Ri4| –1$6 F854k ~c~غԋ%<20>MC~"f{{B!rX Q"!R&N:ErRt̽7NH{mp#b7@KgRzLBR׆_Y H'tew\SӞ>3.zybMANv X5ztn޼qZt猝SSm\d}r+vX3L&Mţ{~=~kKlcF`ª3'O'Qܚ4jȵL~G\ {GF-:6`‹[ ;g ca\><-jcS9gϋ1eۃmqe"҆7^fT ωFxq~u"a,dC55SJ0l]яq-DЯKc~ k`zaƲzlY^7!U D ^(+ޜ>fX4xTy%B,cUFj!/3/l\^6OCpg̼c:1gJtmzdHKqy EX4 XtoǓB'ګ,x@N9aϽ)%S?IoMTjslmȱE`HtOSc~VFŇ ب‹dϜןB!cY11 f Fe1l$(e.ǠABCC9;W_}U|l05[%dbpI{N}mMY-`jm l|2zZ=f7nܸqk^x@کSN>07쵗Βxzjh0کS~\"=X:=W4vUcGhҬkx}T*,qBYC%"_&6y>}߷^mO/8[8ak1,X !v8 EpD +e!f!ܠי3gUy…֭[aAgbV4XCz9b|jCe7=j܆KuӦGβUT,LkȠ5,K:ո]zpRձBYkR2{nD_uL d,TiU}ƇJD,2pjd`ļ(Lֹc˷S8帖9CX; E0S3۲3T[0&Xv5$ ݗ μ4Fj1fE۸/fF6g+XrFXMTRu!_, rd#=[QmDBd!ҥDa3klRʞ3*a 35׊&v48r%xĶCW^4"vrO@ByKz xˈ(Đ1C$$$ԨQ#5jCqE㽛eLbJgmezCVh}+ufnXb!+ȿ7~«6QO;gȖ+`q @=Csu2n?ٱ*%V:/&jM_Iص-hMwlLi[NZ:Gbg%$3β70^bFo!PƝ'6;Ɵf[GNռE-:v2" ;{+@S0_O%?nV;/|g RB!3L" b@&cKchhh(R.Bz:/9>kXH93l6` ;.oSvy~6+ +cQce&Ab+>䵳Νtҟ %|ᶇ79VU궫R]Ц>OKSdRR{}"+Z&)FLͧM/%wfpL|JV]+ a},Jj^ǿRkDPI{x5'iSZQ6uGɯ4DT25 =|FcZ}I~Ld;xv6yLϧE$r\^OX;:Z'YVIPDŹZOm+jDQEQ´E06QɌsTtm,?6_X%ӹX+YWtU'BSx"1X2R (IF7o;5(\/Ӵ镜>ٗZH{x1ւγ,ҽu2N7y潡sEeyrr?U582f3Y:oʡwU pv!E$v30fs<*矜G¹_S6Զ3{gϥb? ~O!|@t 8e e 7ޒح֭[ϝ;6}7t~VKKI8`TЭ jZ5j5Nq`Gth\uVvIŶO lsL}[>5״2oޮK|%MOoae C,rDEk7o$޻1nyEV&} um>X>eč2&O}eRqQ|?JrAHQJ)kw:ȿuto?[;*KNןBbyp8:>G9z;m۾DžWhz/Gʕs j4|Z:Jr$PɻZZa?U˦H°Ɠe\ŽQ=|ԔKͽ}<c$'xQTؔ9wEk>w)aaQQQ\k !b=>+iڔsuM"Jދ uBK,1 0/e` %,TpC`u"@l;[;>}9 /չU;"!jsE;^sscs Nm:k~;~qwE"Uo "!b=>d,p&!"Db [e"@ )QU渻"JxԾK} 4p^WSbBH>d0f̎y~޽s{TӺHToS gT-_a &Ч>^:=%B!`,@(DLl IDAT)182y /䢓J!S!4ԧc'ک߸q'x̊( `ń>FTWO!a mb(B!RJ<8p`ṭ z=tZC>ѸIN=>sG? 
[binary PNG image data omitted]
doc/reports/CommonProblems.png
[binary PNG image data omitted]
B6 (8 R09.`vĉX)F uR^@0& (&0+ar; 4AA(0k8 e"Hozϗz3%lWp<ʅnTE~ԩ#wMm t0N&ݝ7qKݶnѰigVӓJ/0Vpv6lk% JB`XkZѨjf3Nqt:S(lOOOTz޶_d2jĦjT\/8Nq]T* nuj8AX#i\D2nGGG$(ͦ>vww[V=r===Lfxx~[T===3~pubu{aآ$kAb'Ll6J%ә`"kmN';f[oZ~wkF0zzrTq#oW(  Ǔ@ ߾?_h@Eƒ!$\ ӏ}p!A-?4z"FdT: C32O{zst|V`lB& faۛ3VjinEϟH _Gp42.kڣ8zq$mt(V|wuTH&b{ (텅Zfu8ӶO-R$EjEGIӎDQt֭ťrlq38RT6- CCCӓhFGGSTV^ kX .tX,RJR `d2988Ȫq)GpYv.鬭%əq[Roܸq[nmnnV*znUJ)k?tPzsssqqq}}b$㉭2_s0 EQT.j'?=}htd4J* r[A`@ %޾ eU7o\^[+w`{r{wAAAw|Giuˋͧ~X6tJB9{` ONH>ciD:w\2"9b&iCF"0Ɓ&'Iuy.HQoںiA e k20LZs̤"1xd]'NivRSqu] I@>/1o~O?Qm[ܶ\V=쳗.]"l6k=w+a#d2@TZ__T*[[[[[[6"H\U񶋉&㬯3Cz{{<+˗/_^XXqFѰ.N؏ݸ-=p]9yÇjNcfg`p[8ٟ]rW)e;ޕP-IĆk||oa+>, l L&3;;;44400+++;;;YI<wt2CCC:~ɓ'GFFz{{766VWWJR.ZӨ2%vmCS b@뛕kWFnvhװ˜ut'=ӓb‰|q`MvK+k[[ccV)V;_   |uo>oLIDp].\3c_>5q[JfEL9d 4`ÄXTGcP!&'s uPGA1{X3Bb&F"V̬#PrHElʝ bO'ɑ٤̈́+P<D"x SigsTs{xŐ`s{݋/>쳎 Hnu]sssG]\\\^^tL ЪVZѓ'OX__p›ol6mD*d2&rWwո_I,7nܸtRT:wC='ׯommY$qۥ+/|̙S&'N/bOOq cLVkZB.,//?ŋׯ_ذsXl l6=9}" ͕Ϗ=çO;tPOOBV}ߖtmHwe2SEjN=p2.oʭ7kh0@sT'?ggJydWwd2/wY   s><#NVV6oOp@ L&ql?ıå^ߘ9&""BP (bh!( ɾ\6h#LFM+4B0ဉ2(hBh@(Ab?5 9 D!R p_ 7Ҹɂ yӄR^?ƘFQo߾}ܹl6;77O~zؘ}t+SwÖEDQQ(?>88ٳgϜ9sʕjjp[}OAs'y?~~~^z'>NLLu@4ZXH$z{{>d#BυnjjsssǏwgkk_~_R; ";$Hojmٳg}/| 333>իW;u;/cu^ D''Fq~-?pkbbZ%*  p! DoȦP\8.<EKW. H)EJ!`#L#p@c3X8Πɞ@!* x[H _D eX1y"DL`a@`1LD c1`Fzr_?:1o`RH($S-_V7ѯ*q{t*X!J##ޯRf|/ǎ;|pEz @yBat:V566f+S~;wneen[YN~{\y,Ax뭷*͛7/><@2Z 0 c׉;b))v;  bJnإZi666(zgffR/^(3T*Nwwwϟ?NsTTZ^^m208 }7PXcLSJ w^H$@n]7bB́/Yy}}}Ƙ_>7;;̫d2.ɹS ܍w 5WOǎ}5*am5mnnjSNMMMZ~{V"ɺE3s̅BAkSOj}k###=؅ 5؍5 qn=ϫT*Q;ȅGU\Z|zOڰ)߳ofF"a|XWw'FƠիW[΂   Sa!ի'߼t uH"\L ?xlP_ TKJE 2 G PĤ )`@ 6It(畲iAi6bCfB63 7ڍ8Ҋ) "f-SH2  y3*&D аdzxfoEplƨ6~G|?g`< !$vHC B76N$D8`ph&)@f2=miHfVPL("M2T:CÆbr`BfE1P 8 1afD L|n߼y*M^IureumnZg/ JyH5m0 E&p(! "vgoB*BOPi_eg&v\NRDdgE}JAA{ͽ8l sF_ "?42lYa4a2dB|@D`cL&dɤ 4,Nu󩍠uB0 `1 T%4 ]ѕF5,׃h3W>v~lF9o2oF%VGHR<\luw{ka6JeSN$,DJjkvI!7M;&VV2)QMNNK.=ׯ_Irؗanb8==}Bb&kkk7nܸvڭ[jR~})o/Z|;V[$v|VJY>)[oa\^[[{7_{۷ok3LwGl'fA,..b%$!i}ߏĉKKKoOww[Nb+ccc<'9gϸ4c]6v^?tʕ˗/7t:mFVrgΜ:|'_n b llh4jکi'+g(gp\0OhgV6_Z\7o&Fi% XʅkgoJ߷KKl&(bfl v^ (`Ɲ J5 1DC3kf0 ( \"fb[b eHW 幟.ZȺHҕn|9ó/658,\.EQdGlؽ_*;6::sٖ 6oZfW՝bX)$6}1fmmmxxxzzn R*LQ]BmJthjj{'FFFzJl6mKlTJđ#GΜ9s…mmJxT*s=z3̱cΝ;WVx;  FTرcԧ~V=tvvv vә.JKKKvhk&IRrTť.A3L& OѰ%d2ӓKR&񜛛{.\PuXIZ.,,+'+++kkkF 똨RbF"w( E{&Qu13': vjAG5y3L١pxX\E 5h+7pbfVӱb\UD4AAAsϛ{o}ݚgυ rIoiPdH A=448BC%KT糳 Cj:??a;bZ٬Eݾq[o###'N׿>113ܾ}ؚx_V}׏9ߟL&r`ȁ)*qFRu o6lc_aۣ6l4؟K0R~K_̌KKK׮]̜L&=ϳFfs5":qLT* /BR:HĮ3HAoΞ>}ZXQ ߼{oFl=TɓSgO1[׮/.;O)C 1&LzË/]\@t!T?^X##nכ  ppOE_ٗRix\\#PRa(EQT`G.&Ff(ä v G""9`ÎuQ':䢔N2Q; 6l"hHGQkU )ͺB"VA)`-a0c1"6 X2B fp5"qP̖>Oކ#6whFZ'I1EQөFիJĉ'NXYYVƘd2i8)tZ(b[b{:;vl```}}+++\nZ8q_C=n/_mk4z{{٬>SEf\T*.\~'={gnݺe+`߷b`^v7>7>>n.'ޘw-Qcò}ҥt:m+v#݊=X&بZ-ijjq˗/_x1SA8alԩS_򗧧K d2F%Si={vzzСC?hqNrۏvuu7xFGGzV:U:lF$s~qkeDP (ǎinӨ_f{14Zkok'=9=ۻXނSJ!yesT)Jva JQAAAsOh dH U{R@.T&*E 1)pPeA SDy/M("Á]"0fΫ%bcQTJ'fh5HtD!>HE|o0 eM9 U6:#^b尩E 0|/Љ:yo[b7y5l;+iۼswwڵkN>}ԩ7o={m-ͫ#z7lƨvǶ2\t۶׃R*"xk%SSS_Wzfo,--+q @D\Jvۖ~V}Z.y䑑r|ڵjE?j [C5w?~+״ZVEdx嗗L8;>FaLLLJJlQO:hccҥKLLL=`2 c vîLF5kmi9~QNr=o kl]u:d@/BB0@[I Sͦ>;Ep]88 AgfFe=w  d t'!;h#˕v=<<<55kY[#Z'F2&&&z{{WWW/^h4Blhccc_qܹݡ!`um1RxKoooƘ'O>}z{{?n6VU'bcc'O,J/_Uj%*/"ɠP(.l'=.p*5Rq;2&X-'wٳFc||<( f l CCC[[[J*g?=??dluQ*.}ixy A b^ޞH6Ꮽ v/> N|`Gڬخ~??~ԩSD=44dUxy ;$޼yT*MNN~\^^~7o֮_\\\]]rv&XAxw;8llUQ-..Ǐ߾}hQl(ZTy\zvwTy8P[} y AlyCCCLfqqƍarqSNimߓD"asբn#\׵hGFF677o޼?׮]{뭷(teaf[R.wlG؞\ܩA:J!ЌL)VkȰ=LFؗRvtz}{kk}-?34-(B3@"_DT2L+SAAA>e#N\0함xBC>t2ҦLLL mFw+E01eubψܷ̪^g!gHCaєWARmdCd%dچ" $4$=2 ͛߷ zT/ҥKqONNŖyqJra2l6޽{O}S/ʚ1^`0>>>^XXlcXC=j;Id30"z;wݻwOKĈd~~W^iZz@Z-J"S]y#i}X[[[[[Z.[ydaaj1%Idezʭt8:AQ`xbd5I !+;FeGQ$I?~}p(P@ ($8l1s7}h $tÃqeTJ\w` \@ bg"5d]PX%Ji]6wB{ hKD@J 
ä)`pː"r5@1 ` 2 9c 1 m` )̚YfJX$c4Q-Zq 0<UJz~<SǩzC#%|Vbx@qVjSSS|||,J$I8\eGGGRW[v@!˗/KD_^^(f#Ks]J4M}ߟ,??h4Q^t`)^O=bbU0uGOllO=꒰wJD7޽{Fj9… /_&(&''mby]Dt(̜bkۭV7ߜ1F+?kQI3s JQyj$i Y={vrrGQT,L>r|ܹn{-Pi*Ob7ƖE`uF$~drr+W KIZ-+JpXFI|./>eg-M`GV(M loouwET:|vqZ-9\.z槝gNQD!: +TS]2KR*h"JF$iJ0j:mp/X#lrp>aPN8dֲh>"!U@ (P@?.N>:!teB[G@|-to`XܹsZloo/x;&"뇇Nj,4M(j6moo !* gffVWW+nכ6ƈvJ-$TT*J(vww]m4O=Qb\\U*%81 /Qzr=o$IR.r=>>"#F*̜={۷1l٢1x[j~I%,^&IIAdzA(0}0su}0T*YQOXnZ庾H% |i$KF (P@x@&6TE5%`Ӌb$ 04bG3HHW})Jnw(RP@TFnFqDy@ (P@x>ǰsED| G-ժ_i?%5tRA8+r@JdR@ A3ke,ȁ41H$E< cG!flŏ/\b2f&&_y0-1*3OgZkf\My+2 Mj$)FAxsv3.'4HvzvC^Kv-yc ;&u8oFRy;n%;ÖQ1x㍙ %cC&''IVN>3iZT\MӴK󄂔J|Sp$DJȘIG~DX] ~?33S;NM෻S.ggg v^OObd#"? !dC%PVd_u IdUQR4W/aLFɏ5ʹ4NR (O F$ oQpF# b | Q)=(Y (P@ qJ)Ea>ozYPbVi6Rq9(4L b4!VIA)1RiţJK.`ؤ3"&ɓzOjQ[C`bXb v D23 MÔPGÒWj װa&6 1hf6 .)) 75Y乵R/ͶzQ%QTE<~<ٵn+ i|m:J)Oݘ?K/}F05 ÓD B4T*μQZxn]몐.☐K0g,It!~Q)wwwk'g"te jI$zy^4j#\3WYi<۲u %rb֎)8qcHCV 9dMmdX( lrEEv??E?D4HA5hzq7[ (P@EwajׇB)x.[6ḑRVQ"E훣POy1.nu"za c>&(s+XA!?H1Fu.zeߩ)ވG)d4O!hSa3 1†kT a4qp`@Wn? R5*>\Q+hs: q8 ExHv^ȳ\>њ縤FILChH0!?ka(L-7+ފD$C/CΆk@2@=c52sh 8 6S)q !<|TsavID *W ѽ7oJh}a#NxrrrwDT*ʫzʕ~Z-)weV6~R!aJFhk8$jtA P2KF<վxUV[p9, FqܹgݻwrvJ^O:&8dZh$Ię r,T S!dxش, #p*y>#2pʲ,(daj/^0՜xw t G< '`8 gg.aN42|?(P@ (P3dݹs^E7 σ> 0mxK GAwЀ܀Ck= 8-R2#tʃ,)aOIC_@Zd辁Q ;_yO-NMVO"8xd`9]v۶2|Rf(MbaETŋWWWUV=ϋ\. HF*ShADrYbPmZ$,!=G9V`:lTj~G2J^__ R,pyy… YaaRHWac/D^jvJut:GGGm'wڇb?eȉ$@RiZ|5Ml8/r8JX9qŊGF q+$YZZpxޯwGREDhU_?fBaww?1<55E(P@ (P@pG[[wPQ C‘ nh>Г2]P@ re4 7A L+ !3сyW C)eSb)A .Aid<4@  NCIS8EQ0 Jv{<xL/'U\Qf<{@'xx/Z-85f[H R0s"ߐ(!r @9A^g?i]9rڄ҄e^:77' $6%?yr<z}% Djͦd٭yGc$&cggG"AP.]ו H"" ,S~XbR"]Zz%~[~ J:p \F'Dťzu!7nҴT*َ9C+T@ (P@?<Öm{p RK^(I $ao$_0 T)rc`A4RpXw F#_"8`4 @L)1d|fQ@%:CB$t6#IHq09E)8 PPq(( QG.ek R e\.70~uJ".n+qFŋ[[[vhu?ibtf=H wYsگ[Ir8g9Mp#*7[-J<9H(9јkƎ&9joL9WH$R cxgwwW928E!~D#%coHq~vz3oy{CT8 1 s޽̓Y (P@>AxU 4Xr {A DžA?S?2u⿵^2FHѩTF02 #HWТOUqZic?KF $2ԫ?\bnw~({8~H<sX <1H!Qwk CKF|$>̲Aґ;3$Sc5x$z*3GQTT宰evxH4M777˕5y| CFUd_B јn%{[k @B3T (P@x.уu % 嘎Q΅(/jT~lԕ ݣaf!@ fcZiﺊ(β0NO_ƿ1A CC  s&W*9G & &&n'H""Y+yg}xx\ƣ%i(Kplg-;mGO8;/_T.ܹs||\*$TZclj'<QuYyk8&W֬9jIr,Ǜa^4aQXͼx<[:{K[U|CDbooߎV7WOʊ^"s%;;Bm0 [n?2U~}yTHr@&gB a%}?5*fOBCJiJhA4^W2NV/whiG4j K*4O"872y|-c'c 1Z~7@~S{vGc;W"+6o"wc <"$BեiEQߏdey˯@9o_1\ i8F5K^4Y߻SUYJ`7 (P@ d9Rtv)x 3!÷HE|SW./Wu(2Ilb1Ȫ8F;lJ^aX yb69\ڝ;'_AdXŢ2a6Y~05=j_R0H5 <9a)ôx|2 ),ggg'&&ly|6M~8vfgҹSk}ڵ~/_^[[ VvAx5W'L˗DbSeg!;<1Vo򓑏XO:ND2Ce-#9]=AI6"I`@kvg^|$կ-TL(TA!//÷$FlS@ (P@=N輐a )C3Ȁܡ-?;ɹש3 IDATfʕjlHiM3m HR4™zpXk8J;d5f#q:L)t[ޥT'3r*fNicdFGv\$fi2ka6l1&J2UqA7 䜬Cfp(IY־9== ^DT.3g(sεk׶@nv[[[okǑPIyA 'O$I5"[0z~ #Bx k_9g8̜$Iq]' " @jfѳO8O7}q$Iy3r!adVkcUy>bk@N*"PǢ_Z~O*vyDV$: zrS_?I~_֍7ZA6OF0 Øo7._~at޽~93*؍ (P@>qx>9q!G)>-]@p]*r|8'ژ,&0lL #~:^s_Y*A/uAx>Ge@ke` =lIPC~\KS+Sz3+RL`f)"fm`#}R41$6tOW88K!v*z J\g T ΌZgtiBUjKKZ4_r>f Fp4Z1r-oqhyӄQfr@٬jYɓ}జ6_MDKKKf> zA4 \.#ʕ_8zjTEyD>\hRiv^'8TXKp.~$I$B?r( $!"^'}k8}|RBLNNE(_JyEVN!b,666$40WL84<㓓f_ ?ֻװ "LT:4 14J>Z?_NټݻJuiiIf^.F!(P@ (PCp8 5}j/6?3W܌_d8R/3aH)P1pFZU@Wø@J ˾Jt(P0Gcδpp,K6J/_(ʹ˛;B_Va8ainx)K8Ilˉgǘ _Mk) 6ckN%!˲;wA0;;rxx(G"M\.7` evEFCjrJp <55E,YX!~Q%T*y+xS ɯc `}}j1au+O0rݲF!Nfjbi4ju?sKvxƪ!!b~fTit"!҃eZŌZ;@&8 >\uov9[kN> r5'Cw!Lv'нO"8ƴRfcS 8_!{u,sё1ƆeA֖^/MSq~??[$Irrr;wZ(8d>Z$J@eZmrr2Ik׮ Y zԛCD󫫫2Cqlܩus1==:99#2c}V+#D\*i*֞Rwttjժ$Z%80̙3.\݊lP2enb8B|lddq< RIh#9%oW9MBH΋+v̙U*?WAӅQ bw^\ yw\.Y^ڐ?Q@ (P'Ǣ9"ky<G_,u:9_i4u+e҂Yd$Y +@"P`$Yn93̙ "mXsF^QgW .*#5<$80 ucW WǓy!z )\.f911a{MLuFqܹ=ѐ2Ւc_[*ex.Zp%GGG+,VDtxx4| 6AYg!8N ːkp iF3??l6wvv0 89s|(,5!#lȶ)GgvvT* <}Y:ն3I;~F3W.]w̿x7!JQ P m=SiWʧ^sW|ۻIRF6Z"EO(P@ (PcQQ>><<A$kZcgr9MSrY$"zċJ%;%>-~Ắ$uYf9==GDɋ8D7777;;{M,)B`U!0i# )h2821MrD\DZֺ\ 3Wƭmx&niHC 
v7|}Uzݺu[Dw#Ԓy֏c7ȮR~ (P@ (8S.`zi|e '9voǚsma60DQ&լe@^FhV_$8frR]tv irNڅ+2 \!j:4͘hƐȘI)u` !:}$ɰh'^B+_uqG`렵vv3W}cYdYV.}ߗ8?T vd󎏏X/%-*'r޽Fh4>^ZZ:1fssVyA7zte-c 1]] 0ƀgUR65‘)M3 3'bH؂#9JLY[W9vbUjKvk+Hd0ԢA >)JKpIm:TҢu0dYl6ѬĈg{{{W^=|$(> CQU#9%uaaI۽rJ^FfD[VgffRҰV5ȓ;cN;;j$4!fpgggϞ]]]1 Iߔ##npҥjz}=~L%g$6tG''Z5?(o^{w( B8Cuf_xzi:llFTZ Z%4Mblm-N[l (P@ (Cp(ղ`Y0\DU!}Urt1-vj{[FZf0Hi`WT Lq,J6!C3`R(۽;vahhj@G| fΌɌδfcX<)HM?֏{8RGDF f@ v|.`ćARK2g/S,*RIA[sk8Кk0tR0NA[lr><'iѵ*!n#Fҗ/8v7&'[Q  ޛXWb7⽗/}nv7{asHQ졚ɱFȒF<h``/? 00 6@C4H&{ʪά%+o~{_ˬxnܸI;qc{(Zg:}+_uѐ71 2E@^kv $DqAp'0LCQ{e }ؚ_.6ۓMl Z@^'wJGY_ xkZ9ԱΑuJߦ5!%$0=(0(αu;{˻ݯav+ n0x|p)p[3o 1()$qvvԩSV+rmPJ5, ^y˗/DImbQ)bֺZ T^2{ffŋJ~Y]]e4(LYkkkEQ<ǎ{'Μ9o^~}wwWKޥ>̌䒮+UJ ?~FQلVtep8j2vj?CbZJ*#^_}G~gΜ9}խ- .P^pϋ<'FWD+Hpvcyxxh|w{m(l `cb2v{vvg7ZHRwGd|<"̒@F¸I} ow&2_؊!E; r`up"p3/|S{sljac< V\ V9vA!&tY]b^P8(R0\'VNM:_k@ĎB-.`Dq f YR!P!8u=bDv#B$R߿tښ@u@XQ,..tuW!@VY__ؘ>}^B*d֒jY։'&''ƶRj v_|4((-vV-///..vݗ_~UUu %>gSիW_z}˭VW_vFC)%zx"z^w񥥥I;_CCCBi]v7߼zjRQ9:T1H R펍///.~ J? @/䉓ɑqaW!ȶ.`dDSvivvv]&I?QBBBBBBBBBB] 8ɥApJ}cqPekYKpVUyaH^!Ąe ["ϑ”@HP¢9P bnZ|[C t;0P>B98 Y6!C-[?¢R 5Z-ޢۻvDcH ܸGn;;;ۭVK,$s Il6vww]666&yaa믽ZV$VgBDfZj_>===;;;333??Uc666666^z,FQtRjN>̗/_^__wh,Z˥<.]ZYYqSòJ Ƙ1c̹s笵gϞ}ꩧ#4UZpmllZ-I*8 zvtݮܗH2ܜdJHX=aΎvrtϯf_w1@hֱϾ=Ϝ4U9i~O=N`c1&MHHHHHHHHHHxq7N84k-L`P((kRABh+`r&Y 9+MXE!s `,,4< 샜)ו8`'q#p({(K/*:ߐ2Ƣt(0u`Xsfb*͈TD 9~*!=3wvzdНTvfffΜ9+WQ(|hfsjjjee%˲SN9s/]$%t< -H^KDY IadD!D|[ɲ9'AcccgΜkۗ.]rIhI[Q̑!Uks)g $HQccc?8]rd#Axf$E1:v5p$JrȂGC;zj\'Y5!pߕ4CS͡sװA-  OUL[u$;y/=t =-ƔntIHHHHHHHHHH;!GƏ:̲vO uGsb#q0zA pp4@ 4`C#zF"̅DfpELʾ }ISUIJJpLpX 6DÎ {l%iF55ZultnFIg#9%!!!!!!!!!![Tp T8A)ҙK?S/Q Y ?JCgF@RgDePCJ)+rj t ^S@)  '5& X CKM48Ơpst'#=D y= N6t^#/n~@$q#˲'N4ͭsε홙"FQ'R3ښ48qę3gFGG/^Y\$ˈH D~FJј|衇z4K+rX#edJK %y n*f7v^mnn;wsĉzjtttuuuggGSnGr$YU28˲4jŇ~X\՚ͦv7J0sY3P՝=34vBwwꮞ/!!!!!!!!!!^-U?*;6=oF14d@sn*Z\.6:҆hX]M 늆Iu'ZJwH(AWP+ԳQ!H[t+ WvoU<$D#l{2䘜aYȹ¹!k-s]:m5vak gkZ9BLzs?91$zh|n'Y^Mǃ֏ɡl4077gz‚y@A!Atn֋1رcsssyʕ"%SPUp y7'O6Mћ\xq|||xxX.-ҀYy|05Q%sCCCn/_/XXX8yƆDDQL-/G.1::*ڍ,WWWGGGc 8rUǡlltpvESIٸwGϠts [}xّ{_Xr͡_7xA:j54jkr29t_fjW Z#={J!m"b(. ]nx(sP(4> |@1;!VzjCv;)ږ45awEa]ǔVڐLtrǔ( %E>gY@lHjD5&.*oQ4%ahhhbbbfffddD.ﯭj9Il4"߈Xʆ$;`rrr… /_>u#<233#}^www1կ76 4?錏7MitK.C45jGL@ػqyxej^z+W<䓓nWETh0|dIu%Ŏ$y>==t1˗/llliَmwFY{tbg`SS 춟_^h6s>>BJ";|CPegScO7gQi8z‚{Ʀ>UB:ؖo\1`ft|schZMnCg>tbCanLjJBBBBBBBBBB»w::9@z"4[@1P#uh  p`Z2j|;p|)/| >):`e8yyآv/'^;Kf>r-c h4:΅ U9ͻqcʲzn(^7>>.ErThţy+WZ鬭moo3֖BHV9z[[[v( kޞ@FGGFvꦉJ^w:_~Y&` IDATٙr䋽^ohhBZ{ZǗK[644$uzl6Mѹh,;nz" bjn^ڢ8 L:#dJ9\/I'|Yi*b& (~BFpM#l}[(KhFa 458G*ju +M\/ûYĊ XvFEeR 9ZɡLE.x`0k}* HXo/M|?89/=44Tʊb )c'$6͉jTc1o"DQe'N`n{…NeȈ,FGGI2XWޞZݮ&''gff$~IqzBcebإV77'ծq"c2^羕ʑHA8SZ->;p$&&&bȡB9T|#)țJkr- OpOq  2>~[~{j(,@ N:(`,HC '? 
{س gm?hĨQ t0sYSԆsG؃mV$HfXL͎~GO>$_@VNJoۮ"WB4dG5hJmieX ɥKHneTRc"E%q5 /:8 H.4AdՕ߽r*S#b2s|YHD,8d n"9" 9bŧ;HHHHHHHHHHHx(8yh={PP} (iP@a+8ĺB>'V!*&:[{'ks̝^7SPC:JXww~:58)&V^!< :BFmڈb ok+V#%=}v;#Hv\U*Јd[0Vˑ2ȲL (舲y^%/ߍ}I\: 7fd(dʀƘ1nuP.7vc`RҫД g()IPTTW[ T8 m߻voP9, > !%A9KWs(PdA PPK!(Drx7>ş"Ey ]on=x|bdF"FMv֑,/__ÞCٓ^-$dtp&G :`<)a{Ui \7/|{LuGU15֥w7Urc:@gnՄTX1\bM~BD.JjT'DEtLJue{`HDC","7 CtxAly[D$J?8-xjb=Pn.҉6/-C7P9zBA-*Wܟmup$?Cn_}}]Dm$$$$$$$$$$q6O|/}^y 0 c0$8/VE&?Ņ&,^i?]8]WO7tiV&7`,olQa,g v> $Wy&7 g j.WLqI)Dv$ȅ@&OD)|͕a{f1ut7o7oW >ȃ@ D RN(TPITE4T %⁸ݑzV>8!!!!!!!!!!]{Kpv?7^zu,2FN0"8fπ8 f[.*6m=t\SW?pxd,J+>فҪ\(EvְJ$Flo9q Y G`5[JY)Ƣc&W~gfPw׮XE^GÍO`d3燛702N׶ ]>9 5Cnf} ԛ>`&o{|aGGoEp7wZ.1Bmߵ%CGd)[yܢE=t4AӁ!")+j)jSq4,Z5H_C]@< &"P >FR $!]-HA{O)@H&"&RPPb fhRrEB=P EDdZK) ̅"R":,|Jrh`7 R &VJ đ)޺HP :#&$$$$$$$$${qo WzxkPD-LAO#C)d]h6wηA@ )qP-WBir,<ϕ_eg 'sMr">wÆaJݰƠ׫5>OxYy~}a-71oAgG'8 M,}MZH_VjО8x[Bov:'[bߡ9>CwP qݲeVj1搷N 3ar`+R=?3)#az$?;T8 ebJm] RcDD<:C5(׈k5RФD5R a=if-PZ)N ӡBZA Х""ȈX"bf ń Z1 MNAiZ| {.1DФRbȄ3~XkELxYN5~p?Ƙ_/\:=8:JAŌDh*"zRi W^JnYխr՜&EY* !}\C(ky)Vr臌Zp$q!k4/??qb֗7l9L;%}5jZ=Zz#]X!3#P?ZV5`s4jz"ɴF]utmyDp2'Y|b'-K zH_)hU4pՁaEDuhH)5 P#9FP#%5RVhT}3ڂ44 ! b(.†CV9E2RrZ)ELJT3?³TU OH1a(IsC e?G R& h/>%C[@8l 6F^ÿ'e?w&Wzdeemv23>)0a--0%L%#6LKa`JoQlgsW~S2o3z#@rEp|&ȑbFͿt[O3Q1PSuG\T +m-^Rp/ nġ F{jp@Pg BmC N3,2, Z$cc:X7V*.!Uߊ0l|UaXn$$$$$$$$$$$$B屳~>v~nY&_!'?+7daH/Xsӌ !P+H~*Zi]r՝aߢcI3WXJ`Ko=Yni-] (2 ༈-] ؁-9cQq2 }@A:q;>](PPN[CpJ w`a&8 3)Gj11P q!@> k@U iTZCWHEq灯28D§2hM~9 '?2AGCepPyo˞7}&d(· $T'A >??/^A[*5V`%tx+J0X(ARk܃B%ij.>Rpec\|}l~9 Oh@PH3U\98^a\ݰl@Y>/~XP( >7wb(4#rX%ESUe , a[|ȠZTgTqA+{L}  Rq|0" P s4a<]azh"ᩪ TX Έ0,A3@paA|G4&#DTHAQ84UG;]oЄЁnbJHHHHHHHHHHxSfo9S fCK@"=|a`XT$ܑਟ̟eLS^kmŵvvo93uP 90U\(KX!8JcryiG?ٟa!zn9S 9'>cW_Kr&|/F}#I5-Ɠ-'tφS[-g ` Tp 802l` g|GMZ Gϼ,ˌ1YvW<oEh *H  !#˲}~{G`߷p6CiKg0s l cJp>py(ܗ=zp.7-a %‹5!܊S\1 Xndg_^LA ?xg,:<ťWV1=FZ:MȔ`%eƳ H% A3*k3` g+ 042P(K:*<))GO|? zՈD]!D^لs.Ks;M>`V}mML7?,Nۛ|<鿦4#t wVT?R9chh`J^,$!p,CkD2(!:MwJ.L.Ep+Q8H:P7Q@p׌c 1#zUO~,~w쵝.z PӾTrSP:XFQuX%  RSF <}6A+kW>!C4)aLGE ^fRր(J8i6zC?sp)Lo9<#4 RM: y yRBlzeI#5XRoR>xEPW%3Q`?W%Th;FS4b4!#8"VP& d+@PD@gY~#9)DE,4, n(%0bR BZ rЕ5PFB1`/BܤZ ;*!!!!!!!!!!^#8Bs_<#s5XF` \$3 ʡ5(J̗kDP(-92_  SL(Jt 8 -QZؐi,zN?O=$CY)ZZ``VpN4fQFPV 5Ꮄ*ZA7X&&g2 \A a i*et/C%KHB_V:ЀRH'd,200I0N9 +"Lz ) #79H胚R!dh"a4AA2p !h(%LN1 -.EMQ ~@X"bfVA!,G#@s&$$$$$$$$$$0!0jg~'>Y{߀a4Ҿ}( 1(q!ae`*r/p( a i8JH 18@(YsM5\9+nx( 4cۜ##KCgvT$x@J",OpABk4A0t(6%8Pc&xH4~+" R#'R@p&(h:ȄBW.fʔ"B*F#MQ$ a'p*J3>_9Bfu]JS! VPzQ}j_Y_їאQ' א3P ӣ8}ىWneJ :CFބ"6$8iBpޅ6Bs81:plgζ_u4Ey|y/|'NO#S^jysՉ;:@Vq2gdѸRw:^Y9rFW~@W摉 np}WKk{"/UP~zvb薓g&2|i}](vɳ' K+ 'ue^"bbIJ!u CȭgRi@5Hk@b5C>W&^QP#W $w% c *ERC=uB#xBPvD<գiWC;<mI %܍W&& " Ɏ%CPYDs$$$$$$$$$$K9Zq_7wϽ|JEM9sxSY5t hZ>CU^sH7L aq|'f]y+0J eO?4ԣ'>~>w_FB+3Iၖ1A!-҅8a}M:=0 ga,ih=ڨ?O<`IRBBBBBBBBBBBBBJp \o~җwzk-^ylg=j0@ x!`6B0‚sE;+X|{33㓓qmpBBBBBBBBBBBBB;pITg4ɯ}ۯ{sR9676hf@JXc&D3%13,[m%޻}X^ok<<O.9gZ-bj-Mw5s+w08~6nƍnvδ-x  =C-}3{ղWڵz7^̎` S:VHVE! ک8绱dzȔc COyŏ>{psvr,s9AԆ*F;? _>}`?%= n_oBBBBBBBBBB}b`*pmsoƛWϭ][tͫ;ۻ@'qPu32ͩىN;CO?r|yaf9dGL)!!!!!!!!!!!!!!»iYҊ`KG-VW\vus}c{ck~[t;nQƖZ%TW FVMm j|!d@;ճO>.0[ulٲc 8nΞq$\((C+xFFkRW%8x( ʎA<(\!q`!|aDw@ ;ų^iǍ؀_zٯZ伻r%C4t:A:?ȡ!Z CPlC׮ B!B-OcF9%?Γwn1$[?B: 5~V1- \(w|% ^ 3xcS"B{4חm} vMSKP}Ĺw|+KHQؒN,U=Wz  f]?wf%?ơ@!8(Nz{ LvݫsoGfmA=y[:TD-^CP)%V9~V75@[P'%E /'nT`1ƧtLn};!BV7xθ[!:"4xmwK(z@ɠ@˕/ˢ<ߞjy鱓cϯk;CcR-F6Ƥ6$6 RpyEheoG"d1tL[_3s :eb`T}}سvS{̍ @ ' 2ϳt/HTLYq9t\k-xZVdeK*S-~`?`XSoeZi{8Jljdܾ%@\4}l?]g<|̍iمm6y;9=FVϬVb-<e>|f:#'2gK\³ ط|\k-*>jo9SnAY:oJR?<TXJ Ο5N>Vm .x׉ЇvۤnMT5BE5f=/tZ7ۆJvHI"oZpG?$ȵNmSqJu¹<̇ҌȚ{Ed{Vc1\\RJ.ٺzR`QK:a % 50t & ti)r|;Z*: o[HkPUcók)cw4[.q,<:+ɼVϿ2aG* %wT! 
JUȵ =u^YpOq~Y_Ym E S.b6Wj>WD*- : Um<*Qz\m )dԄqBU/U@&n$oP6//%ko8>"ՠ 8… :p$OB [j5Ys.]œL e.a/᭸27us<1+ W-%uozL?7]hTFշ F[8HDn=ͫbdtíOϽ\|ڗ?':Vft=nO;#S+] }K{t4wZ\K (bvm5ץ*J$⅍ f8u]ef+!J3wit zcHX2--cCGX2*Պ?VW*<_G7;:om[|ߞ6)դn;%zB|[n{Aޝw5BV]T]"|rc9uSjsoeh{5K^^Z0 Ĕ؅Q7s?S5}@MJ/>/(TF :twhSt\iqޛkYؐ%kIeZ2xw"qLyVNWgח[>'LXO?t'm)|gqlivvD 8gsxa-)g>>ji׾L[}5'[-2x¶I=Z:?,vVO6la*~E>mWvCn {B)tSk'G:ok  ܌nKo*x6m.Z<'dMBja;M^{7i_z%i)+R'X e<P8@I5y-#G2n,Y4 ,$jY޺lZ ,˪}>o_,MsDL7OvvJ] ~-P<=Շ|٬BS-8m([y}Csej^bRG66|XӁw :*,`#9$3 1S7B˛u^WvK/oO{|GK9GJ2/\|GtI XqZ"di<۷5~gl|Nat&l-^}=`}RXR?_WhiUV>{-ʇlbYvB—.c5WwytCWG Э2fgE-+"YNKkꁵWd yKǖ&*y;~^:C燴JVa4e;q@+nұ-W1KQPK%1Z?R] <'w?Utw:koKt주.Bb%ʇ$T ?1NPwGٕ_g#/}kHB1 J/)X}>5qd~/|Q5hV~yҞ(4TFuЕ[i.ލˮ2bɹ\F>;O%DqpFJBT +Vg8 hȋUmG-~>-e7aT$FUwmnK9s:Q0r33UTxサyѩsN?^[z?]+ 629zV/!χk ]/\c8@FAIRkޜ~_Euqׇʲ'{ KF)qH"nY{}$RmgوoG>*|ĮK3|!lȷ]suoF|V:p2?gS0\M8ϗ)8D/UGL'~+l5x '_ IDAT=?^x:@Z.,};?"`FDҭLFΚެSGlp=৳緍M+#:J7>sCsCo#W"lfdknԠ@yur[㚟k\%,rOԟ jxM9ek6FZ QB@əw암#? Bv#C3S{\Q]**w HM}@6mOۺd^'*REZt֫u[G]=v"{=5lXW]Υwgc{+O6kЭ)C݄LJQŲCh56zҗ| \C1G%n,\5Ԟ:`*\@TT2?ƕ!P'|yqMu[j]f[׷JZ ZV@>"3w5$,gѯ(:q)TȋO' $Fp63*#5^?.pgs2FvmJgoFqZ<|&,;4'L+TզWRG:G{8aq^]Z1EXԸlX l6A:wʚ >KLA&-npyO fxoޤ9KA08Uo& gQjgq_`L薭lojV|aG8IT ~ø|PA܃qAoo@ɨg&fM _GMiPm_/n#Q 3WQ 1DآyMU/WIWѫu\7'E7(<; u0C%Ù6U JrR)JV!SJ(j[^\; .TYtd⫈/CE)|OK0 ;qS`6MT7XVޛ[_1֝<<&s?*XN%<>Gocy~]*&?vܭg XGM@kC.(hۭ&.J/4}ئh рKTox .>So;MәLPR"HuaP#wHҚv4dݽDž=ߐI%PBGEb$zhqo|-ޡ8>"$2]jwpp?/x_q[ERMz(kƈ)Nxp+*l}q* iY(fgg'G>JP%kjR<97oA*$ﴕÚ$$gm4{ x\;`T `+LX>rd'K-ɈySWA>S?u,c&CDuF^ϜtʑϱjeRhTL 'n[Prvŋ { tEa-ݡsGtT{>ALkEfXJE3X ^m=l&! ^=Mp:}}Z%U K@?=ס4"oQ$ y{jtX0{_^)d!vm^S2e@ ìR֌ĤArw_&ԣ1+"dNYF5aٹ/hqOÓ`P6~‹'hS>.l'&]Tkc*\St 9&|EN;׷}dYʣ]tv[1tj&|A -MP,R TG߇q R9|=S*ԔS&)YL(* ( *yΩVWVmcX70w7._}x/_73DO_Mfv{uFѻ \IPq Ξs.ǝ+3yPBb}}-;̡T˺qt>A2U$ok徾s4YcW=w JOZYH}!2@ q3d]sTl" QO ܚ۽bZEo8cL)\`6 7,%R3Xe~=F7{[|<5KTA>0AjƵ>Kw!S{cW7cؑYçl  ?ɭ'vXu%I~{i9O)|uߪ[QIl.a|weG  =JIF5jx7 {X`ayjh/+R]ɓEC6?ion%툌HIEՠs+Yo_st]*Ps 69D(u﷘[Ij1ԣm"koO\]DOY?J {TY0a ae lʦyd+0iIX x(l&9^dM$Wad/R4 R˔QM;S~”.G!4,9}/}[Z78 0 2/9rt[>C3־ ?#gJaQٿ]/rWٿs}ϗ|] PsjoY}Ǯދ~yuiLlJPJʂP\|v&a0B@YoQbtQ1i~:W 'u0ggKʫzLAh.kҹ1=;/1yC:.+,Օ]h9Slҹ+s PoOY ݈ ȴ6q0ʢ}bU_:UӋ mVvnZ?7֤fixkk@{3nruk 6@M0%r9+Am;[Px?X/<#j ?6Z\``˥ G/KN:t} r ` q3":b9З0d %UϺZ)-B_{SaFyj$Wz/M"2@U r~_ՓiMɌ9}4J^ZX> MhB^rbh`Xu\+z$n ΋k ^-AJ?'*^zJJԑ9NJPUoFcovLN1-jz'V)` в4mlvf~> ==IJ(#AjdxszDxeCH1F;վH"fIODTsZߨKT@NOWvJ}+wْ1xh@JX>VQx29;|a[e;(+ݰ :mB|Mg"B ;+aJOf{ (9Wjx~Q<vNB6[68_6Jzt^B>WXsznT᩹ b`^ ERњ0Z ~˗^3$u)-2Amڱ'NRWvs"@Ȫ3N^7h׬J%͠dGI>\~XCkS[Ϫ\& e+5bO::\ֆ@ޘUn(md%%kI ^ sTh|珽L!&|ANye{Tb7h$ڕ@f*ByQo͚&\ |mXƺS&ei8:eɪdy َW7橫<{Z"[evsm#}ٻ;B5uU;~qs/1 `o е+tYuAAς:P5|= ڪA4{V,=^]`y=$;fFM+k=u3{6 |qC]z67d|:~'FPx6-, X^d|ߚ;RGwK_}+WtZ/^O۵qmJi3ϭYNx.2 Pt(GNlcQIK8ͱUS'$0# P-oX.9}|~;9ѥϝ&ݳN2dҰ울4:$c~fjX'HU,#4>7 n93px5ǻ-EoK\ϥ5ʆCR^$N @}/wma.Ѱ=&,+ m'hy';“y^pUIiS;yu(Q`{ ~ TiݠR^ zO>q\>bi_A㛨"PyZ [ԥU=O3JRmlLkoj@QDn߾Or8zƐ zV$,F.-?5㕦&jTaZKVzsuZ6 Rfd [2hƦ W~ŇD֝]*4=yY*OagRT1QK `[V˹dqwgM2ER"N@V-{ Yn;t_rg9͝ fPV|=ƫ3]ܽ™ vTTiӀlj dF5j̋S{JT'a 'I5Ƅy \LTzgٴ-$gIչ_z4C7z}ԟś Ӟ5[:S㳨 }2|mMˑ^[iaߗn),@Uu٠-< z<8-);VTp)]Yc*Nf `FI|6NΣ'f&"֤ddb.S"dK7AL(%?#Sylv1lE2EX;A4L(*le# pU7?'< +++Od@t|*bq]9R9| ^Ƶ=~ *jUNxW6$DOضw+=iKK}۷oYxrZ`urmȓGxm>yxn)*\z$U4Y]w{s+%X:-8BUAaΫܼW^ ͰNj0jD!U[ϿW,I1{݌l^\pĊ0׌?,[?ul(f\Y0*gM2Edޚa@=c5[ʬQ:Spɷ+.p{gčnXopAengT)w%HҚ p4 vp_6O }To^W IDATڞ(wwcO @z`uj6!._z#.y.i|v5XWB*I!m0Kr$JF7t{ޞ 0_ʿ\W0]:Hw2׎Z@_вbML57*_`XUZty: 5EUV!U$]G\5!o<8C?WP)KIwĶ&~Aw 3Ieo$=BA,á|prW\ G{bGuFT!/)%vC'o /.U:`j1u5*\gm:G  7(MsP]tqvlυ[nP!!T[|#:u)R۷=xSd([X4A<dZi{8RO?쥂Pe<6;3? 
:]fp9 2PTճ&JEtiCE-Ŭen\|*Zݏ6 SR9Eں 4e~I{n^w]>EޞyqMѦsN^T45f{O!}ٜ",YF7(ǤaBUak^Sa#%<Ӛ+},et,mk1k2)T$X,SQqd6t̛C KěW>ZsnfS( Y+njyX0Bz0 `0_!BezPKr&1\y@ޘ22IQMfIws(>B&%2ߺ}em`l*XujԴm1*B!BH " ȏQ$P萐O~vz!/>euR{kP}X 5 nsyJ]<G=(71O B!Pmb"Vh@c4c|+N^zjvrvZA kضc+K(f+D4 VjX?vN Pb{G?K¬O/="\WÂjYE@!BUG>cmÉ,K!SFW08 'S}. ]A!B! 8v#=+Sm%/E7?|a}H/K5+/vU.7/X`_r@u_F1a7+AB&@ǔe^}-+&P彷o_%dKN$ -MZXw:gW[cK![0$q9el tFװ 9hwpsխʑG!B!!Mm!tJ69[iM%vP\D?]*=ll;ޜcz UP&skW9W4#}d?^P(4>{yHqK6qz s2]BB!BHcQ0;w9sݻ'A'q0@h+[.\"ֺ߸bmGt*Mz]l ϭ|^cg-sΦr)=GC8bjͻg`t!B!κxyy9884iÇ'N8tJNqX?7J\Yy̺prj`ӮY&j +L0r6{nL6c֭ל 0|O^}P7qY͠6_ Ҳĸs9|wA{Er:O_xOY >a'`cKo&Oo[C'k}0:®!B!ĉǍ3f̘ɓ'ߗ啊d<ocvt[Yb'@Dg _$ O趛!_'UZFFqAVJz|W19|,T!{ շ17`~=xg"Ş ӟn!B!A ;w4k֬% xzI>Eu#=$[ q7bPQ)f n!mt~1d7=?ڛ kZVjBw>(oQK^@&zNV)kB!BP-86m499YĴ:Z`۞>xa*oXxkiYUsc\qtcbl١kWgy dk}B reD!B!4Oరx޽{W-a-]V&V1wf6z8Bc-ݝ8_\4h{z%Ijvy>y~!@; mB!B!T-8\\\_.^˗^zOǖBIoKԬ\L?xɷ@ut ˦{./Bm~ B!B߁EСC'O~Yfbcc|rU(~}T櫜o]PPP2h*8B!B8وϏR}.ZgmSNխ2%w.Zw.Ip: 'R+GuK )` _1uQ"B!BN}*эDp".;䃧oǺ,tQ\tEݢI|8N IXQB!B!Ti|`6MT7Xqd7xc|0;yxLҴR8AעR)Ʀq ^vaN'sWr({4 v1r:(iAfp+,|c![0$q9ڥgs꧗F}-%;%04iaݱ]m+maW^kDbAg.ĦeDCrHlf5ժ]NNvKOr Y&z7yon8'6,=c+] $ZJ5*uX\B!~X@ 2l9'x6|O&8 Ĕ|i3~]j|6RNOv92ȥO?'CH=D;Ǟђip,t&R3 Ӟ_}h[o94nD Pɇ9$9/O]oe p/NMs*Rm:AhY:ԫݒːi&.B4+W*_!uقΝ;...Ֆ(F@3D䓸dƺYvi~]:ٯAL$ ^^۫;="w=iwo0N}msSW= KR|CjW},=2LI6ZEi6Y;sg ңMispBs/‹Z2ze ?;v|1T"ot]?DVh1`a-M8y|xĉC)Q&\33N S+!4, %`;ю[[0 af=]hnjNG,忦;YF?L;: ,0:ufߪo6>uGԘN^.RDa= {r~hh9q'pR.lD@w5hp{k1i#dׄ݅8vu5?/L@ǜ%/tZ7ע]<} s1rٙIv@HdVFXDq ?/&_:!6nlzfXCu 422"$+0K:t,tJW1/DS(ƍ:99yxx}q u8qb^~}GQ#K '㍧Q…Oׇu3"<|TAL !@pӹЂq).O{>n:=6wT:o^6ɪ5̈q%ݚxA4u6*KXQ[S@R|AVv>1C mN_&^!n鶟5hɰQeŦWo’|2/.G7&lwVa&tMLmwr571lwqQ_{rC Ă]+b-DkѨE_b+Xb/X(J5nq\'Ny3}SII$cqqkvK8{A%raKO>ʊ$I$ Pd@zp;:T?O!}U֬Yc  RtJAqʕZj)Э[7niXQAAQ Nsgmg> ;klh~mVסFߊȌ ig\. ɾڿXw]+,x]nų,1h@_Ozǐ4FoC5?6^}rC#)BmL "K6o9ڞ=T?obvϯ?D==F^:wR3ty ] ZP,1ηJ{ɷq-7]á:mKǻ{moL;fhƎj9#cPw(pOUPW^+&?<DD3ݹŜ s~lƅ?|8 zjİv˗/4-h/Ϧ*JQ(?_SE)}TF Ě5k6BroMI[M rٜ2fmLNNR-gioz1QSi"zsy?v.n˗f+` %,HxV~3+*"PD!]]]n(OIT zEC~5d`Sz(?oBtɟVZiUTIOAX)GŊ߾}ۮ]B_4cX*0}޽}& zU|nr"He]BOR`xTiz5&c՟0E\QSS9Ov@"N# cKVlˏ]~Ν;'[9zeC`occ2?j~ >RQbnl[v<[1zEfQ_kq:,G&h;scܳfozwn[!),>+ѩG5-|f xOT+ ֍r DDN»ZBdꟚdxƭ|]n}0_\A\rkoXP~/I-[vK.Y[[! HS r}?[nLʙ3g>~ضmB)^}mo3q'8JЉ~4,[s:m,V{Я SHJ#E$R:F8zV87M=n)k ªA; (#9L{"r|/Ϥ 'DMz!>L0jŊ843)c/س4$fvq noVx290עIpdǠ!}Gs\ҢNEY MKC̸ '#^Zdh(;q*gؓC ˪K-R\\v4gذP`4c8!H@SI9:a{Ge$I2cb`BGQ~SɯtУ̣V]v͜9SMNBa ԣGlA)VJP``Ç-[v˗>})zFcN.n@&w=vwT=6lBwZWVIyyaVOaIT ݮcU0Qj3nz(pIe^|hݵ|Uw9ԵQ+'W"T{:ZT?E aUe:|DN\c73:7p\ A |ٝ똳3wW] Ս4hiQDB>fJỺtr$bF-fY\W8Q oî>ixyB]*F]g ̄w2KaJJ}Q_EՕǐʏ%&iӘիW?\O R}׮]>>>ADg` hޭM6GnbqɥG2"(#3+1~+1. 
aH$dd=npSs 8y&dJ22<Ի2222>E'UJ̌'h1zxi"QVRs< #`E-hVBF:~ IDATkAG;g U7MmV_EV<LÉC?jGޭm5Y%5W>9|m꘳P3 @sU̥7(s3< A%''ʂ.tmվjkMGpP~MP~M֭[oiAAJQ=)Jt-=/zrc>m{[.պX~P |5ύ Ȥ͓o;!_Uc8{';pNOwIVOWmC[,H`'&`TE# B=VT1Ԡ6npj ;hIYIMdvXn8i6,A5 @}@BVK]  IpKg7pq:)s3H".APNsS8&W[L܃{Ϩkch gTUOټ:u1~G -?h<bccW^rqƩՉjA)N12-zs4N͈f]hkN+Լ{͂#apl:!*3j]eJHwn11LꡭDi|bhɿ$Iooin t;MnP`vGpFoL[\n4c{VU^G[Z{Tsƙv[ 뒀])s8D:VN w#6U5MһQ VUڸ${ldj;/#5י`S\n E/z.-+b EE<O_jp[Նr:Dχ\eZԯQ(?o*U}pŋsjYY}p  HUp}fO4y_33@XZ 5L+5W"ߊkT=Dowt}W=Sˊe8l>Wge^Wo~΄L麃5:)|{~Y mBjc';IqCīyELfony|'ظ$_o.8㧲CgA8`W8+쉕'-'Y|X7ӈg_DIϯYܳF8eWsK]z{ziSu)<ȔiQC2ڑ[I=:wg< >^@7Ӻ5J.-je;bЎA)Eʭ|'v pXRJQ|{} h9]/\.dY.wjj$EX$\|fGC}!Ӂ{~jNR"vj8v m扎 a,K` cVf"T~hBNPd2X 15 wճUuKaʱciO#7KvS&iq> 37\_RgQ (VX ( N Xx/b;tk*w>ffqrrF(9϶RE9%/eS9WE N* D Y@s,$D\.h@o(@ӧOy<xfffl1h"8G4GQ~CUڵu F׻u/^`Z6D" srrRiNNNA)p g!daaWrrâpݬ NcyNNl) S~%u TcH=(?ʏJ~2Z=ڵ^:qDRMF cAQL&cA)5 w56kvt"%\ʏjWz'Oi֣ܱRx RAo1Wm9V!_cl%ʏ [[vimyP%JmAALE9TpHއ~H&[:Cr=a=xRK'C"bZV-Z4ʖtX;`Μ9<t80v(y<(\Jի5k*A):PssmdPtM~Rq&RpBALLӿ(;̜9ۻm۶$I2n8J[(ABgH$Lf' eCA) ʭD^tƒXXdWdnv<#ϩ5d!0y JŊ׮]b ///&V(骘"ʏE_I'Ç}zݍڪ;8Vm̭݇=Nkt>sH[^::oRyN NfAٰaÆ J[AA) =xӂ#US u󗆱)d#] Q!@-x}͐} sa$ŕ}f[EAAAD3wʕC]zX[9r e GǡsMs/^Ʀ@&h;hri`f`MM;׿<|͡$Xh7ȦÖ^bќ3'6\I    Eͭz7oܷo_```+7oYo\1#_kS#)iT.S۷͆cD1r[ʦs~ՙR{[Ͽb(   |㔎ȑ#.\8tE 0`%/X YrE̓X>>W(n{@sᅫ0&>HXSPh78\t    W\UVN)ݺusuuqF K"tmɞ2 H/ŚӄfhAAAALH)(8>}TF Ě5kŕ0ZǤ2Dր    JAQb߫%}RJ%/L )   bRJAuyeʙ3g>~ضmےDaۻI [5w2   DDQ =z{jժۏ??"IRE+<<9ˁIN w#ZDAAA)ڢz׮]ׯ_iӦMiQ5 'ڀ/Kstn EdQAAAeVxH{R Z"y>CUWE AT.  A\-$Wi.9(\{V̿N|>so4뱬P@% pJ{OR 0    !h.m7vf_?_A(!Zp   H1R{g1i"eEf@AAAB-lQ)Wo_k\հE< -tR!   HyFztLDCakˏ    .PQ|:!1Ж=;Y`   RAGõrtrtq)m9AAA"   RA AAۄi%PAQ AA䛂iTŋG~:666;;[$kMxUVusskР;BM CRprhYi  ;hʺp±cX,Vʕ&L lvyW4-HRS+W$I>}ufaaQ{ RtJNv;w"ޙe_WKFUctlPnI-%&*  Rͽ֭[|ܹsׯm`zٳ'Mӯ^ $K@&|.5x̹sK}kX$l rAv]aKgB.=ONL{gRͽz5t3!H z|_]i姾5ʘLIoߎ`S3CYGI^8*" +5?=x xNnNxV$y>ƈ[jh($7]qW.YpjRF6P. {/w MӹݻvZ׮]Xlhn@' #ϰzZyT{ʸQʯؤp8''ۇ֩SYfEAUp? B v 8ssm0ȭ[=:?ޚ6sU$O\&o%Q^.WaCrn ܘt+Ũ̹ ־ 6|Hg`In ~} sa$Eh@$-?fRiK 2Y$EѤHꩧ,!y˄IqGe Cy2ˆ _ e3nurx?[H^.#Lw͛ۛ1;Sd2= ;H;{By lǾB͛:t}"Q 7F@s֭*G)'\ܶ$)xw3<_?Q-tؠUүm}7nz+Dza= *]ǴV.`]*@nG+sqPRHF'(ujnzk~횎jӯ dWi[V")a#cŝ+aJw +/v\wZZסf 2i3m^/#ƱhHH@ V9ڐk7R0%&C68|Gk[-L#z'!aOoYZ$IϬJ*񡡡{&I8(V' 6wt Z8>[}`NPP?NN7\u9ƀWQqS۷͆cD1r«Z'k/ʗۿd67=$~{D_σlмbK5=͊uϳDeSo:PscW>82&s fb;;!Xp@&%xT.]2I+C8Z(L&ȸu떷9aqJIR; E w;fUqۺYLm&4 +5lbӊ.hZFJRLrd2ڵkwЁf ũDqicSyHp/ ُ ğĠX:W>AhQY1sJEEAJA<Cf;R|~9IN1K& =V] ڱйXU:qn_(nkMtܧ>$ mL%m:ݦVyOl2w2;|gOD2U䉮xnCD}sX@2A!?U%߱U7v` ɪK0vP}ן`~;3a`UA}1Hn/qIZ:ҟ^>x8<&@:jq]k‰G X\gOwXp R|[ p ~TŐfMeS/2\HB߹7RŋbIRDr/A0;S_Ψg/vQ|4"4=%şGU(-H$BP"p8|ġ)Ao }Lܮ.=2q _,rPp"y9+jJ#oEgdF%'ʜUj$/eSn5N}*i;{Uݪ:U.+<<pz*>ȿH)? B\A%]=dh1-У"#V5o$h ]cQ?@َ3@QAG-#ao~[S29kif1zWC"un IDATnT_V7f{*~_|q0 8202c̴lA3Vi1/[jHy&m܁\ nmzLq8jĜh1qɥ^Pw]]?Rn>3A]zW^L_ԏjy@.dQ\7oEE/4d F**h3jve 8TZZӧO[n]`N|#((H(֯_‚QjX,X,&DZm|`Xsh&ݿRg@# ;1ѸPwgU?.yh2:7W*HRinnnnnL&D999ϟ?gX$InAC󾍔u5NT͕)|pʬxV~3C wm,!k/lXx|?IpuΨg Ȱ`|Lo-`ض3xbxSۙ:cybcwدN@-|'̭oRZfoE^Q7'Aa{n_2"d>CFFM + ֍r DDN»ZBdꟚdMQhZ"ֹ!zй ~. 8hQCxg ,>"_#I]~ENBT` *hd;w:)q ?j~~I^b[Xa/N*4e=$3sʭ9JP6eY\Nw;ZP Y;W/컴k|_Vp+)'y1 폼Яnل+ɲkԨfY,oiYL"0A$|qh>.@ˀk[U999!d(I{..ͯZ$Ot_SE" JP*1crh6:] 4R6\Zq72iřO RflkjBݧt1\Gw'VEဃ BQͶ{mVV*8~((* *?IP$ wK]Ij{bM#b?_~|wܹX[{EǠcǹ, g 4oUZjժ ,"_~HkG.B0>k[l- ^mM O&*VG<>3wA pXj=HP|y 6F+PTs!lgꖊP?jQ[_1*Xx kal~}cO23$eOs&HS _ 9O[/cn7&-z &t=S!V˨vow?]>X"P4jwqMrrO*!~[ υ:2|g& -7ݣ [!|ytVYgu4o wnL y;=wF55[8)$I e"C&X,+++T=b. 
7J*tyuzI37<Г/#ݰw*VM Uv~Sիw/@'" !H®>V\O2kϚ4f_-Z]>F-Pyи̑P8پIkZBhn,NGK ='Ehpaԭ@v0Ѡ!>޸IarvQ+H0|@X>'vIͻe_6QEA-ҘJ6~Ƚf<9jM;ү5'HaL͸kXzS]?3sê|˵Tb1͈+DQѻU:hkz Y;W΃?'ᣵ`:F]ѧ~!>_"+*ts%L&r檪 R^uHH"uA4 MxP˻{+{9~-+5?UoLG^@íICbk7O^eݴ5(R,Ϟ;#>k7o `Gy% 27ܜ䠂A4ǹ,˓'9qܝ&_zf6~ <3 [ظ{%U[ -7CSR+襁ouSV Bۃ@TّCsdktMφ|7o-Շ u TٳHz˺sm_3re]N^^>ft5 f\  U,&@b6gcC*4Gxyc^m3m ^_ӹM7Az023#kX{4_b W{.LDQ:E,❫VEwwi@] _#fÒ|вqw7̐H$< u`FrD2/珍gInq5 \jo:ʳ= Я{s܍$W>ٖGk1%=qeo 1;@ GFզiӮqga'4eq_fEd26p$ s "P c8>gwPÅ6xc7A2ǿcm?5wwbIRG.9v>5`ힷީ,7O7u&`Eu{ד(n \׼9 ,+M=b_)< 2o[?oqe> ^rY߲fJқ`]Mܳ`mgX~F,_QN[IrU'Z-~{ ۀV)NP >hF_ÊSZtCّhf4;Ǹ#? Y;0: U7PV<ˉ'fϸ`KQO`T_2Z_s$ّf.΀;5 $,yI@m |hOsQ WY;Ll0=ᙵ_'7A%HA3MOI!$Z"or9-cEs C6`'c2՗~L&ՆbwHYb1؋u\3U]g cT4(܁r'M[B^dNg6"zR"JL1&qZ*nnGP C(H  E"Y*/h7ACi!P wH+8ܰzTGǡnh ?h*^zhpJ,7"3_+M<%b?iѼKE ITXYr]g)n g~oiD,cl&kݷ2I$$`W8+쉕'-'Y|ZY<VBG'oMoc .0=lߵ;êӔ7R˦ǟ8DfVsK]z{ziW s3&$Ƽy~鿓w>myQ,PjTZ 멪䧻K K[w&S딟og, Q;(H!x皊i40J\PTj7RARxIOD-H+ArgB;@m+LATQ\4Pjdj@2#!MՀPQmaitߕ(TXp*2 \S( ZtbaVRVX }>۾7筗Fgd|ci)V*GT~vw-bhSˊe8l>Z<\$NdvNe~W*jn5ULH<mOOe˿܍f:x:6%a -p$iffw^H$8*+EQ W!WTy'U&SE JFm8[ J$ "V~ [~-?޾>D  pCUx,K5E,b;;;3< Co*ͧĥ݉JLHHxڎf|Fߢ:d:Ib>??dƏ ԾWCy,`?08^`,umPıN:+?5G3fLҽmӷ^+jJ-f[iʺ+b<8н p8cPý]HOJ'$iaaaooLJPbz/ڭ/2,9{ԢΈ(b RU(cX,8\DQKwey<-TjT[cDЭC;0uσJ&en|`%{C%o>d*Rvn@jjw-,,P wE mQAȳ܈8%⼖UR@u=ђpye"L)7f{0ځ>|R5t|d:U*#D7|Z?߹I ;YV<~i'_j˻^)6~Ʀh o_M}!I~`QD5ukä{dj5w:)} mшցF̔AH4>goIj2vZoNn/P+!j S<,k;ks"|tt$s]8yj˺홎ӐV\g~?ḘG4:sE.$I;88x.fڠKm3uvu83'crycJ%B )"ˊ;埱S\<4%_F2)jN8!Ţ(AZ|;`cC_#7ΪKUV$Ɇz)4NE40#6ӭSX/VY=Բ}h0l[W:XيABƗ3<}=\u?qϪݼKsaWFu8Jb>Am̓ HbQ.^fdd|DMFIv| q8NN.gM.>,»gd .JOHcn) )i"cg'h1D)BH$Z99ٕP}"IM)G.B JLKJ S 4kEso޼imm|faJtb8ۍOnڐ2,###22͛Br8fXpy%˜lڌg%XڎEW]ǪyD))<-όRBؘ!z/#|'U7DNJJJ6mfNql*ee2T*J"())ĉunnn|>-8T.EA#|_LWrr2!?Hu|1ޞ[OVe JiE˵u*cQȔLz mM9b%uKѻY 4kul]\R AWTݻW^ѣVq(+ffVnk.uBOE 4'jA K?v%k05 8]*B0$$$;;^z*U277G A7P B 6LȯsۚHIaRw e`Xǰ 8]bd?;aL QF#.*J+HFժU֖bA Tp A&ERFʵdt\A!UXC*>}3fxUV%f|r˖-Fruu-:>c\@(3 M111vJHHRJݺu=<L(~˗=zmnn@ hРAZjԨhnnA;+8!/O%%&D[7)AAA _VHӈxs|j 2ՙM=8%XةKC@A( ʕ+[ZZ:88TRǏ?NHHxCrsse2Yii̬bŊLgUdmmrvArrv|!!gϞuJ #wn1('y&̆;LrcW*9$ÎsYxAAdggGEEYYpEIOO.\Y$y<ͶpppQFRRRRRRZZZvvX,d]p8@ YYY1qK[FAҤ+8nh7aǗuwP=Af?4*¹} )3l>^sYHٱ@ DMy9%  ]t)mL\ gggWJ윜XK7 BiannX,EQh X22HDf׭ޝĴN5j&*"![ -lY /۳Ex'YKևxϚg{Ejf]G]]pcC] OxQF01$d3z "&Lԗu{uȪq܄-o>9~ʛ24sN^6g5B!~~(¦XBU1;ݹswU~ҼYk6uuUPoHU0_J!zZkC7śH2= BӥְS9$h2mh;QtlsNGڦ9kn_Vۯ~x,Zy4M6P BTHM\c?5Z#66V g/>/St|`(9Ig ͚vMPFTɢf6^ݙkiګJchԼd2J5:;$Krg7cPLJPP|3W'w՟w$\= =މ& #;*:!B!jZaJ~~~vڷopth/\_; :Ri҅H\[R/Ͽ%nֿh9'a%;|+QVs군ܟCV]/~zڷn3 ;*‘SDN;ó画ﳏhPHCLfnFzܵKu!B!B)k7o- àAf1fsS* qjZDie2k[Aa`{k^a쟉uƠ;S:@:9.hI@\ s fؾ-,6R!B!P8=z… Ν~ÇAa6=vS, 6a; Fg$0%3Bf=wbwn~SS2 F&^{"KwI)L2= ƽͣ:ˍVfR|ӫ{Gu%[B!B53 ;v,:::''^ܢR,Plp B!B!< p|ܼܯUS~FFnC!B!;D4)$:D/PZz&z Zk@Qbπ ɇ`uCNy+׮liRHtDC#B!BMK pnNU%]}~UP+[(sMju|^@YmB5\БJhŁY0 :%B!B4!*Զ|>{>}12OFCB›|Zm$qqck:6 QB!B!QpP8=hݜ >b8?cSF {!ǯ\yQT^'Omߞ&i'#]V{B!B1w׷-\镪J:z*y}3wuyW|jVfXhuuzF"0:>: ۩|Ơ)SXl=p wpD}2(C7jn d4FXUnMNi-bآi;18iO* B!BzQl./Z{LU̘ˁwM_8<<<cePpT{:/ j |jbyWܢȪv-3?4j0@bhǁcHLo`j #(эqn,=Uc+ x}W܆~Ģ}]Ɲ,5̫=J!B!TH 9m1xզ.֫'OduaM_ܚzxxL>x{Yf;v^wBr40h/T>3BX\I=\nC60 V¢" T! `CưS.hn?AS{r W-䀱8e;eeuG !B!/lMkwWd4lP)O൓討~pkzΝ:0lذ8w4. 
d{o1}&]KdRrؙ0 ?/b`RKU{P1I@Ɔ5UY]Msj[o9b>} ͍M kB!Rom KDl[e9?"!j!߮];۷ήd%с -L1*fd JŻP8j6[y]~DxrMS1kDl OKSzGOEGGN+F4g׼vo^ڎ>aش<ɾi3GreZ}د 8$jدFMR)-KNStB!B?Fp۟>}͛Æ #|aРA^α@U>Ӿ-YNy S ޜBA/; dPs=wd:5OƐ/dPv.\f}q$RN A4/jzQ xpc/׺K{6ׂ*| UDr^oZ-:~ Hx B!G" ѣ.\=w>\m~K=j":{ΚbF&^{"KjR}cﭔ.dl vI`JVrrTJI<T*qn%6&UWXUG h \φ~@Զ| VZ|1RB!Fرc999u-t'Yn*\V9(771d&ѭ S74_A/{pvέ(.0')⁍!iwǜCFN024DɆ6ͥuEUS143Uтq[o /5!Y:U-#k٤]/Hj![) *UaSJNGJhrmp,2][?fʔ] 8{B!RHcNY1)IyYk9?tɄ$W]/%U\i˜V랮׉e#-͖uM<'U+u~iF)*8PO.<[ym!r| j4 MM(a qzgw @y},>q_M_2)Neh;`ƸjAe%/_ ܪ=}Ǚ2&p^˖Ң sR*D7/'JT"!×7ϝwE9!B4`E>C\ϭ8f'R~;Iyg1X,S(2)m6Y@ :M7SF! t6 M/\$V7]Tc\E-r@@nߜ *:PmG8uTE(|P ͮF֕\SU6VLE^9i3y:*P CG,nXow~W"fdKMTZ,ǡM9xseՔQEogb~dbE RثrpKݨF BG@)7{%4Gݐt:2gMuiƨ60]*JS j{ЏL@HƟ{'f0烊 9ʹ6(݋פy+2c:GndulPu^vtHVT @7HQ/kэ཈~Cf 6ɻz 03.YuD4!.L[/p:"%꣔/vf ~ mȑR#_mH-;*4 PU4Rg_hYNMgnY^H|J2$ !B5ñW:yJbk.?h2-1 vqޛ^HU cGO pvV=ecb*(Zэx=uދgo}rzU5ˍ],xsSꊮJO'RL TnegweqL5AYK4snT=ࢮ\sn!Pw]bs*"ק㬵v_opme+mn2ʊ^j]_8|ve }^K2cSB!ШZz#F@(i$tO(M60&*>.eAUx^Aų|sA˘}7F2ɚMM8$ݻAG!h&"Q Ʒy?6l>%Vs=C{ i[ ⡤YBpjdW3dS)t9k]A=-XY_[}B!7@ M=}M"Pz|62?1]R4}dmid7_ j܀˔YP\_ IDATh]*ԭD$kDM'+v0k_Z)B!@"^CYH .'gȯI2EN\%ʵDy|֧ruLEdzpkb꺐s9^B!PBE a@@iYyZe^cހ = S~d}%;xE58R=lڵk {+:qMZc!>+P(TVQȗ_%cVmO6fc+aV,Is#B!T_B9$v MMLLLLD7E$zݷ}sy4*Pc(I"[$.t7͂?\P떴h@]w9ό=m$&PL*;{.=E}S=N E(Dg_[} qqwn_{}:k!B{H5Vn񓱹e$+ـlTܢ?H-KqcxܤB,LWɅ9L7W˖Lד\⚱NYgy'd6Q)6pȗEL&87i%#=79Ŗ}}S=V8Q;&+ͽן׺V+8:h}IY*tֆ}m%k'VZB!$~8D!$e9_uRv}u*~P)E{ϣCN{HHڻrފ7r&VPYU-D˷Ƞ-UW=d!g{%PħV3z=S-F/L9=$;>k-m?ё$ڈsU{?4 !BHL@Ѫ8O(M qFXppl.O^!n { Gm͑oWɋ);m[u}phܜܳk o A|ȝB &#SCϋ: s{hm&V R54?+JU1h^MrOi<槢BV)tF]Fe(ےO)d,o>C\?c~.,r(*Fahb*L'Q}p\.PZڭ1!O+zzVx ;~AuTui1"qZd:jD7zu5mTl f/]%$Z~5 gUsJt]=n#`Wu0w1J%fHEWk uMUOUt]zW:RT-m:%@!N1nU [VW|sjVɏ}#7nBEQI9])/f|"0`XY?+-@頝{o$raKeotnvl蹋 o2&:9tU/2jΝJrMFPzM<}}c!ILrヒ@*Ab9#B!y4!*Z:'Iw%,cW*yJ`+gn7oON?XaU1׃#g["xϲ=Mj.sط!zpť;yҧA-l"B"$4f;w׺z{sKWrBxj^ T&PAS ԣ|͋zdPv4m'7LxiFzU@Xox މf 2S_}!mʟ#NX/Y9#@56v]~F !BL8<<<q*o@6 L30q8pXӒ-%~m|dq 4qD}]}qv':uT=m 2(U8d~¸{_ٚVG7v3B!BM8<<J]APMzmG5+dg=ԼHN(V20?xTZ !,H|.`!B!;wtAaÆ%&&޿.TӫITK'Ry1=Je$+g a^vH:nt-">Jbg^erB!B?ڵYؾ}WaMwB o[UAMcbdEP=+V!G`o| @iFhqQ1T!B!Fp,|]]m?wu$}Gpޝ@lZh'L&vY6rreTWUx]%>]UEM~r_B!B~\ۧ߼yS$$$Ç SYK7^J5{Gvףѡ+$v)䈷_e k%)tpB!BHF8zY:t>>|92Rd{Vzl&!QS-mk,o;U/V/y'|U,00!jxiё3dsm%ձ!B!B!=8ǎ'HP 9^tqg0')[Yzbr913z:Ui4_{ =p7O?mZ!B!y5f6:IQ$mlQ){%+$8v]=ï= JZJL/B!Bh=8ڊ/{~bFDjV6@Ҫ()"5_@/++Z!sڽO>6'9P'p䌞2Hz`!B!F݆Q2] C6=+LXq#VWάc2?XP45fͩU8b%W>Qd(WAAAP>bwf!-}.$wS+_Z/*ThPO(TCn B!BJC࠶SUUXv%Xw\Pڔ8FϤ4abN-oE$HRdS+Z-tݜ) TA+bw=E~c!B!;]|漻YEޙӔE'[? P&4]`L|r9]qⵥ=^nV!j5F8Om B!!0IFYiQ~HؓǼx,% [,{ ;݇RT``} .T8Ω,>Ejf]G]]D(}rʕ7e@55y2fؙwC"_偮jnZaSG-^//2]e>te\O q n#1Uٳj(%\"\nE^͎ =w1청2$ rԳ[q hϨ[j?Wp`4֫| &BZqܽ H\N'=l{Zw2as8⅀؃.Ӓ(aM}BI 4`2V{k|J=e;`U+arq=ZeJTx YdWU’=Q݅! ^^0INFzW^.@{\_(",&" 1qS|%]׽%l,7$??r(Ryۖ9k+[yy5'bBp-,t1||ܴMڌǛMj)xM95~@WgJ-ԼﻉOwMBJf1qL [$kW@tQ?aDPN=8n?zOU?V)H2zOܶށYtEZ]|G̶sϤ d%k VIky'6[n:0tYmSq x<%X [4mt'4-QUN C(p?k6u =Y9~C*YM5ʬYƣ?|rw̟}}KŏnOi#˸nٙ&Q8֍&v}nj͹-đnb[7LdjN {142duh\o ֧WB r*mg~tGrsƗ%C0Y/B!E^KS2q ۥ\+cbo|P|G߻]Y^ϷjM7n`9A@Ca5lU+nQde;ήz+\)) ZD@Aض`+\;L\,7. W {w]Zw>^,$o- \u5ȹ-suKVOc`ڳyADBDi!'-Ku/Y9"̓2t8K 2' W;G!z;5q52έZ$Q`=[&Ϫ{U8zB)}k0U'mwGqߋ4z@DE@!`"m^Mid@Ir6^`Zܪή (@@}&ܶKbW=h0fҗqk*n >3BX\p W-䀱8e;eyuN5مd>AܽtDe̗SQ"g=ԼHeP PDU谳S.hnc4ڀw9ITjCE#Vxj^TSRI 5%;Ԇ \_sېU\^Ƀ <V\]Z3TN`g޻q2((,1hҢ_g i_+jV:TNf΃Ǐߦ/#3SC޷Б.zI>*:8||?K9:&C<.fLҔGNE%fa9鷵ӍB?:AURэ+mqN%`TG)9_]Msj[o9b>} ͍M WU,\mA6(+14 C=d@ZX5J9,rx9Y@}#P&%FFkD#V - '^O=eQqa kEa}yQ5R{}qw`k']*.u3F_}qk~-)}>[lWY"?b3%R4ӄAW\E1֪O+(#Ǔ8m}yZq”>nGIQFs=^Wwdv6wCяIvVؼ EWoW>+qg5Ou(J֔v?ԀQOij]zUІ}RR/Kcsdz pd0A%.J]!;ddEEEE O%)~69;5KST*_F"S fdfdggdff fw7}r-.p7vIȫjw07Zm9yV&JѥF(JD(xyS( ZٷSD޿XPdȡqbuY*O뷄! 
[binary PNG image data omitted: tail of a preceding image file in the archive]
doc/reports/ConfigItem.png (binary PNG image; data omitted)
PR,,?lMp .i5龨ue+M` !&,HSQ`yZ $"ȋ!!D 8̰`6L_$[ b$^A`9PրMɛdmtц,eq8?3l>R 5< XZáA#bxO^[wE `m#Ӣ^~in5QT"(Kz̙zX3 X]]r{&V#Ta@(COHy d3\ʹ \( ւ,LHRXF\J巢"@ Q^.f9dA@P P^B@0?;Th/_DEj†H>R `Qp[>2I'dG<(VzRnnn:ѣK -\iXXXpk׮---I-ٍNQRV7oޔZt:8S3nQr\i&I:Tn'AȩMMM]xl޽{weeebblJfq])PòJ Z뱱171.]rzrH 1;;;v[J9}a@rd yjjjffF2SD%$㏬0Jrj|*6wYG.1:o+_s p1J[A/}ik֮ϮAe S@@Pl$ -LcV&@AȤB R a"8`Lf71"&a -e#MXE!sZ@&L >Ιut%a)X8aCi&U&نEbm όU+EiG j']Wu y0z= DZ g$E1~1pIrdȂ;v¥Riaaa~~nI A 8RD=H}ad^E RNQd^ Ę<<70 XVDi,P%x2!DS0 P'Gn0YGXPK A!(B0â<#b~B"38% X3M]h(g]WHL.8RYTc؜ðiJJlau1YX ;:jiIzF%гgn,U#vp+eTfX;g033s…ׯq<==$F,l^7ntRjuuw8I@^]K=r?!i8EjV%D͛7䤔żI#$(wh766VVVvvvd# ʃz^󻻻W^Z:ujffF>]آmCx \\.,//WUћܹslj595QK#fG/3'"q`T*&''߿o[^^ّ'qxӑS.y4 na:11h4\ a7rQǡ2ƔpQO,w 5(ЅGp 5J7d7';<<<<<<<<<<>(0˜K `M~!x7Lj8*C j% 0 IDAT[gPCJcca8D!G: " A0` G K€hfF\9  L2t|'3(f%p!/[)٘4ՒaEal_0FڐuD@H4)>D)`Z>)҅%Q  xP'pPTǧD]~][[xffFR-7aշ!"+hZn?{3<355%}^ŏF7F6 $ݼy7j*N|u eZpbi8(i͵_}}}oZFcwwWs0H ?0bGQMNN.,,:u* _yWƂ t6)Rnvu=0# ň>=8pp(n{t@hlkcBhL9^-{ jqr(FI8q0B$4G(@gGa J£m@@" C@6ɻP‘A0l2u"- 7,is XQXa1@nKqQĢŐb2gHS MH5iaAd$1h|S 9{\J8OTZm||Lyqq\.jV~033S_AR3 PT677WVVžNG*siZ'\䆤-H\jfsrrrbb֖47Mqn"ZEQ^oZb!8$"]q+4g7O!1"J_}|||vvvffffffooowwwiHWm8PEQ5Y 7wJIӴ듓j5.qV1ً5\XZ9SNL1XF@`\l%c)Xd 'h6IAIf*'Cp$p9?(%L"aQF"E9rB( AE`Fbr~ZStD򦳒 0AMdEHhR38 Fj@5B aݜpd9dd0V#ًi j)N1L^ |"$.LD^YXXE^GcĈᚭ [ADZ$I$ݽ{vE 4!r, N!r!MSihpxx믷Z~???_*v4Mr dp6rT*dڷo^]]F1 rRINJpyyyjjJZ+Z}߹{u1y2a݋Ai) ( @0pFCیIVX>żɎx41dXT@r0&#,Y/X9Ke !nl0`h^xӧGDM J Z\kmt:{{{NG RizzZ:g \1>©$$Z70 4q秦i$Iߗ\kڝ8RIpeeessSk]*FgE @yV!ٵqy+!t _׉hzzzii1>,tZrX8ZV*u=kxE(֒;Iq,>4aaW<<<<<<<<<<<>Xሱo@;C'r^{a 3 $)*~u&l֭ YXZPNE|+캨>\d:m isy-텏;A^ /k/0I{d qEpE˳0L: e<GϜ_nXPm4QPK 0 .w%H$Sa)0ѥFϝS*fO\3=Ń  DJa `ڵ;݁JC~ !/g1ʙM9zaQYlIa,Tk$_kMk!$Ąh)PVQAǻH=wQ:Նk!,cm\dDb}}}mmM8g(&"1D iCå). $T*ȡeXyJeG s] YL=>>h4ęBD~nޱ;illl||ܵu,ZNLLq@2p u"|wۮm\/G ~hw) D!"DJAU}v{ݕ_$Y<2P"E{x7wЏa:(3S02AQS6X55f+Z>?rF|GQU|5eNdH^> u(P*j֧aD#w^Mypܐxv뉂B4G 9Oq@:q3Vӻ{1A($bV%)BOZ|?rmp0* @#92pf(I>ş"Q~ދJ kDf5a)~A"J2:ayXG(tpA6WEg/ji¢3DO+rY*:*%\ȫKo*,up퓪 3*%b'Bq\F(NmhA gR-#GpY8&P`N"p-oG8GQFw}wQ$P>!*b cB#p)k~feO&(+@A'EC"5TFsT5fPN䕆\G-Z- e07g +w>*f8lYZnw Hzi&`GF#ˌ92pHh 0J}yr\;0R1q#f2P,WX<δq݇ jyivɒ>S8p͊FPǢASGg4%'?rsm^l1!(ʝ_aP `a ضYN,EI6ȏ^L R ¤[F?ХY~)[mbVo;&Od|Q\M>Bs䁔Ә;}\)F"G3r\[#|8xq2/{@&ǁ B T)@"&8'6VW$<_?~w}FY(,@Ԍܢ D)0[pP`X XOn|z2G RicRkuoV0p1VyW#…F)J1Zc:2vC:28+/U*e. {Kﰊ KUA;{]PcȻoH? g-dw;ۢo l$qdx5ܟYhDE1,,H[] # f=-C`?ʟ}N(A*5T#d0#%D8/VM"$&ۼ͛!8 ~u\I>Wf R mbH흽];_w hKZ`[c&_cU?x]=<<<<<<<<<{_Boz NErϋT +XF ^ͺ>""AO4cc$ D]e(tw/v} C6S87ɣC)'86s$5Doh6Ќ`y ?_:/Rʪ*2=QTK]Lk|q~T*=[(EJ`K JcN7oM Z~RD(1\¥r8oݺ{E0(23e/"Fl8A{`Dt}*DQnqT"L8PA(1<&RE1 !$fѼ(R(9ʓD,s.EQ_1dAR &bB?@rHnS#*=p" %⑸ӣ#<R^5B{0.Iڳ~-!r7R An)0g L[E}[)! .;Y"7$e-H@Õϯ EH,# ([38X` :#F[JfmSA_,~~쥗/PO׶ï"*!a'lPo ͿVNOO?zX կ%HR#>xokQ'+IƘn_-Y#&_on#8dwu ]#=rmmg%1Cc)Ovs!$CҥZ{ ~""e*0aG@)p( TLJ %E@D@1 ( RJ1,!$8$ q?)'@s@DL+*$# %g$RDDPTRrB)@ _ ydל^%_ Jr=ndr*}a=<<<<<<<<<l,s,A[X2MwI6rTG?ootP l8IYFV12Bx'Y5A@ )q P\mw. ,Ze 6Q)˽?+]45` +Ukr"Gz.r7LCnin 1ʏȇOXsi#pi{'X las^)IӌzDS@AF@>Bgd{f6ZDdJ@MG@ 0( B+11b"VP@LJ{cRFhȨ DP@BpXeC &P*3B  ĵ @HJ2PJP~ԕ4#P7(i,)[7-m?)"" 1" AV|Jn@CkbDڸjy<` FBΗJCCEô߻5:4KJ8IpkRxV]p{8%ifQ1lؗ?t~#eEok aVxIbbF|$'E䳚G oTT|!\d|s^!gkIb]I Qn(炑۞@XO( V!Vm$LvngXXd X3%qo,ʂȌU"rKHb'/E>UZ|nxxxxxxxxxxx1h=0ZU mB??_{ؙ; IDATq,*ȢLQ0BP.x ,]( )rѡ )#UD3wO>? Ejc"V Pn<.D*'iT1 [긠H^<VG";dLњsbqd;C2Hh(l |}ƽ b", y2IAQxxxxxxxxxxx| G} :a@.g_|W% y ?ԕ @ 0 6PyJmWڢgj~0kio?D78̟"A"ʎeha@L5SphXn A?3OOTGѓXRL#2n!{NMj>~YX%#EvS8Cùq{ |+٘pLGsg-@(2J@" "CHA u>^P`zDS -=|-2u }-`O8_RnF*OAQGq(h*y 8"sM)AhL@U`h !B&D!jT#80?w\> d9!"C@"1>bQpGDF0N[Ka@Q=-&Y͕=M,` -)i4w3-i #G 6`..LS?' 3`2cTSi b-d밌]wŀ뮻TUU4UBO7x^'flex B!c10H39ܣP^QNP/nj]t;UK** $tL@ 8u! 
B~C6K"B7i)98,0`` )aV)Mb%r!fJAw"B!B;Iɽ=}q&@JiҥV-226jW~qQQl‚ԜhʝR XBe:Q;#bgg`H@ iA‰VpXM@0a KÀ.a 嬮"|ёcDk~ACzu0fYc Cأ4 |KrluM =B^JH))y( aڿH)v&+Z8O`iȇx1#,NP"trtRhi]{^$ "]ZM*݂2:/+Ҋ ,ߩةpҧO;Nωt wnN+ST1B!BH2 HCj#9 p0)/h\m}\$  tU`XSZ*L ]ىH$[ßǑe"AN *ްk& `00030aX&pnru^Щ57}a‰a"rpHsp8gus7/ro2%,+s(؃;{*qw:8g`([?9uۙ%U 0C̟E#/E@`!PTL*g 8cgq0@)^*)MNJKӨ1̾\ tdF7?K%(]VPڀ9cSp'/!%7L XB!BM* .4^3xA*0a2DU-6૕-[KBL !Tg.) ݀nAatgoIN2'$F8 Ӏ0LVd  ,7ig"WЮUc%!}J5I9GueU($$!xYTM*w&  *4VEa ]%YbTA~3aۣw+QH `;ܕFY@7: ^<; !܉Y0oZ' N 0( @m X8˘ L{ArWJbq@ev|>pET(Q΁Gpn?Lp @W PpOYK`82@r1&981,saNB!B߇ X`3E)ܫ+aPH7q6ӲE^mauiϞ=aJpxUL\q/tM ^;IFyF%`P˅;;@8Cr&i4C &԰' ]0T١ 4-E5SXzx#@՜Uo +Lpp:-+gF we0sq"Eo `R}Y՝#Wpp(99>@\r}`_l):.M;kL U 5]8GQHwڄ}pq>,?`cj w<`ćvnB!R!B3)%jJ, ;{~j߾ڶm>j]τiB5*PTx4*Ga t No1g> 3eޘ៫bgj)b1ta0a08oԠ. YyRab >y=\)*H'#˄1gNՐ0s!E'^ )>hO/4-Yyˊ=d` ;iȫbܝB63}Upw3F#pz-/} QLvK7H1d3(1$c ˜gqΘ3s;1ŝ>89W11@C!E2sUaS $&(w81p g|93 sX< RKّ !B!T9 iRÐٖMn1ʼU"a}c/qqvMV%kl^p4!8\|0Ca( pʜI(j½]/eh0`{J\< ttݨWƥz ӹ]Fq9kMg kWwEo1/c(:k55-´Rlwccc?`T~L7ń9hc(DO\iVZLeIU UԷzKֹNCή%?eΘd{ Mrkw]Ji#_o|Q '촚n/a)6Jp@0 @snw9)H;Q쬡NBa'~,_kNs3Mw?h";n)+ 6q&;vgVؙà0q;f`@!Bz v'ÒgG-P g 8R !DEEtl׼N|\5~Zc?LcPH$,łGrG;7ay H˝bGLݙ--g:)aXVv޾O[4ig$U^2eݼN'~9qA#44/t%(Ka\tjyKjq;B2 Wa|^?فCE~@+EH)ܠ`N@9 q&9LB:qP{;pҍ8C?MW2`)Hx;vNӎp; Zt7F^ #3%/:&ֳiq8 <;¹&I7"D5?s({ B!TaRZBJR ` tXBأvPʚ5ӿ.?57>SHDzVVT8|[P3vfY p 7[pp8 ',gM z.rNҲ Etmd.wm԰)61Mv`ܪnE.XQAXaJys^ZBʣ[7ΟS:C+llP:(28'oQ.\ޑ|. B!T})!˲=Duӂ 0VX3Uԯ[M;kشܔLL+<Yx[q)MaApLx!$tßz$` >)6V[޶g5mZQnB!B!| Ғ KX0%LK ‚D&NڨaF lVcwCFT_R2m«!Li)7!i98st2@HXR²1dC7z!khҶSmi!,K( 9WYSKL"(k?grP"B S@JιϞKHPg7|l׳=|4!*bDSTplXr*B8JiD(m:dH Ջe@B8>< }k?VWi}y|7?_B!BHE$R zZ">|pWaoiX&klZcwؿ'q=xipwN{X AeAXa"j׈iPf5[$'4߸YGZ [v B!BiL->*z p <pnؓV*hjZMktmsHҡC'O>y:tJƉ\=7Ǘi²,!qMQ5MT%4DF׌^-v|\Z1 T`²s>!B!B'LHJh^HMbpp8l0nҴiæMIKHKJKLIHO醡BJ; (-<,4"<4*:ZTdLldTTx(`RJ)!8gNgl !B!ӂ X8!B!Ru)%uSZ‚)aZ LdIFi !B!B.-% {rP"B S@JB!Bʤ%5!L`D&O B!B!39 *1h*5g7 pB!B!LHJh`GXh \"BST!B!Ru 3Aaи)\6h!Df?b4M>!米{-I;L1ٱIJ.B!TT8S("TA0(A郻{vH6@zjܪU w;>yvdxGѷs3C#e'v<ØC9jtDLD\htD9]wmw-UWZ=A=)Ww}~|ꛃ>V㚟Z݋:߳6sI vT^wmw-U  3AMSg >:+=M+w[L IDAT~Ko&UA!d2p  1@8G pr; Aэ<ΙK/Z.KW' H]Z/2\vg2qϿΤVZ1˿ݼܐ|QmBJ?ht_Eo><= ԣoOvQl?`}GoMhB!3I3@QkqQ< QM<xkEmy"tj_~g)ysJ YOe=2SjEW AlPCcFvv&7 5᎛jVHqtU(ی_jon~%?{m!v7n>OB!o,0ʢU0( 8kC;nXӪ3zӞ}z֧cBPsϝ s>X [{֦w :et6f~`tӨkiر ׿oKy TrZO. G̹Fc]!F=zjVOۮvq@7]6rJmtйK}}SVoIeR{x@{8}B!P.! L!ad: ˂Y p$oxjIHLLܴS<}럍jO0HRuF7%Kf\\L-ixWO{ SHLL^O7jԮӬz3Opۋ}~m1yNLMN>扌Y-)5TkFM,GD{{`zwO۴nAyMwק}_(hݰϳL۠_ڵQ]3^xEQkR5SɣYzɘkc}4RB~ekۡ ;@%3x3wXəD!BV %ΎZ@6Π1K җ?p^Ӄh꿆c4NyJHy]vڵא* ·7'~m-,=Hg; `/:S๬ώkp$OGK.h_?@t~GM)z!t?8"pRxmY9v#̵Ǒ eja|K6Skw{JSoVrc,]TN'/u&AFx-MK,/?|? ~uJG`:Uf!BY!%!!`Z $WL- xeDM ܷi=cn0]K6(X6߭N߾tѻ ~ A}iߦy2Zö4D1'Ў{tBE&U׊o!#w+hȊESϠ.qA'YI_TZ{n %Xu>W5BAz83SMwn9oݯ iKMcԏ-Nlߗ`/~f83"bKs5-p(0ﺏ7sZ2]|Dm}r&J9RԻ~js0C/YS&2yM@̈́SgDQ2=-]f}SsUwm|Κ )ULX"8G k{Y ݜPheo$AUdШhxڍi9DnYz4;Tv zyauLs#ᒦU{%0ϵY)8(:a9_?y߷xxO+;.)$3س&{_:w<ׅs~v ׷ _wwl3Ի㍝fxOkLуNKԼ H]? \~Bm/Ho]TpVVJZ tM0q{qbRMv?jQҐEvixAbZ̃~L/bl}&'TZRq_A*sm^ ħgF6˶8@_sޜ1'ʥE>9ۮ؍ N*|z ! C;s)ToLwxc!g|7K@pa|7iOI,kYzȨ k0Wf(S2d"U22޴cU{|gڝz}E[|tݗs^x{g&`p--f;OiO׌lWY`\b,NzɦTOw2gP't˧X;-9zC5iݣ؁ە/oOcQ7e՟yQsA%:/<ύuhe~'púޓ׶8W)&* &`X0 κ0N%`"3,5,jޓ[wU}_8Ӽ spc@yku<8a-ڐނ%s:dvoT4 *,آv -Wo:F!n,Ľt>}k8,1&^Rw:!Jߩ$s=*چ>)0f}E TBEhܩeT!mbIB=:n w1Z4U=:k9g ' yM*!CyiDҁU+V,\yվrN(^j.AN耱MB/:caoj{K*/3M>3lY0(ȩ1|`Jʁ ׺gBHr;2ziL;Q}AfyvGNVaW=5+4Od}c}YN^~`Oּ}'K_7c]K'}ふy*yVb +ߜ2[dj̐":e+=H%X=~y0sJg(S.Ai2qchCW|>O.uI'YL[O?DfjYBDC`Xl|q 'XW2S_ԕRX *2Ǡ/ phe+S{Ϝ="(-NO^^qr ?G W3xB5.wKk^hr0tZ.7Wk~5W 7f~-9}}l4AΞ\ݽ-ƷOIz_Z.r|D9;=/28|j[~xu9M7*abNŝ(o4~ 3d­{ݩ(?_hof^~Zwi.ePQ2\:}\=13! 
\tոZ0j2wiRrRث(:tc~=xtĆf"bsRlI*J)^֞߃ziWq^/;0!>sBN.oҴ W=7v_>pG֧8\  h K03W*uJ\F9 V;٨]HkdȚ6xšy^ hH??Tr TCDA_xhտEY}!ouUf2kmOHYԾl -IVʸ@Ih l rg)gNWz8=߹{ԏWʟ:fA 1C(]iT&qoޚ8=6晫&4V)հCmЭ+4%V"`5CMwݽy:=?wK]z-,{ ֈxEnT?_h*4h` 8WrFٸm~{ƈnE'=O@5fŅ;x17=fТe, \vd0jdVzAC{힕{:fe\گn7fo_s=sL*2/fM E&5[eo?i׋gqo-KVo, |RG.zqpՉ׋OY?d?\\P7O_sd|#.8O2(_cgX5vXܥ>᛾atiܺҿ( /[3x Iw>⅊a=k@ >]sU=?#9{x5kҒ2mHAҷ/]…me_ڷi^X0?lKCdb׼gwHgZ!ᄒo 5}_?ް؁T̠`n5b@KO;&&>D{qP*U;wܖr4)S;e†wK+ׯڡr̛ߠoPϠ.qŏ߳pFպ5Êwt+4 vWcԌ{f YݓN,ٟPKdeڳxek{}WUr;5" nx '?'`CK6az}TD{VĕK?]cZuɫܻw>z|}8?z~ApЀvazP[:Kg10H3q@<*BUx;W}?JǿۉEe8΍d[uRCuv|W|Oպ:C?w=_>7檝SI"C/r6O_FXӗgV ??*%Ml<3_Z)o]Lڜ/"ammj>3MnJG\zmwhufgC5j*]5CU7!J62 ]տȝ#9[Mt~%26&|/__JNNN"C e)Z94|~75ݢePG(Sdq#iMaۯK9+7Mh [{Uc+>s\KJ pT[Nu@R{jM{G4y}|bPK/]%YlekR3[7+j[Ŧphjͻ2֔O󃬻u0i)~_*=+^ֶ^E/x![6-e>' x9UOeǗ_7rɉ6ʔA~S{)**9T*UDebs D3w?Mzdh২fI- un]Ua6zpp̨62N^AkK+2jwgXP_{̑N>7Jܴ.Ee.Zyw9PN?Hh^KRV/\|@hۆ΃M*q ܴ版k˯p33h4vs';_ryiM{UuՅDDaSq݉ ܥ"&Wۯɵvzu2EU䣡C@/3HmRohqHzL2hV'1vX5=co'̽{z;^{'o>{7LhޓG^:%`3f{,#J=g̘1c 8M_$̘81Ǐ?!Pݣ׉rcHS@W;Z[dAX۫"˿Xy[)ws9D;zV\Yh!l>V/9 xKKYrU`G7ufϘ(4l'P/(㯇?`G7x׉Κ5so=[$WT0@z"UwYΌwn5Ng2hԀHiAc|?CMz~5]P22''9 +KZQ=,>-k,g HHջ]2/(agѷzEwKתKsˌ|VBU  Guprz IDATQ;ܰxJmtb3ek]\t=*wtZ{㥬eS׺@GZXxg}׆˟Z/94.EnߧlH~dDNG  ǃO1[z=ހIfbjR^DĒY *Dܽ.~-6lЬs}a@ ,^?|LE}AUeR PTpƠZy%/k~%8O|4ɑi1G}8Ȭyϯ(ys^_0'K~ϸA};hk>`%TXYIlȝb{ɽ5G>?*!O'Ulg[ *:sS#pDXh_ 矯Ф%qWS 5Ox]'`QiA2L:`(cFoo_>e[ ̉^5Hbuzt߾վ]_2}KWKm͝?~zy`}œ9XE\W2\*;}ίud ScZZuf&xWs<ҋ˚@>9ӛKe%huf#W@j/|M(}}s̙3gΣӣ,nkZ.ypLi!ݯ~<ǦS Z n}%X_/o.\%/^ӯ~Q |J[N᜛uvm޼moj^S W`LgsMFkbJG/^`+OpJ,LZ#}0_7FLe4n{̣=addOܸd=q"߅Qs輵?]گJ[JJzF蕯M7N*,S?>x 70;/ęAUq&`AHE)ܫ+aP-dɨ7of-ܒ3 ~v,t92Fc1#~?6ے/uM\vYv SK,HzܧGh@f^$pτ>7էD 8H9՜Բ}X ݾK|jMZ֌',r|onɳs{{nn=N>تWVCp_PŤmm嬚KoI}y(=gC!3mXMjqĶ5?ܚ sTHuMQ_uKkacO7ՄGג8gRKꁇ^z "qȞnי՝q3h V Oqjib%?!n}٣;z4,: gj%32\z78'7^n1 ٴ! ^=2 e/uxDp,*H$mr޼۞oRa6[_Ovz &^_bS3Pz3ܹd(K,wk[>[wIjx>9/k/f] KUdY q +j,(GG +2OJ(T%)$L,QSB`Y0Ygi%fd_Fr)_. Qj׏(zaW|}{4rӫK8 @x<qӗXеkV ֮VK~9kpڵV1BJ|kf{9?pLes̷]m1{CoyΗFvsӨl9#>wcܵ<~ڝn]@7+>YO`oȟ]ܬ#7ЯQ-S6/dĆ˻.vK&!򮙈m~W`  ZD[}ype`UG# U`~Ϳ ACܞ=ҕ6>Ml|_A%)nKNAPt ;EypT<_zVޟq6hަX3pagc@)J}}9'gfxuP1sA_mY߶B(8~I5G5y_E;CɼCQ޷?<4J!87-Z;].nK56({jOk{8yX-KwMϏJOm<:`^ gpVgh&IR5Ol~]krSQ.#Ogsx;7P )Ɍξ!tIT<+'08%tpRzݦ\˚u.ޭB+8sgn%?W˴~mW 7<3ITi<ӯ-U׭B}RRWѕz\sYnUyqXJ~d 2B)KOIi ϧҙkILR*+o?ɭ4yi?9I&~٪q)%XTǵ~J+}XbWqH|3,KӺh'sp>oHTC E> ׎ƮI}'Kظ_txު) \Kub.B䉓5jӿ]s^,֢f86{,Zm?v=&|r-P>iy5WY_w?A1[%M` 1+ j8(!Qy,]suWj:кQpuWjXªT+U/_)<\n 2+ȇ\vJEup:hZkԼf僟.ǯҰ~B)꾱~J3tzL86zC=rmO϶eM_ݱLoޟ%ev2CCLI1;h;?‚XWw:T%9\$/>V.sl||tGm1Ǭ#zSfm&_٧,pN|H9Q7w˭hũd]-,_0ۓ{/ԝ$^˹6vkw2;PáeR}ι^`6\CN{'~Gt:}1?ͩ]L3j0=,sS#>(˲$s}h4>=6r;KidE[xŋvwԧ;y41ϺXqht6ytlDUP%o|\b.1eQ=<1_ Un)]*yiY+Fe ƌ)vN3[8v^~т'clWMK$i?}?Pױ͒K]J+jկ&,M-Kp1 MV'ۧ@.<t>E!2e<_=Q>>.4.nb.)}n,\* s0fL[Ho XG=8!BHQ.)q!}|%_ʧ>Rmgf]~>;C2_qu,ۃ^32ԙ}ȲEAv!WBrd/niK'5LD)NQL ;xv-d1)ÍZۀT|7SukaHEdLRypDRʅ0_~=MX??](+ KXs?=V WյM^U~gZ>|3'Qp>q *ѿsڍ5jخocǺ͐T1\ɑ*q `GQ>bh"zBHM~jFhŽByII$[,R3=KUv&Br@. RXFf|sLaNyf=p,CJǂZ++,igRA{F|uuovs>pĊsI W/ߎ2D@Ryqd3߿L@lucɜ>EL$>J $ 4p$?ݰ5c܌ ɝGr1h#B!RڵsP:jΙz+(4bhG/Ip7Y7]fj!ڣr{oTEr0 c 8(D(bZb{oyP%ՈmBt#1ըZLڿq[&T^ɜaw.p66a\BZ~^- [)um[Ueyػ&#<|%<u8hd(Q}dWAjB!By, ZZA6P'y^XfY*FWl2p1,aElu!KؘO}hh\b 0߻ j6`Zo7)l@|yx- 7} /9K)U_Z7{d:} B!B!ŊnH3OJ& 9XzB2hР+"+F&n]pZv/k=j=}RpP89grJ^2XCjЉ?8QaB!B!2[00X9 8+x1H%+ 8pI7y޽{<ص,~a fmý;.3a~ږytG'>6 MG<^ެs7RݳZ!YߧNOX>K-K!}|i5 El!]]36ؾ,!B!bsĹ (IX!1+ jE^ٳ'<Bp'>޿{𩛶TmmnWF/.1L& 5'/]{%Kl+S Gr敭ΐWѫJE"joV܃R&N/ M!B! 
.dIG|(*Uʲrʷov=32:g_9|xMߢi/U1kv]{cwzn!um0ܒp+m$It+UB!B)BD+,*YaXM&f+$ GppptttW^mժU2Җ-]mkR-ѫ?kj_t[H']Ri:wAK5Q+q9ݬ1]@&u V/!B!RĨQ778"sx(111[nmٴiӭ[Zl|&f} )hkTIV.gFa#Q @ϻ.wl>k669u!B!B2nJTsX8铌**K./H*ݡ "Kg\:`Ddys踷KM1:}zXV9y}B!BHd<Ġ eP*ॄ>gDᬢ`ٲeH[7@073kP&æzdm]hLAGVmvY_:4|cz=s.TFrk>Zqc1Ywr˼&UUj':,!B!3!r(*13hoB\Eƥ1)YX>;-k"Ai[vдAQ2:581aJcz ! D]YqǵwO?EW~wH)vx̑Eƌ ދa !B!WR`T@)J0Ay:BQ0}7}l岿%m`Pwʑ/O0mWe>!LsZ5%\wPjx{c\6:Sɷr3-X IDATKNjC)4e% @Ze[LB!B). \ 2|%P3{p0yaYpFޤO*O:*uq&p MQ]^E. RXFƉ]c&GˁOB!Bɟ[v}cnR*$$Z04 -Ej2_*jFdj/mi/4eq{ؽP&k' B!B!řqA?p7PB eV˙Wy(t?ԄB!BHEcP S *">D8=[/WB!B!b`(B>n/<{T,D*:,!B!⮄!AHQH 9-5p<{<k/B!BH[-\3OJ&` K_:B!B!2[00f 8+OXLf$YҒQB!B!][%M` 1+ j$dB!B!ŁBK,*@$8,VX5-5pB!B!Vnpdb5 ,H*&ܴ.cJ)UB֓vԶ#w,v[Reڶ_h а!ŽB!BR8cFI 7p}1}e9x6fҗjz>\ggRA_uHo¼ 86.]\na%ȄV^ -j B!gr%#s`VX:H0Z{e%(ݨ9v(b>{C 9F=4{/|j}7;ڹP3c'4k|-Lك*JUTݽ{wΜ9 .,@!ϦѣGO:ϯ!Brd<Ġ eP*ॄ>ơ_g*Ęxbѻ# `/ȜL>pB=U׀{~~Vnx-9G>"|s̙0a”J%9csΓ3d咿3i ?OSzjʕ bGfڵkgϞ^w}ץKDE!g܃ xqx0#W}hxi7tXW8s!g"_d bdl ؽ;~Y! U;lYU jI{?5/\'2e LWb~b}.g2A0"UzL]ڝ!u;:8WEmڵBݻ,/պq:e?;q :Rz+||*jNm]ӏh*>ARRiT]3\i>rߍիW{xx0hP W_唏3ir/.Py&o/A(W\```z{A!hgJAT (nO8b_9:9|d}f4p$bZu[5F_?,#z q.|YG\>Wl2pqNzz~԰"1{>Kv[# D{ۜIG7u͸rW.[nx2`lQ#ԯVk{SK9^}]-QrCGC*Yᠥ廆G^SNxߣRsP/.ZKs=5c׌0`eXGiꂌH5Ծ<uATzxxjطJBʕ+(Zmb h6mvs=HB! ni9aaTF_l()FEEo={GFF-S?CG%UvKkb8t攖=ͺ_hR_v?U-XVY=Ÿ/? |*RӺlEr>s0]68}o4x̪}#'n*׵cz2@Ҷ|5Jyw?Y m`e-Tڿ˦1x|f\"J/M٭b'M27AW3_jw^y91ڑW{τۧ w)_DW83@)G׬>A%-_LpuZoi$ŋnOO?ݹs'\$9wwwγW4?O#+aǏO B(q.!* x PPjRZǠA"""*VxyҥǤoa"WldeZv=ӆ!Yn1bUUx'}NhТ]W?~y{ Y+=Z,tׄ0uQ/~⻟N~}\W@!3l^N11|4,\#$:݋Ztt}gQuڏhXʉcARzYK@xxcZqն gk? #t_xUmڠT]R\:v=i0fĬ;k=1X+ձIy krLHVxUiS|.o.ro.wLY)~i?}رdύB)(0@T@BT" 9-={Z7thѢ)bV23.̷7ס_ ƇbOKiv{gо8]̓ٓ]5~ռg#=ے% 6ij%=׹NN~"/,KUEښ)K~jˑv~S"*4 ppT%u&sfxv3CӤ޳lű;w΍6#ѣ|]a%g~w ҧ1_?˼g+2Jѣ,|9 E"飷upS_P'pCG7W@_>~i?aMN:-,q||U~,eEws.]~ w qr|o OS_`KeÇXeƌ%NAL' !e`G"T ~mW *UʲrʷoGn y_˃t TJP6'<`[Bp-qINS~MWG:;6 |D@ic| 7/muܣk]uzӛW/4dN)~ɜԨQٳgwСFB)% E&@VXi #888:::ƫWj*i&?,[u>c;'Ֆman#?TS^`5 F3WWr|57c޷?<4J 5t;\syv®^)鱐mBpy'vݷ#Glpl!iSWwZnrj!n>_NJذ>ۋ֙q.㩑Jwfl0arׇvj,m;fˣ.vQᅷ'$[u>$BU/ y7qsn|~pQ@`RY?j-o;}KW Zu4\j#o^Ldrw}z9'^9wş3?mONOMB  jyU2Yf(8XBuVۖM6ݺue˖MeL: /^%mҧZ@WqO9`Kr2|*-1oHTP.KrёgmܪvXxxիWoݺ<[E7Ohy=&?zC=v/z-M]ÿ'ԯmU'ݵiK~M;O~XQ ^u,ϓbFFsG]/$ +SD ,jWʸ4c \Z4ܞ˵k;RZ3/ї h_lV qu߻ ͘ᘄTEw2l8p5I>4s53[1YfxqFv u#?'ɟ>ֆ3 !y^keO\i_ʥ)~]%{sin98!e[%M` 1+ j$$e˖߿?...222?sdyK/7c{ G7ů7VRMf]ɓ-ZϢW6Qy41^]AYŋzW/%LZ5goת~S||?]Q3J`4M&_ II!Rԗ9d]咱ZܚVݵz)EⱕLj[޵\Ffb(b&66o!^0=^KvE)7~M&I:Y1 X\nXɹ܂⨏k~uM^ZϏzf%u8=~ޑgz|hC 3` w|V4* ,dJVIB$`Cd4[*m35Z) hB;(Rn+q!g&ɖM$Iu\=9g[oʶseݚU6<7boVg˝[Ku+,"<9L ?L|b`ZV+!J[db5j R͐F落~O/7\үUOO}o[eJs'p:(Ru:t#3j&| !q*#o Qe&ם#;SweۅYjmg pS9F%;},qm[LM̾3̳s U~wӺޚ=_~ӖLP*RV_1s>^˥ A9Pļ2K{;'J~j2ډ)Z)2cjJV \.yYo2:$n u)xT2'0b3'e^ d~9ADѩvjB!P1@o CPË/^ ) Ry{k}saKp8#P)5-1\%PjhhȽ]C$^A9qѨOIH )\N9zW fl0iɸ/yĔ>㿻+w ]ttS,aZ?ٗdǑlykGEv|>mwiJ'Oc`ё//934z|d?Z=ߏV,a Yp^xÛ43 TfT=듷Ө܉9zl?ڐv zű;21vZR<<QO_*=8Iф8q[5dN|Th5@ȥ +ǣTh sVk!n{3F3}Li3;J iڳ6'e(yPgof~}wrZ'VAeZqW^0} ,KpCEe P:Rw=gE`s|et'BۧwKY?O+~ jXXɓg̘aҪUaÆٗ%I(f%RY*2O(1(e x)!ggxo_Ock0>rcxT Ny+veQGO\ 㱏ܬTy lY} e;|l e 5Ŝiqkgۇ>m>Ԡ.ߜnFOy a[놪nI>oS4O_*Rck`vdn;'QOSɓe%4?ԮYʲekˇB)8"R ?OAB)(Q`2\흏E|$ 2(ALwIq@q.-kM&g0VT,u &R^'_P(<<?O+~Q峄-A|ZjkݻwذaB0C($)BHee`D&@@   @q^EBf%%sԛ=<|(~wW63gΜsd6b%!BƘ1f9))I!7V0H\` # jj *QOoؠB)?Ν;Ϙ1^zZ (Xl )~/H61v͓'ON2eҥ~~~ B "qXxGI V+,dŰ|mƳjU^錂NM#4;i;VZp{}vaڴijaBy65ٳԺA!(SX.AX%EZj,( '}ؙfqlYz5K !$g~~~ .\paaB!BH0[00X9 8+x1H%+ eoELcc@h&%&Dx\_g=CB!B!sĹ \,$f$RT' n3T ֽmfllu9:|8ʍ3wgH!B!($X8Ē$ t,VXfMOVAZ/k7} Mvm#4k} M.mWv/_[ҮWjhU0=kƵl9 Sz!Ĺ c-_*U}"SV8zP!no;] Ҹ}ۚ&ڶCB!B!Vn*YaXM&E*mڂT3DJGMsvhQt krݘO}hh~Iݖu;ae$oḙ1q&]Yz 7OmA!B!Q)1$oop[iE琻۳gϪUDK᪴3l[d?ad&P|6eFRmGa+~d?bAc{|mJr|igϘ<M~j3!B!VH80[a#(A_蓌4(""bŊ矗.]cYChzDuѫ8O5zϜ нkAG^_1:43 WZ 
=zXu a*cnu(զK5Zx(!B!R\Y*2O(1(e x)!( 8pI7y޽{ s|d0uP+ĥAb JM B!B!p=8D0@@%-៾J!4pٳ'<>5e7Xqnr[I6ij{JIfLh҈r/znɖ{ɺ8Nhb*G~O\.Ν;&RM`ֻ Z]+l'm؟Oݏߴ?b5>\Lԗ2pĆ}6 XR ~͆Kn}n Ie[j8(w\%jwק<e% E&@VXiɞV/^j_>'QeӪ) \ɞwP`:H j;sOK~ZwA}BoٿMEM~Tʹ&iȾ mOY€ K`[\7DݖGq_oR,aNS@a Wvh#~V1aP -o/ע_[ ó%s&m`}r!^O3|g3-[#4E(#'83]AOmy+oKKs|bch>豋7$g.˹X?,YA%k'ѸxԹv4xDTҽ1q+!JaZfbLjA ӓjQͼmr|U([XNwr:F3 L1) o,x'BrV 7طrBjn{\ :ÑE')ZgƯl<ԝ`IdgxdJ۳ lNt!˹Sƣt}6bg#~?68畐Be``r@`qV*!b0jIK,4p/7oTV i[f: F8J9#755Iϸ;ZHGggzYR|]]gv &OU@ Â0YVP!66^_mw4-`>nP3oõVZ1c?nc M֦n\ޡO^2މK. CrMF밈J1O?Ӂ~[q+!sĹ \,$f$RT&2\fw4|B+T N) M/[A7ߛmvkҵ]DZxDѭ}{ؼϝUzU4=8cӟn;At~=^"ױ'_ĄMpȗ5?F>̶ύu@©={iŽ;|xUԴPRC0\vۧN}Ykj%\ %?cH*oXX?Wneʩ`̊UhLJ;OvnJ>tO`q9$~!S@.2ۅƝؼ)3F[95C^\޼FjܮF-ۻ-\kw)4 .dIGYӓjicO_O{|v͟JwS}ޔm-Ssʹ)6v'uIOʜNޞC? u{sotK5=%+~Ƃ3]!QuP5az;ܞ˩Ǝ45TY`yޚ8s7boڽkfl0wǗ͵y5us$ EAE@ŽUԺP:PjkuVku`Ӷ~[ u+ р! D<ޓs{rs!=$p@O/m"!4b㋿8s= cr/ $xU j=dnɻ_m)zH WifD`m.4ͳ&kؘg):|< (awLzb\#g_~+l OrQ2.NԂ<7"npHM'Ma,1^qil ';%a6I2/=dպ i/̙oj0~͍ Ft霵Q'tH6d̛-׎miʬm\9/ɹ ѷK̚qٝ6i*iq? &שeq}1UD(. ` VcޠՊ2W$zd͑w,sn]jⰰia߱k%gBrmJy-$b+n,TMܷ)<˨զ)Jf-P,|}9 #%$̚mfߌa щo5izV+2p ỞS}MSe8 D= c ]~SE @S4? 4*Mȡ&{^V|`ϺY2KЭz%jBT~j+f\s5Nq'B 껈 Y/||s9kװ/23yo6M+OJ$,AH9QŹ8T-74 sXҤ*56(jmunQC6Fieiy, KeWVgʓ\;RJN BP zy+7B&{sK.ghړ[k-$A>J/G(0"Ah%R{Fr0OfYL"]{]”>py}%F>k} [\sVHɳ꯬6{ɳ b皯ZZoK"d|=:7]78>%k5yyy˲ƻ ]6{~h>P=SNQuMm)^U _n5kѪ_?0IJ$,XPPR )n i‡S6gkGf}Y+Th`%l.)svCo3#N4e 7,MsaޘWkwl1MiVb;i%ݥX]ۻ>tjG)g'Y;3tE>^ui-*[٩zI' 'r ~p:Cv`;CQ^u!+CV].c7eT7^#dOgS{5u[ `m=Ǚ|,:NR(^F? |{J<[KJQY&0mڴSW \bR9_ U$Y91s"_ݲVk7NoCnųD6vw|ҺVm?cU4GfR]oKGJ>]e9FzPnDQ%A8>@\-sGov`C-U> NVͺlBgLh̀۶Jl-pq]sЊ?ȌGOOOBa,QL>.c.s^AD׼ZKaJYD@<7*)R"<+١X׬Vx֘\ob6b@Q@ɯ 9Z,Qѳ%cB0F6RN/Vb4X<5 ӗm77:9vV+c&T;+tX#WBwzbWvcH@S@ VLѐ8\{p螮ɺQs愅MGF3@?FbM# LĽ'>r:g*Jͧ.Y\AZ@qDz<78Xɕ' A֗bxZVJ}~L ,*̯嗈ոr} mN^W;v28F>qעbok)r k[Ti/E]]46]tyP_gRyAn` C"yHi3ݯY◈HS~ԙdjJ?yFod>++LCp8;W΀{2nPulK+PPEPb! &+ma8gp7=n߾cMs{2z+<ߊ~9A,[#2s#ˌHO,s6736?7`Ό^򟮽)-+nkӮRvߴ;󊳮}t]}e3V~׍L9vhlX1P=ȿhhL7W#`Gc.!5ȟ,k۰73~x3!sF]ð+Өrm-'82q Y),5,ج:'_WΖ܌17Sqn2e;֔}`c@98z(~%kc9Rmq"od?a0 +f & 3/6<d_3tK׹kf6&2ɥ%^X ;W>=f0BUjOU,VĠ  QnBD 1Le=-˃C9 9u#K.]:Vac#h.i供o!>80[H|;[*Yiћo!kiG̹M۷E}L~.\*uOY#4m,jBAV˹>9ylcwQWSMӆv<P2ѤLH֗g.^ꄄXV^nuB&8"WWW&؅FTTҍյ,0c:2Ueb)+e)iT-ňerW/2~XYT&+2bYiWIn= kq [;ВZ9=1Y7FΛINqpՏ.*Yyc{CGzj*gV؀On9pBWR54;lPAj5O/ݴ-b7#@ @ Cg@TcPꐑ}P bC :o3-%OiiMa!i=V?` ;㛆>42W Q 1\v۰jpӲGxszK .oں緩緭S H@ @ aYòZBCF MQ]|3 ۍ"u\hSuCQ@i_f;ǑFfAXɕ2-<:Ԗx #Td ֬] Z@ @ 1г`* 0P0 d(m'REc2u_ -@*U+r'>Yt Vk(͐KmIkߠsN7X߻\)W"@  BXg7h :p5 ugZ^2`#].ȍ. r.?}Z[T#uK]-yd LfWD&vX)@ @  (@(@ (C @PdEl @ @=`PDP B@LA(i(@ @ IJϰ3,, Ɉ@ =o}d2@ C֠gc V/0葡Yxp@ @( (P7*送Vޘxp@ @(aY-X!CAπ E`@ @ = 0 z`LN$ Y$傡9^)QsgVyЬ nŤ+._vY:2],+e3 G[~sM,` hod5 rXG(ɤ>^lF `YV D kJ jzC"֝x6жAuF")ή`4EBHR"WWoo?@q>s"k wD׶o5 D"呻iRR))jǧLl_1S͚Ը@FPd#45Z= zzV+\ 葡h_W1>Z!oC7n=ʸC7n=ljXJv57/_g\C/_$tN8^|$tH=111[xOEӦvR1\Ԫ} T6 ۟y s}(O0UF}vDIܭ@gw-m [c /<xEV\Hu;1;)}7?:휯 ܵ76tN)IO 'SPcO,d֦Y!| *R⏓ >uE_~LD+وtEi*0z,\-*׷7c|~=jԨa+>pƳd9$_?0~נ9b+:依w;_Fyp׾72@ Vb|ss٠8bq/}/utɞPP<{x]r鞮{۶m_lޜzrKg|~T#L?*XL-ICYw:"ż HB^ ?p4 YF[6_^ȣ. 
"{|#@}7kWq 7߆Z8r~tL9s6 ԵKcۮSJ)QK=}_J~FGY>3QXpGi&dXEò,t`a,)AFO2fWbbyvFWH@yx7m#XRxT-/-#7b毦품S;/@@ڥ#^DVFNl`Zܞ;GN;!rin]r ݖbhGBJׯzP%{V,4cRp-_AIeB$-ܳO|L:W@  }ÈP @]FR=0b?1Z-̸γS=G;k-)j/*T`'}Fߟ[-P&q8t~tH9thYSz%4^m=?5YoUX=bh"^;D+r4  B .4﵁Ձo̽)>h݁dE]g?=Hݸ?<ļ:V^ȉŪ/BTșBw7jD˴&N @ EbEW5RۿWJRP{4gjRܽ2:J֭bժ՛5mcKw瞤PUmݴEU?s%(K }h&~ԝT-꘯b:u!NRiZƥ3P궨tL}{41 $N).m…Q c>˜*~qҴ!֑ /\o׉,KXpXFͺ5i݀{F} 5Zw撦P =U@=wy r'-_׸h$?-HL I=B{4wɺzzGvTƼRj]<7]lIW\|tXRHU йeիKTⲵ*Zq=5RQyWh<敖qs.S)G\^?KK@'JRJ5s϶oҿxB++>j{H y6ڂWDa/m`!wlt хV 6T2Yw)Ut;?: 8_}tm{SXF"<_9w59MR]#_ /v0[v];"h}[+=?y;TR)B{ }}ѣ Ck3Vq#Ke-ypן(T.?#< [qKRj*SP pa!\y C9+_V oUu&n'+7P>)~JQ=4품I?(sg(M> @oLŋ/D?n݃FC/xo5]KcI;VSYdڥ!ch!x#tON XŸԐxk,s.pB[6 0KBU (.%hti;ao}ߢk*;y[up…kP\}{xszQɻMɯ; cg+妑hs,O'ɗNߕ]Uij?XCTS\.?{y \6}z/]4]U Q.BV8#.A'ݑ{rןc1¿c>+份0g{ kg߶ ~28Pf6 :MWW<t@ewmW_'~ֻ\vj]%=.Row?N mܷn8e[+`f|uW]%sO2WR 3WKߙb z]zvU?nX[Z'<͎ZYHuds/#&y5 2k[4rϊ%R_""ѺLԶ6O+C!Ef ƍ'"!8 zggRoϮ9M K.B{쒈{T P!ء|S [ʥKY:ug۶ >2$ZMJC:)K~7nuF]?pˁD\QZױ'B'G-1}gAqxwm;`^5j(m-nz2UoxuJBR}o Pwuxyh`_>Ni&vs.A yUVreٴ+R !3G4w;,[n}EhBȏC[x%(?o{ߊ8 (B% $CǗr8نÏR&ijS^@D(MºrkcMo/OGQ![Gu jZphjYL*&ޯZdz*ͧ:Uv0.ڔu듾q IDATB9,蹏`>\QVznɜzT!KN1gݶ ^2n,~0 "J.'Q7:co3#m ĂgYlt >k|>=]oi7:%/Yv7)<|]_A~;w1kelk=.@O/mb|`]C`_e ޮ#r͆S`S)72*c[Tsj7~̓h-2XoDŲ EEDBƷwj_|Y\֭[2 &sa۪R22ӷQR@lk{|z}ɿY4.bKBdEh!u#`8W颪767:3s*/ѳ%R՛z9o[ߕtl˅zdb{Igm4{ک7"αsZ?p7ݧ'B__|/ܰa^vɭ@#(۶Ol\u(kĭTP~p.˴np%v [Zx;fsoD־5cܿߺBz ~r^##3gk|,܏No# $I`_ hs /}hVobWX@~twrP ?g%Ґu7 IyeyWxE>q%rX+jԸ3EU'(L礪>z@Z~oW06mۡʟ X;Օe_Ψkѝ/T'n!ۅz~<7,,E,rz- vm>E<[36i2z~ tfãh`9O5((uPU\w&sV2P`XEAD !$ Bw`Vjnر˗Ϝ9ӲeK^Yв:ޔnr5k͇.yqE>s΂h2r-7M;S] ܚcN4.\c.n3h4=}HJDo)27VzXΘGWm\{8gebN@>Ϋ-u;HpY|Z F[*Fw`ݛb:L$ z_ACܠCRkm Ljmz M H+|R c=ڴ.> hV.e!x׉9\fi'Ңwкt|'>g,D˛ QPC/;A@q3-_tV*[ka:d{kjf羘/@x|*FXuH_* VRz.i ƎwD^MPOE׸_>2 hq"zW'٠ji Fnd5Ov͎/j}\gGK3EsqW-4k؜yq u׋7.>䴜=:M ~z }T1W?930,@gBQ@%lgо|`G#fK=+ȧ:|~txarͧ7tϥ۲orUi{ \<,9?ϢI2>'.̍y0 ~-Ҏb %^ @֠g:6h0葡˂ܔ ʕ{Ǐ;E1ob]αot};nx}aG> \VҸ٭Z'F?Rvkm2|pǎ+'|;iJ$ѳim"ɷvh`@ oduqֻcJԉ͎?Õ:C?LAG)eRbl6 izC@6~_ET V.XyGxʙdsi?zfox>];\s}~w?, ˘X3+?ŝE6vFf̈9&sVzn֐Ьϴ}_ si2狀v=S磨=Ԙ yIvՒ͖1_p r;zW'xu?mO7e+7ajШ^.:76)}|J5wr9 'Ydα xJw{ Vݸ"쇡J]ъp]+,7E@f5Wh!k&blva HtqS/y=rť-BE6ͷ4jE2?#C|CGY1dbGo$-hݳ;.\ 2,A@?ۈXe"m( }qBȩ`v>\=kD/^ɀe j  z e-2 dxp?ou9s۷~+Q:oL4*J@tfțPlW胧JڨdsOuk5QsƇ9\czd:D.`nJFpz!EDi׳ÔK(ImrrqиFܼ.m4rGf̓w>gXE52M3d/]ҧ!Gubc)c(U?&`]p2*O<V@=pu95J~YהBfl=>40K G4q 4Xٝ\˔]<:t o֠A ?׈+B40[fԞVĦ_3sYW/Ǽހ"++栖x*{vlnքT- @'e/՞"+0?|y-6!9J{7״~9<-]x𗁍NC(z` h`^K(0 ڼS7]Ώ<'bl е7 2/Dߐ ʧ ϦӇ|TRA?=Ǿ-u@#O 37^&ª!A=#:O>ؼ x%= 0 z<)}g SO/_<آ󓯌JRA?١JEzJ{;(*M>.7M'F[v/-l7~KU|HN'=:qOcS/{|a]!R°S6PP0nHؚaMkzYN?gZmYԖfLԗΔLc@\ͩP2IOxKG1)_ $]3JJ\]]en_NIHQXV^nő ܊Hj {Add8P~خ7G0Z-H_h-蔯_t ]j=[[PztT$G ́&H Ahvl͊oҵ?|]K|; u"9UKIEBo%uz|[ ܽ?:`O )2׊: [Ç;SGCi <|%UIiYiFE$^`ER7+Ii0鰀ESsd|+Hy %Y*a3?=PC+&s@:˂4}X8g{yҏ m~uFrڽg35⮽ۙ*Fw_ZneM o+Ox=[ ѯL@ݲOS^OC@(^Y\VXsWdPGtX@ >xTsYaXtE @78ǢS*_TF"8Ї6=>!I+)W'=F,vu+1"|JI29gw'n%eFW޿+[3mQ)wp$ sCܢB ‡L1Vb/8|pNF +-Ic)??a.m) pg)]aȀj硡%A  j8&O1@ ) R1>|S'ׯwHzccD<7Jsu @@H$ƀŒb.@ BSb.^)- 5 z}H4` p("@ @ !t4PjVTjHh b ѠH$@ @ BqD x@A IQаHA$L!IMA @ 8BQThnCē[J=D,eR1wݝZZ@ @ +D!Wح%tzhЩ okF<8@ @ GBa.ՋibiI 1@z-k߿;1p% RE  L& (yu^KX)uX"PL aj /eTOfD1JIo\]]hP'$&dRooo'}2^ňK{:$?@ @ @(}"*y!yGPR( ִX,~ WN X$s+WwQ_uC X( wcOhKT5FbIKK(**c])R"w5nwǕ= {@R87*!PdV4Q7'ԥ`K@ @ (.+ mmmy<EQlVitMU\.F-[ SnGG3%5@eG~Z% R ?G FJ#18^"J̐.i@ J?<@ j0Ppj5J#;b2_y3]fӚZ$ywxFu.ɏ@ @ AQ3pڿ~BƭaŻZt_s1Z:Orس BfC~׿xDy+x[wwP(å}f|@ޕǏߏJIK{O9;h7S^>fԚ2&El? 
,2ClUuP:{7a`Y$$R~}{;@ @ '?a2mOisݝUy׵|TUA]j@I˶qAQ!?-H ̙ KDCqG`Ok珘3FCµٽjDRR"A7rP3Z{O7L IDAT[&D @ BS.]JLLtss/kes.{ZW9 &\Y5eݒVy篾Om_/pDNTuT]sΟ ֶ~E1m$opCn|@3`rycz̸.ngǀO".}u=(rPQ}8_<`HE0%A']YDjӑKGKI|5oZikG1i@ u&URi@loR&6dC;CySXPHRRSSe֭r͊ /K}tQxrM{J(2㿪mW> ]hQCQ"XO!^BzXuzEmZTBPP NQQHRR>$;;+_4^X$Gx/OB5Jpc>>>ժUvw] lk,ζbl ($YsTȃyΡC5z}On3(7z:bCmS}k&\p |GEZKHҮ_$r?s7ֱ2Fċinv|ՐqG z^P][mD<7Jm}Lz`Mp)>̌}' 6[&it2LPРOhd$w^ŝzL6g%ћ3cbbTϘf4~jV/9va^4t!%IH3d;5/n{=a)9Kꅵ՘i,k(J)m>(p}]+βM+ }8qM*tu`75bwv?^q@q +|<}WPyn8@PS"ZG_ >|…;vla+8 u D-"$){h5""?OsP@T$1X#{t *e٣ M"Y&Gɵ۷67)/:J1.$xm4 (=7ui]AyfJJӐVx_낂;REnoV˦gS*yN)ȷjyऄڻ|ZS~EF7 $So,(1B]V]a!u=,|v986`*ߛ7@EBzb$ OyREӀ!}/MSשڥo)⻶_ Pׁb'nMq|ӜcO!MHzq]qL`PoS*}.+ܮP}nfg٦D6/}}f܁}X+R@4fNk3W8ʸ3'mw~ 6 "p^VuX{ogW6jHLL^5j3wÁTR# i,fR_SBmLciR)G/-DM=lz{2?ػw0P Uq$BΝ,9#_ŀa;~=B'4r۪kQJڝD{raWt-.Bg]{wjQ%($s۱r7iאgVꋛ]~;:Vefrl\ 8yC*M]mܵft /.4ӫw3\*5vUeĿz^qѤnEFyɩW.]8WfldZLy>,bj&Sf΢ܢ23J#)|x'r>n65'g,RFm[k920gvBWw?>its~ ɘ7cn.]+B62 7Dj?Ӻ0d-%lv 8ȟyEgǝh@KO;#>9RiҭVk\?mL8B ޜw W8y>z߂N>%^pD~|J4u}%}CQRXOa޻B]E 5κMHECVhiGeHy|,Nlδ gtH l5w7tre Gkw/<)/s^޹s~we9QrpjA5?ݏj%Y%͛7gϞ9rɷo~gCz}v_uT*b]t#Welpo௛z5hi*dmX÷u^X*bߨ}碑V~wiQG,&,? u;# x_PP)K u;kAR03!u<Ӟ~۵rbt sسcFDnQ{F0-tr'Ghϯy(5$ۚ/vl=pS-n{oIyܞ?5<\_5:oM:*~[\ҖaHlTc R@25bR6iIgOg/_*Yx~ 2"OKHyu3ԘVM&Z$kF7N#^2?'Y}66)`@poۊ/9rI h >>1'.GZ׳P o' ̚WZ)/v=v۷o׬Y˗o߾ݹsgRIo bgM3ع^ . 5׾ϵ!{uS3$ y]&,84ƹU[;N~=aK q@P"ye^U$Bا˘GN{Xs P4g~<#u7`h3_~klsE7~^8fvX2- KOny>|Ue43kƬpFZ6m>ίV(sғkx@+0KF}Iϟg4 AiMzy䧷L BKQ/qZxgU[wԙ"׵ NF[6q06<{4O|̝7c.bx ;3$&:;V]EeۨL]3E%)UԱࡑJAhQ8ERs| UFWR߽IH7NL\8W5 Mʸ681aמoۃzkt5Riy1e.טO k SVv`RE-OCzvtm SN=1<$=.te ΢iy6nezr(@8驠<@O)S2cYϒ3%.((~n۾c)\weoǖ-PYFs[]OTYcb|3 29ֲqTfM>כ~Uw4pCX )w Fi <.Paxz@2eܩun;C!+֖#Bd1< /Ig *dרh"QBکSb.2;vL6@nPtlRE=Mh=GFR B5 E֫ffW]cEIާTJ6wbv[ҵgŊN߾7#mBm{.UbLlzL><ݝFT.v+cmEQ.hgH-Q$qN4.yEK.(P"h S=e}]z+9WP$ҝw^$_ԣ2t+_ڽg\3o/":a\ul(XbZ?ۇ"iM,gTERCQq6m6lT$Xu=1#oEٺXy[Ȅ 8{]&iJۙ_.% ܈𹖟'j6a[XT f(;zrG;|/ID/j^ ]{d)Beg;ٜ=o G,2r"Jnߘ¬UE\yT(`x.LGq\#є2};h/;<{F{@)2b*X##Zx\*=ѷL{.lE) {3|uxݠOPvyj.SNQEE隤8To8 y1J E.&^ݼo4_+,R Asf)j=.(,S\}A ޺/jy ZD<^|yR ;B4]V$u7{k ={:1hB$>3&& tHbknIv[Enj:*HV(]TX^9πX,ynWҌn16ܽ݌ B]^"X.:m?-v:7q+R&WRVsʓ&qs Z`WY%IIL/۱۔"ᩁkEBAD[Va)޼;/ZvGc6om621!W/x0LZÉzR[E:,ӏ\L_g/f[fgvI?^{-~EZٛW(4Gb~e4{۞-Rjvb\X֏'"BT^ oŗ0e3(n( )>dszWȲa'`aͷiTQd:}%c Mp-*v*Z02'>1+1 ,2PF82S f41b ij:z+^y)R(ɏ>ŹPW!rM[&DŽۧns.BΊc+(LCJT(> ~ZTjXmWCMӢ[F*ֹ([L3a*r 7G9sI_-|87LTCAyf^N5`y^C, گ =7 $]v>][ w&k&)|PA컼żvnv%0A%Z(g6.op5gf4  EIaZ?҇, -òe)f ,ڴ8 (6E{lhj,Z×E{8ϯzCX%ˑS^KB5) tiU_;2qymtU=ϔ"noRT[e]qao3oa2fWsaf5zv<'`Te9'gժK=;}lduTe{mɇüW4M@lS2N`TN8S]iԥWo|cdfN)5lOnѰaÆ |6pκ>b^BSNk6tq. tIU"2R0rg}*<ϧg6. 
h60>9k¾ŬË[aA`P oS|Ⱦ(ĮRsY?EVPd+xW.ok/ȂGP6ִމ 6_֢Uthޅ-UOqp, 2uCr4M=/ ؛WhS#]1>w'fT/ F>O̔J 0Ѯc3hxRBPrɆD& B.tt+GPΔKR 쌚jۻ{T}%m *i{OKbM}1(JJB":C*S\!/Vd}yuF,!$_]fHZ<+ ϋ Mg\HVQ4&~ Rº?ź Pb-;E.3ZT*]EXtP͘y9W'DXX@  ʕ BP |r)(*++KRT*D@(+swՕ͍6NQp3$=YOaJ WW)a=@Q_ys\I(]"k U@$ւG PXy,dϹ:v"CG]d%9mB'j@ @ O7Z-p'KF~kZd @ @ R';C Wڀ58R/ JL#OqY`r1?_#)>,"X "n>&UE]*`w%Q9*!{uGGƥĽܝk{4Kkg?@۫cTؾ?~)Iջn2(bNggb[Z~];7IWHԼڝ]e0/ǎ= h6t}kϙ@ @ 'I@!|=[3%캁^2n09b͜G SgD[&yvPonLyg:XV N8OAI%wvA93nk7PmFx3@ @ R'ʥK]Ħs;ZwvՅi';;#\Jɛ6}͜ BP;6lXEDŽ뫷y‘9ҡJmnI\ͳ)ޖE?spX$L%@ A=zOjծ]ݻw'uw5/ζٺ f=+:~T֣K.H+>wsi4o}+`vA}>XPC\ ~f:I^=(蹶Ƥ3k yP;9thf-׷A46qӮ#6&YS @ @Тd8F_tܙzٱcݻ7_) z^r>Cy(ELYFrTHNIQJH"*Y p,7ѷRCAγ7Vx EikG7Bv7;,CI8J @ AtR͚5sF7t=<<իڵ_zO09P/L$P'kuJ@PR{|ʸh>#L=m>6@lOKo-(.b@ @(SGbbb֨Q#666ڶX4Ps8ܯN^>*/Οfv ,1_P5K >@zdTTod@([,ǹ6(ЭM @ %2vu/_l߾}A1~I%^8 6Z,ܱK튚dnMerBaTZd~ CEuWJ$qy#2VdATܒ6-* )B+EԸT`gX -˓R@\pIF@ p(6i;eզ/o7?MSA[X B<=S=-H[k_@{!%i"ّhte E vhϖ홿B118W^cˈlٲEm;؊6*H~NBXiM|B|ld> ^nDr']Z4?&+$msK,t(+m:uwry#V2B\VFBsקx D~ RCw2c]=z1ZNBbFaWqvmZTh#=.yBBPL=h饙( 9Ғ0  )Rj 67"|ݙPTg=G&K*;ǽ(7n["b7z@Wi; :C"fHKsfe۳Em ݬLJ\g2{|m-Rfv\σ oWE>vOK"OC%l6R=:hXT1קjуoJ}BdXok(J)>d{}a =JF{186 *y;Ng jV ïf"/Wga=;EΓs:K|b\^c}|D\K\ >ϳJ [y'=[f|Pn 6~ݐXae?z:ת$.ަQPUԫV-,bj&Sf΢ܢ23J#)|x'r>n654(}|츳m~~gdSCGb*&znkt':.p慍0y<z-c /W& ?սM>%>[A-~ʎenyuܰQjmCl4T"dk}u^7TK?(yt; j6zkOsntz'ozo =[xarp+?_~$T+Ptsp|¤GFn4#tC2$A-\6ɟy&1aUI} Ts}dஃ=.<>;.-po{7n\|NoDeL[BFI3+n.pi D^:mfDcOwcEA=&p֭M sڮ`Pff̘>}ĉAGI*A7?s,Tuf8t q[;O_5yœsEiQeʝ\iўo QjđI( j5_zZO`F7=7swnϟlYoeo左go-{gi0{6lF) TtVά9(C LK>%+6u?|YBe(`CQ,XOa,?u)6YM(e KE tm*ԉM ~/|] TP8hO*G]d/ԤlqEOl5$֌n,_iͲ` gvݤw k q\JRwNỎ<ˑT6yr&{^!YJ+dG!#X|JlJBFDh u$J :0]yZ|ƶ-0XTvkte>}hwAKk @y |:smDorWnB2fm@6BθX@!-,4mƝ3'JmG҆2_CLJlVJ uڷL5[֮i@Jwo|&"iߧ}CLl-K53k&lxay6E{J.w kK4Pd-?q4ԫwt$"}C't^Ipdf[у{R6n"wΪcZ{ʻޝ[eϨ ;8 %.t &.c'd<7%.n( )#>4xu-1|VufzOs]6 mĒxG[GPp/r;Yzbiz֞X  Bz`@uжq<s_Ny0tR۟9]L4gr~#hdSfmk4 YoHVB@8}E/^^%mq>l org$LfD(9>UP"@WC9BRQwгuMsk6dޗ Á}ЍU@` oG-j'T8g(_L9$iI]ؿolabcQr\\\NۇEKxgwR١[9Ꞟ^{[:w*89{~t۸q Jध >L>OȌf=KΔHSxi}wԌnA=:]۱\J SQ|m?r;xt"#oʣ" Jc `Z?ÇPSxaģ| Q($c7iV# Vd,46hT(b Z0Cs>P+d9?,e;"֔#f^ scM*[go7mI=65 WbSOֱ`+ 8d@(4 p!ǃ8%w5!=tgs)yBzrT5 'MG?t (vߕA,(tfε,xy"vnUެ$f3wuweuGq|p1jXj^4@]PM𐜶lGYQӽ>ߩx"'5ܞo׾)UaLL^f3h>sǧ[9O $ -~Jy`u74+9$>"ȷ"gצD"")Ex J #p}#:ҴKv[NNnw]Y0jQx=}3BGkrfPFIŠVT|.Wu%7Q1 IDAT8:Y0qk>:>QrE@zл"QM};WrHNe{[0W6STaKF&m[P?oS}h.+VrE``@;Bذ(PiӴa"'yDzb~*2߮R ?m5:?e\fgg{oք%k]ө8$T+S,"Q`䝆BṔ:0c0ߞ2]1hȍ}dZ}X^+2cu*\u;toftUso_5K `0A)0kSw# <=[xm~@,e*i_w?ձLެ?/{2dsjzcEb9XXqmZTP>yP 9wWw6EeIO]Z̴ %{Ѱ4 HGDk,@M@B5+;Gݓ'O^p)AAAAAAaJ…Mf]xCN4cu(~gvE9c![-,-:d%(Y1IJ(ABT]fYr @wrb]9)LUlVph^積F%cC:N=ts@Zb>5~1׏z3R&a( )8PJvjX6|jt;#ϒJyr!g/: G-ݦ|ڹMKV/ MzXYJ@XQ6%ɹY!Jo^.GrQ!QgFiQ9՟ 0Y BpӺ0c!- [:w>zmrA>;yxz9FE25_ nX ->Mzfܜ+U-' @e4*V_g[^S r6޻M:yiw5.\8ri| =pͦf>mO\5}RP4mADzəm2fD_7e;xyMǹ,j83}ıS}2x\.l(VT*f>}/.Q$119P^߼s`Þ pЭ377]|y&)hYrw?@TkC y:ِ߳!^<v8) .lF[7omn'urwCYaĿǦy5nz7:v _qcǶ gnj Uze0fw~{Q;MlJע#6]n"Szr;(}¯]לJjȼ|^ EPb!Oæ|s#6iߕt-}ޮcA93Њ`%V7sRе:9>FCM0\tn#܈|@ҕ9^dGyB3;#9RhLLd0FΉUx} ':J.~+Fv9Dž{xM褐gŽhf5d@#4qYq4Rن7s#sU>K?}QH2e뀮˫UT޽~7'Mٸz@ ŊB6^SB..(?do3{?IR{k' g}L W߮y8 ?9P;=|vqG[m ǭ{-ưm*̌kv)v] &mLUm>~q>ބÚ V61A¹/NJ:Bajt?[2н nDoa5f&U#)h"k׆],S_r9 AP̒ PJ؄/dW)]T,fIv@#O^SdwQiѝk* s>5Z~5(E"Uf>qh=tvmV 7eL<ޑ;/֍$ox֮h?_t/4cL_ǷB|7e@GlݰV|ml\=@ b}!W(oS}lyB ZeRƏ}AQgoEK@9+Eo>HܰFk^34`_=[fDtU_ TM*Ve7v}RXex#= ;ἴ @ @С:8x;Vav`+Rw>W=[ .@4j+ Y?rԡ;iε޷5 }?(e){M¢XJE |@ @ (daRf1=!W>Grk w/EFFFGG{)nq}BmOxjU캚K9ԯo͋RD>%ٕ9Zϐ@ @>6MuPU$EgC_12M8&3~o2$CC}E6 ODeD w.<7(p9 uJ)@ SB1A [b>|y뷐ߞf;+{qAd܋|~Q:jWer\ԩPGбWGT7硍zjTu5[\[PoJ[3ΫO:C>8;fwnN/%zOE2΋@ @ %DQmxiV47)Of4Yԇ"h͏[ѝ_9U ߅->ӌ5 :J  %~gQSʳ_;<>uQ3|2-Z6_Ѽ vtcfvcZUa @ A^¯͜NiH҆P<] eqJTT*T|>q\f)(*;;[RTQpE"{Z;E|{bUj(ZFܿO<Ӄ 
{~zՌNǷ̿Ʒ(S.mX"eM|9_q@ի3G$ePX[`L?yCkiPHӓiii2cVvJe5H;ͨQcPLUxf| hjZXˁd)R5OTt剫F 7,8wasdRf0)SF?;pʾ|:Y gV~RN/fvW)&,ēFo}zaSawÞ:o߱j!U)_$oJ 'g~󹩳3 u3BaV, a%oR^ 3.Q3Z3m}Ƴ{r9qX@(F1Q3225ހѳC@SWK~ȕҫa"=+i~K6~ E^7ku 0IM4Uk_"k^s]!)_d m boϧ2>}@V&Eb>φ2egO^}m##Vjz 10+8:hY"3RX k9B8_@fp;㩠(n' ܄qC$V\r$ N_/]ٹf'Kpz䍷Z=vcNE:0W,˦:0M i. i[u[]3kN^=48>o1",c]zh.pl$8O<>K2GX7WkhK2|˘=a[4c\)F:0Ɉ)aV,tyvnkL3u>A ůsW@:8Ŋ,1)#-pww/ue.IwU2E3Wffrڻ:ٵTzU;JI\soWw1![u+ B7j 2ӲؕjlR6Uմ5K/'\yQN:مc"~֤Of^vwAUM*2_<{/U9WE#Pz%i.\YkVkfgBX?EA!E3/gA^RYY⥯)^}x' >nܶtB, Q)}kDIKj 'v* fH$ Ӻvـϧ aEQؾO!yUG5kN7yeX δN+B\Z5eoN`M(nG]MYQ\xkűїuCgroMJ5jѫOZB.Kz3*"w6oD)R=zүA:ϯDG{UI[l ZjNyJ|rε;Ipx4o״T.]7`9>$撄Bu.:߼y-dĝ!k1uvfӎۗA)cw,]\]c=cwCMNuP @ǟ?w^!za5)ӐLRd4׭[P^5{U'[⾦W.Ǥ K̼2~wSKZ.|slP/ GsZv2O{ڱ3M+/0lYʥ#W浐=64]caJR˽O%S8x3]8\S戦Ƅ;sEڹk~{hYcmƉo" /ۈƮG[ =f>NڬW='~%{>钋g@Q*5}>˘ݾq5ĝ (aFGCo::͢,:@Mۦ8rU$wY=ؙji30ߐ˓qr1ݺ+Z[9+yڱCݥz{mzW]5[Pğ[ܱs9;o߉~sکes/J[O'&撄Bk}rcjyϯx_瑭:o̚r^ӻ!u`߽zRGK_O524J^zYDMy ȓuUW{pYzӓ^wjLM(kf0iRՔ Asw(/\$Qk$_I&N8fX/FmpKf33. D&fզ^4 aڻkð52 ӂM)wrtimUnwVBQPjψ6;-݊i5mpjrǹ x9/l߿ͷlW]oV^J֕ѐոLɴHs)m=&@cLCfόv8z3/z\E׌)DEQؾ'd|oYa.Z)Sfd>ridA\rӶ/oʖGԁ]S@QQ0x)絽j.] 䮾upz&cYj 5x_u/xB@6s1[d&۴Deu|fA=! [t奝1NU?4#حI {L n_8RҞ.}H6o׳G1;h!jHG탚])!:b?hg|F.ߛ5t Lmg7 ^1^yEAG~?>KAz8drp]};:v1onm_kFlu*>:s95vu;W͠YáC *f]Tyݔymf$M= 3N+­$K+5}>}ˊ쌛pg^1ς`WS|k5I*U-9&_d-מZ/تqg@FꩇN C4r_69Xqʸ}τQqr'z*qVnY.ݡUM Oq?g͑ԓ՝XHR_;:Dgtg|Nہy[72#CA98Tm2@yROOR$T13 A|ߡS)]>[2âd-3bvUkzSoܾt6m??-+EWkh2nt)jMdriи\R(^#Mz YLiM,eS#$o#0 .TFڟżF^=#hٌ $՝0E^H=!d_R EvlQjImw^ZϙU)a.F{lysu n7}Cc/u7vl+o/.>um:ܻ_gG{L|3t'wk]nQ$115Q )~.z~[/)T 'q旬ŗ'{ilBaui#OÇGzN1eC y:ِ߳!ʛu`b/ ;POG7vb^@"gkoZL8aK3Y AWKeL/UÎ 2|T@m֘5ن,+ [5}ʃ -lr)RMg_N#:Z6 т3Әɂ~v9XqqO8ƽ̬D.Bai1HB30wU28nfaKh#'|TǗ9len^?͎iͼ|T|n4{u̮|EB' .wbn0rؔWY/uS]J}O$Qo~ QݚyU$ّ܂?c PWF @/I6=` fpį2dqcr@ԫТr2򮔏CzOd-מYؐ,#H(KH3\XbGd&i|}Z4}-ئz<\I}?a3]IqU{V]n15nWx ]QSmh-+Աi…ܜ1JUzLF(8:M&Y~\?1ǣDp/tFF)?[4%k-T҂j`o/D #5e oOl5- oS6aD\<~aByi*pP;>6omWЄ1{@ ~!ptڵkϦc y=a d\9P;=x/;֨Y-nucbxxKGto*5D4c62:zu7/U/Z6RNuʑ& 'Rר)RM+۩۔"鱑E%W$QC=ԸqRG7w4۷ RHo@'= Sy?&>|-q]~8q Tjt'0-]w {}rL;א٦YҦ+/uoᰠnzōM~4(3ਦ4RƄu;Te=*yEA_y lI`lϬIVkhW2y ]vS 8|bX.:YTgءQ@?w _I6ٱi( s@xk<̉ou/ d E}:NѣGQ#53H'גݖ5ꊠܽu;+9Cs҆ 2R((r eU̸)3H(:YuCe_0r֤I'^I9em@ hNჹ>lfύTG\Y'E) UEDDԮ]K.Gzqƍ˗/oߞQ9+ܪ5y2B\`q%~>OٔL5Z(Ed2@7\n-%L4=o>LWz񐊮mN^ "huiva^8("Z"}h Ytbd97ڷ XkhϖxyU,R' _e8G'uYv]FA@MzU'k]s.gNn߰KsNՉcN@k T]М[dܮ-@CZ~R9,_ylg5]ĊY)M~쭷ޕwgizgqX:ۄje6E5oL>Z;7;mޖ+MVk;*;ŖhhCOM}^c 8V|}bVn}V;π {itU.azo2@H Gb@,j')Ѱ -_fGnWR C= *O?IE?`O4 F TkhK2|˘7_'Iu\ߦӂW4L2b.f n::Yb]SPT iu< :Ö[򎢎9fiߗ`teWz0#99f͚kժ8 gKig<|ix޹ʂ|S˲Ear)Y x화d khɫqü :6u88ull85e]4]W3S,9iXP3B1I,WyptB29{5+mʍ ZԔ1oLڹۦ͋]i@nQ5 %GenȪrNΆQ=Iv'dDMûg^ƽ}q1~߬z2 m YC[XN;Y@L~8gY3jh %GhԨQF~ak{pH4>4\[a,jӜߵ2އn}`{6<)ոF< o:Bhz7ήE}@kܗ4W@+\ӊjn|lڨzJ7ZEP>\8φ[eWDFugj&13ӛΠN+jd\*:aԺLA0֔ZoQKUޗvB@ׁ4KhLL !}GD:|<ݖjhSŋ^{pʕ:tϙ!>NP;<٬ќ֣rs<3SGsٚ27 ق|gَmM^Oٕ ~sUt "+ix~LKjU6k]ݤty7*h6vr1=9f̸{)222::Kq^hkJq$!?&&}W݉<=)Mٗɼ#3fK(DowS3DÑJ +e"K!y 9ju:-r4S*PhRU3cqut%)KY+Z%M}/Ur5ϥ{Z "i\݇yH7ڋB$G$q w uT&|NL,>$*\YfJ:¨͋}L(D*b5}ʥ 2Ų[(xo-'4*}-fJ' |~ |>x\.l(VT*N IDATgΜCkhh۷on~Q`~ֽMMI2"\WicosؖD)62!|6,` ڦ>h{lo"/C='U HT'*0;68 rIl_sBQ~1[Y1Fw7KïPLgAr)d* f$&2Y v=׭"䲔Ϯq\ `VV8BgBx#%%x\I&L; }VYEQؾϧlO%Pm30S!++N={3k~۷ogRp29h^q>ԩԭ">z_2^ xxiy,淕 TuKתZJ Gߞ\ٓo՛/<M0"_8^ W 60Ǫן$I$4)@G/x]K8veT.]&1&׀Z|ϣ;/;_D!%҄']=ȉk3$ʗԥ&2\Ǫ{-RSt^I,Bvh઻ZPuư}m!G Pظ/d<3C%kR⻐Tiͦ4O }ԑTZxvvzRw\ᒃvGã!H\^T%-&=bӌMzγ.M0 g }qtiLw(!+[Izy0Z>DmYE/o *]}B(2ՂΝ LEcK 9  A @ eLb86*T2gjTVdvAY!+QU}"P)E@l0/tupP`ˋ$׮p yv 7D&*M=u,9xanTcW:ද Ti>|u\bv{X۟#~y=O 5lMq@ l\vQ)d Wh"21B:I3ojxvdk$BAqŠf772 Sm85#繸{XyHKԊ"5&qVH@(a%n9fW+l첹Lɔ!TŦͫڠA_\N8G1 }f/IOsrr}6bٜ S%鉩b {TFR!7RkwQ̶rI52{>er|~W%kh?2o,*R"*bc>#KŌR@- H&N"+`gHkݽ*>IX/3.YLU92I皔)#>wu tʦ/׭n ?98&4|@ކwll^s&~QpAs;u^X]Ȕ 
oN5tmQż&]IW)8}+JrN|БݛVl(5&ӿW%`]6xwV<+'anqf#hm: W%khC2~]K1Zab&aVpfuZ27òj~:]/9cik9\{_WҝPD9KB!ët࿭N%ΟڮA1g8hAn\@֞3 ^\?_`[d^}=(3`[f,gXu vsH 4g* ;\6%4D^ i7gnY꤫gs0`9Ko$h,`h8}T1'joSz)sD%2}򊢤} mhCo㲿ou1.L4Vizd\$ ΰN+J`j ~٪v">tΗod6 E[g ع߉ ]jטjeR=zg}vʕ?cϞ=K;mx-/0[9.pwy~)un 7Wa &U#paA FF >IՅ2dk;ܐqRt +(٠,(J)6d1/{ߴlqم-w6mQ25iiE"&nQę:zZO%ǫ8{,o J}u±\gskН[ˠ<[Găs.i}Q_k)R>xmӬA:O.?yEԾRڸۃU+]W)ѣG5K.Ϝ93f̘}Y+}Z5G_CS@2eEXWR#T 9 8Prz74НH -Z#gjl{Ry[>(s_@*_n)٦r2o2enNo0-rwָSv~~4QN~W-їɯ!o$W^OL螷_uhs ]oShGOޘB AڬS 슙g@JhG.#.$W 9xrSp ->v?:,K2oNJF@n9>T8x3]8\S戦Ƅ;sEڹk~{hi8Mewõ6#zso/ /vO nܠf=t͢T} mOdž2eĆ(aFGCn:: ٭铖}S眿ۘ~{pߛ+I~xk@Z^3se411~9 3j+?!J싙3g̘1iԠcOk$[vn^` l#E3/_ώhL8aYPiAɔ;;UiMVnwVBQPjψ6;MObmMFg:qn^wK-|@#[U[նue4dx5|6S@2-2Jk^wHω Ky>%6}!?IRE߰(%l_Ca0e)30ɔ) 3TEbq38Ut',7nB&Oi{7_\u\g wƯϷ+WͿiяOٔZoWX2Xӽ0MGRE+5=3FU5 q TR 38k֬ipVZHͣQmW(_GטAW^g/>WN쟀/ȝjxwQ_8=/!"evPf;; Mn׶ t8tm Z,*6pv[scyGM>diw߅T[se:s95;;/=ޑT494OrAA6, .;%M= ꇕy1(iq7hκ0(JDlh-+3nzJBqfA}[$^;]vbW| `=ڵ%k%rc W7΀yܖc[{@ԁB^A 5!/|4ٳ ┧數7y` Mk&0sAEùzwWml77ȵѶ)D:v;9SRtjK|~骗ݬ|O\\: ZNCԛg8}@lp{[nmjZGUYg{QW I%H!$~wϹ7{޹K6'N10 !58]W~{hg6Gl;GBSB@] NJ(O0SDش(Y4,! v%οu[mt:\J߉-OϹRf2lʷ#޾unύ$#>3P!/ioփ[h2#y74ck`MK}Fl0# Vd*,465l顖Mgtҟ,f.KOD[S|Y%9$UkҐrp`kTIL 삭(Qu Zm}ӏ_V)G``ݻO>ݹsg͕ǏUV'tL:">V=|j_|&9Yi8UQA|cdafc_.?u:B!:R RSTimI?&Д@xH̑agla ,<P;7.&T+@Q.q]&Y ɧ2ͣsd>q5,5KW/sfM𐐼,W) /kt≜(ug%]aS"(%xOQé;l:GxH]x;oQ]CaXO (CK{yɻ !1w?/!/G86%fRloE_3l~xޱ#j<ޙu9{k ¡r޿M,;-IE%Z)h;v1֭[UV}mۖh#;FZpk  #3xܴtm'=Q/<){20F@(j@֓&ygњJT 8xceƕ<<9˷MM!2\9M3hp{1?wpkUvT>{JX ZZ օ-b듩 *: >s]uI+pԮNL~̿O2bY|Y0@zª;" swc{K+9w`J)łA'y _)l-~Jv (:5Y$dNĦf"l4} kz馣^Fwfy6+oMPfU c-aA@\REڵ+444:::00e˖NS7ԩˡiVRs@TŽyPjUStZnA_FTUW;sJpet;1;@lS)KG%ŢTs(jͶuUݷil ndXҭ]'@xo$pnL*9+`RCdG2%d|… z䱁:oBU-- SN)4 O@]IhhYj EbZ?% sz $9>"sŘVdqٴiRw7#?{NgK# ڽ*V(STtjjРAnhޯ*<^=fooǏ:{4ФCz{i΁wmm رEB[jt{} tnժp{.r2,=G,o C6RMڱGC ΎYۓLf=T39ookoUU:=tƥżyӷG<+L@3q"tc/a:2Se5&LC3L 2K.nK),gwg[ < IDATm^AO0n4R~ 8<Ϟ(5Q=)[#{TL]Ǯ~ʳ?LĵѶ&4MCQXOI-C6OYnjTe;^KϹM˝ 7ɋ"H@3kY4,5hoLOZ E27 P8y7h7xަ&j[d.lΎs2]{:1Q ]/\:,oj }ؽTM _3xfZfWcGp*6?3?><]g=Ve~ [}1Rb\[7*$@bsO H}ӌ8X }P($ m*qhW5elkMgQؗK/j8U>]3q%`hssk#Y_C3|77*Yyš<0}Gj?_mw[ ݽ{Y] [ 4W(iS+;%$D'hd7[köo_҆/NMz9͜de^!@Bw{w]2Qq[eK }l J.x_N?HfGWio"֟K.o  [fu偶MqߌC.d^߅(yFZCCQHXO,COeyW(д@ 9 ;B]ON21cX۴ȊQ7^cE_Y*>2ӵgJ,)`+}3 e7"(\rNZ Y`^U+ͼ>׻~| w o6:&V9s/#|4@(l3NLh}A.lʳao감wg 2/ذ[eYr_9IPzv~M?]IBFhx|.a/cx霺W>,#&ir:af׻Tx*p¬Ų{b'ww]WdI'JWw6NmuqUnF1n)+t]8hр}"NiUxD2T>53HJmr6 lp5>~鮰00YvwD=nY,LЮPNs61gg()a{k>o:@Mo|{9 nwrPoSːS:"O] gcZh"b&GϹM˳ LxUUx!붑1fr( Yl[2|۾{پ5p,1')Wy;r$.ilyx{{{hBF[-g¦S8$ 2OkL%!zt(zVn?eI7n:]DWdv:?xdE9Wþ;3miN :2z?I(EY]Ȟ>7 Ŋ]]d2,c? |({@BƵϷ O38i_BsQI>FW|袮e> HYoȶ=3B!O9W8Ppv=P/GMtC* i6[@ukqffۦz5V|QxYKgWeޅh07)o-]ٜ_N. 
Dü0~ W{yg\׮Wj6؋mYiyTa)7DWP6oTFL9ۏ^wzCEҨ/u2bYC0w{⪱7TA1;KZt' `YDI1Ir@-&$Д@ e R8B!ߨ^fihxR)MjXb/T&EۘDQ|)^@Y\*ɤXlTӼ\̯,(?ʔjkq7<8rID~+JhuLB^Dy B.I|t}m'vvFWP )eXj^/e(ZTޚN]LLD\Q\8rq U+PP( BP |r)(*==]RTbD@ Xm9KD,W*˙DZs.gY\pH̼} I$"vG]{4SE(]~5cėx)$,A lplMWP ZBPC @ B82qc?_"0sGeAiU.umi)^ׯ2Cn_ @h_15@JEm'$MI*sQsdZ?% KdV] PR)?F\}"z?ŗ(6:}%4jltQa'uֳ;};yWP'Hjuh+K slwط-M+mvhS&)ȑgoS=e*nXC @ B8j8evafz+xGpNKmg>,4#q5whHwꅽ3>Ի^\ݿje#ݽ=qu6mte-[$,8I-U*=֏-g12N=1tK!@ T)B/^ܷoߥKԶnm^v>o~ΥIc[W,ɂ}j/\MMgw>dE2EPxcNPҙF2h6p ҉ w8p{,)& xnT׽{ix1 6v@ P:)ÇTիWw޽cǎܥ#a~\YWB _tͫmԵ,"|HAG@UߛBރ1<]XO>f[u5Eܻ랛_UO sᇑz[2لO8,Aޚ2߭~Ph۟Y]]{cdNV&dYQ@ @jÿ˹s~-Z\(w;Hy5B2@ @ L8ݯ]fp˗[KUFr?=$V7tCIGr9jUg*(/D17:N[}'AY(J(LWKj0tS5H#kجI~%'F?OWdtǎ ڻwopp?m۶'ٱǠ!̽3՗e7 O [i )=RKoO k5ߞeVΣ$!LvLovla=c5_ٟC}^PΟ}l_/?6Tu]B=eeW?Ԑ1EJgþ~GX6E[h2dλ"-11iq|jd<Y"fvgզM>{3{!Hqu`e.hxV_w B~9Nfװj1v  ޢ𥸰7.fv?^:b7Lk- 7}{5pǿmQo7Bm۽ :2 we\TP8nڝ@Q+MG \c@Ѻ !ZG;O|Rim8aO!}/7q5*jf2q7t (mϡYc"N#Cܧkg>}7E[h2d{!i^a)Xfewq9b雅U׬^(O-NZCm*DL,-tHk/9B\@8qM=+XT|y 7ӽy 469v4A?S!1fcʫˑ૽8`lktCy#}rg {V(ʭfӚzG)SGWo <4+ ū)m=[uޮq] Ρ<:5|ʾ6tL![ͦl_̴ou=xe@QI"5@/uu#^]Otlz9 |?(oݾgԨ,I|~3W iFײ<ӓ^ܸtғݫl[Yuܤ~EޚG ,0t/sľ8+ѭ{C/[_APU}7Uj̫eJ[*~5jr0!!>|+M mMφE^HJ@ݜh,L*-QͧR u7!K!.:,>ó/_uịUJٞϷP;hQ H,~ 8R2ːS>_6l?=qHxt^"b`qmZT舿d$I4f“>sm"W;_G/ŸoW\Oe,)=6ޚĈ9D79ň&6ڣu1LvU4xPE'߾}ȖSH?w})*C{b2be/KЍ ?{tUs?{3 &~7khYVl mjër70N\bOFV9f#o?NJdѸAQ Y9'NOhW [щLKOT܉NW4=BI.x/T/۷9fͦ5Mۮr{80ȝyGs/om`L(񫤳C:uT]o/޶;' 1Oo̺ Jh~η9o~D/w2h~;7[h2ϻ]^ <>/JRQѐuf- t-i,IYW|Qq':]Aיypx&C*FaHs) 0ݹt|۹qARw훶{w'w˳y+!;;M͜W]5k8f PnO6E#v63ι$*(}Å1ȱ{!קdzzWۖ޾]IȻ-FHe2hb2;c3tճAw^m׿6/*?CAtK&sZ-dRQy}ܭ=_ l=lN=:5qU^cHש5?ϓ1o! yl\" '˨\폽H^\/Cc6MT)uVQrYM-(,N||gË vh.K}kĂ1_g.#0324_ˤm.P]r~Ľmm~Um]+0ɱBBbF{nwDIw:.j΃a}n{GټQzq#{gv@󅏣 Wi۱'oxw^n3W'!ū*n 6< [6%g26#i~ KB-~JQf{rw9EXv=aR{GgݦY(uM8!5 p+tw\E$M}oֲɾ:l5A9mtC4d:~aReZ`9fm?V;i9dD77Q5;&Ѭ8Pimמ]S@|uyMNN;r}lc{p\w~ 8R./H4jѥy#S١ZnhPq@TάfKUpӯ9w|ׯ' sozc.2g8VI+ a]kk|:'Hf?& IDAT$C_<·3ۙny} ~ RRF;뮧]ٷiUp-&X,SDPNҼPo9ߡ=kZk oP&O.kʪqg@ղ5k}w$ja7|UMsQ^"'&z*QkGhPYŜO/#i8 jEY 9A btnm^צߋYhơTGBʀz>*Yb4  γp%ԘW^&}xo {~j6q7opve4r(_ˎzÓm>B]`3fRej2j~zf`C1ln-OhB.u1M"Nܒ3¦'!t#%me‰U=1ҼDv],QY43S@ܝLI)!'% '\)eylZ,YBTi jg:[9bFשPNlywM7/T;3O_AAbP~ R{}wJ/v<"_z̡i̵"Sgۦ@}؄P s-"?n߹RΣѭxt6l[ToϭhrFt$5kH.pdWl1eNyl^Pjyf*/U+Po8x'C3b7}Lp'~n 8 AwEʘKo6Uq#=i#F|ʻIF7oFfa \q5QBک["E/rI?VɫJ}46yyWP% Pͫ12B+iz:(EeD??x"'5Jۙov$\5Sק*}O--~JBZ SM+U'^'9T>/'ZьmءQ zz+41@#"ߧ:0XCZ,Cr5Uj̵M{D8Wj`њw-k:ޜ 7Iwb-j\e%Zi8҈PF~ djizŬ;Rҏɾe=vlUs~B}G.UM4]ٳ߄ջOb!ؑYwfa@֫ТrOtEɛ,?="D,ɒ>Np˜ir]k KCCQHXO.COYݼRsgTjĦf"l4}بmjT/1z4=׊³\|)7yocSmvޚa(yg~U+UPI01iKMGvL12}1x7׳mt-nMws(>w5.#,L{x&uP+lG*fOb$>JŤ3^a*>r9EA+PU᱔lYE0cL*(xhPol?hŇ6+>]r_Y$@C;yY S3`fue?2̈bY^ ѕdE\ ]+h@oKŇN\d].l疐!666%3 Ŀ_z5J#D0{~(}sXxμB{dEe-<Uع }l Jm$+7rA24%yf')B:m7g/u(Y@+@X6E[h2dYwM 2|+f@,3EϱM˻hgS Lq}̰cǎjW,Eg>ӵ^ZIêF<1mvG{*Y“sLZKA@\5 +U*#*:)Yg~]NA)T'F\mua#c#A(LAuirÛ_g5 ͕co}q|wem̟Z{Ƕ\DEOwɚ-zWNv] I{v0=zzl6~?`NPRd"@a` g0A;vB:寯8lƜ˳ܮFzJT9[?zj2F_,Â~ Y,2yx~ʐ}i@Mb;Q{:i(b/f ~tmZ^Ub2Ní#eli"AhlmSYzk/t=E;@o Ķ(j+UBF;II @ۂFxpL22)gҁec2~g7Q)cQs50t ~6kN56>2ک+89FW+zf.o;cǑ B0/m5Gji[p Yھu4,vr%l S]n?=;M\Uhڠє֭[_>3~n)̀#48smv gup Ok[wlGs;])_o#aa^@C&pvF$DZ2MAV6,lE7>7[c536՛HAy+]t /{3. 
Dj0쟾MCQXO+C˼IɅz "bf`h"+ʋt{eȱA jH7~5C?wk|ӎow Z'3Rw(#j"!Pj~K\5*(,+#X/HۡMڏE*5sL' `N,^ɱIr͹\NEZbb M XlmҴĘRJ 0#jhx2MjXl( 8ml@ jm;-Ϣ02YL ؤfxw7/_"ޢU2%P \rM<կ$дRR0W|xbg\x#e%| J9ٚvK( -~JsڼB l ǟ[lâjn^q +Ϲ\^^VLQ))^lƨDŹXB19wS]&Xz<"V@\+e5{?z|?"1):1#\ݳA@ߖlOH,'XZy(en9~ Y%2S+ԮS2\5C ~(~18UW6PB{{$]x Ws0dӂ6 :wVnݿ'yL'AcokmZ V;#@(Pr p\x1..=000׉8W0ScA "0)\LGv'W=28ϝjHӧ4n>-*JP|=uG<<6гC/z͍[t5@ @ P4Ç۷|<}#vU@|nXt˳AQ/R=SHIkMr)M@e@᡹/5!Q&d5⹈H$A(T:(2>%̭?GoS"˰Df`%E(AŋUVE7t9<<ʕ+-[4w676 2m+>7 P%ye]GG uմ3r0:6מ|ޮ@ @  "pU\b*UrZm2B!5ѻcO ͬn+ŋ_[E}-S'@ [ ~5/_lݺu.RY oqו+m̈"Ҙ-~ӡzYvNq~38@ @ B~)|o޼9}߿ߪUSw>xg YS;QRx}~[߭^Ft@ @ BaS4رcĈnݪZ˗/߿m۶%E sG0 gj;AOC][tMFMMX$A @ BaQdڵ+444:::000{x|pOs#|5, ',Kɛ ͹_zc5B79Tu-7<շLEGl{!tS3f[[ܗjvo{F7۹'b/c!0 vuٸkJ0 ]ٜ;6N@[~R @ @ ˡ\J K"vH?MK\;s'E@ 0@  mllBP(|>q\(JOOWT*la_"qĥ !@  J2 Kd٘I4rьd7"]RRh'{xx1+̦0ِx_KNrrrThzs)䘄D9+Wc(a$=??|nt%7=ijNA[u[[Ji;AE}=7=C|S~uߌFSÂ~ W=d^J+WvFQ 1#qmZTd#jWۥT7wg#6͟|N~_Ƀ<_SX*[jMF B0B&;f^ ^N;4vc{1nrVAu=zڬn{Wrtb~>%6v߾, *'.ӿp,0҇\SHjleę.2b]?y3="-~J`~w O/@|5ca186-O*ߚvT ,SL =^󯽷v̢r ,Pˁ%q 38B !ִAQC70/+mC ͈0o^4D؂=N|RNۤ$bgBJpڎkyjfc>5=9 (ٮtK@ĉZ8(}*)~PCgI')"-~Jb}Xў=Vh^KF{186-/*y rVj1+~PSz:>iM{}nTbgݗsc|q!.1 qՊ?$A |b+Hyy'RW_n7*yzQ$^EᆔoD ʲPvaً)ĕuչgf HyqD۸/e20V4 [oƄ(-<Գpe(J A{YbP? g\w:SI|,L:*<ͰWI$vUZԪ*I|z78S̾g";WkXV)~<⃔s070!!>|+M MφE^HJ@ݜh,L*-QͧR u7!K!.:,>ó/_uịUJdPmk[B\7嶴~q,#p7(2BD!˧}m*~ۤCg~z37Ey(b/f gצIEq~9 jgpLQ؝T]v <-VįݓY>mA+Al6@C:guONPW"14 ]=ކЕ]~8sQ;U/SnB3w^gyפm-ث3ld+֞EȥyGs/om`L(e_%ש떦]o/޶;' <1"Y^wik M̫cywo'SdƲ(BBa}G Za(hÏζMHzriI?wX;.lN?wSיw$;l5 ήSv>꜁LzkYrfgo #{ Y[3=Ofș]W]5kA x\>V_`oaJw ĝD7C;txϚ)TxާF Qyk׮1xFwOcU˘<::_*Y54IÃhjԽ+&OI36hֵgQ72…ˤ%mcKǎ3EfDa.S06/%=8ꌔvu_e&nyx#;wYl(:Ba,7yg(3犰Q,@^~tm*q ~Ư?qL%ʫ@;O*E^I11^9Y-uxk;n ^~b3[}ﭽ}pЌA 0kmxΥ:}`?pyOGZyW ɻ wU5 qr4Bfp%J}A_j~NFrɦ-iɀm4}Sv]~8Raپ}UxR/[oѱvfe8Eֵd&u-sꔌ޽w9sfތKNaJYեB1?|m{L1CI %lGR? G3cFr)\Ĭ+2ǎjg֌rS.(N)\i\3sF.+ 5n3g8}I^$AQq0ފVqRŽWuokq nj[unZWʐ E /̈体{ν]PjO^ N]p~إW养lJL^\K|dYum]d畗2Z{yxΐsotߎ5JK.s6E{–4q53|U.&La3#s`k%$>cC 髂1#i^OQtLZȸD(&fv j\WE1x]g:۽]<+#%E9&5UMO%&xɠ428Eƣt"IT.`LmkgoҢS˃;i> 8ۚn~ެS6^UJ']N0c;ܲ":>ţ  c-)}urŸQ^rg0\+qm*q2 `hSTč|4UĜbanm ʼU(\Cj+BE^m@?;{kI y`{wΉT¡l_c& +rN1PZQ{W# QE1Ehvn1l PzдK,*mr /S @L˃?,{\7!"ƶ27|>;%>7y.=C SZ ܽ -ckr,V#׀(e0>ġR']ʸd7Ζ%q7y1~@îFe]`tĕnD7jخKiubVO Xa6.-_?5}?)"-t|wZ (֕5],fOP-M (EƹipQyD&b?ut*2 -)~G_Ƨwx6'oMPʀn Gك@0Va~.l,oB-wC 1{Pvo.c9HxX e[Nݭ1{}GpnqG<$-ŖI1(Lk}*]o14.b|rXVdwƹiR!*ыftt45[bkmcD{KsT#ٽq(Qᨧ\5y@s_~aBxg='UP܋n;f׽$vۻ' /888)[#mzĶ'c7癧jQe7~7wc'L0 [oEs?~@ c[j(E;߄?qС=/f,$8RƸ}MgteUPd EKAsATX99?lڱ؈:?3o ЭG]T#9Q[+ФΥg^Ru @l%Iq /<~0kI ymA\nɥ]j3lL|Yk_'-vfgS\\EO,owl^A@05Ei+M\u [Ld ՚h|[kl7V?9&&T?-±(BBǧ!,y4IJ3۴ܩzzp1|S@{mo, 24Ee٫TfZԄ; =gpcŌ$j eC!8"s0mgg k;d_עSd2L&jA{5BWXrFqd,95>bC:[2LuЮm7vuEvmt3Ւ!"֭',2LviZ~7=;51n}ЩKnE޼}οMeCW=m߬؛w/9oޢE+,|`s@[ ilKzvZfgsG>@k==R{?6_a$' Yu:V^\\;2d k΃ީ =Sz.M7`UuW* @JL0lpg5eJj0f8դ=\=i9ppi&eZ` x1azSooRԦ6aþƫdfAkk=f(o۪ͯ!fgC:Lyfuj2dyCWFW(y#?+Ē.ۏiޤR?|9>4zrdʴ:5syrlo;Ƥ52ա݇>x.K*vK2Y-*=ۭ:ʪt hYxxqc֏\r4h ask2h^E8y's@P~lZcu$pq&p(=Uڽ"yC˼G_ be:-͢gTn$ƫ5w' qdذٟi> .xvݾJ|>QlN'`+mK[sQ簎x5GxMQ.-hh2pTMYs,w+dE'*$3nlvo!$Fz`T&)\[.9ͮy~;ɐ~K6jQٵ)h} 0IJc ."wYKbJ30FoJ@ ;ob]WA.%'5E:-/9ڰڕgh({мԵH#r'xIlx͝ E:>ű >eȨ>J|۹j,xtL;mpQ]"v۴<0b(HPU;&!#ڭeǶܼ*Ö15E1989} XO-GI UnGE!hv&W^ ܫWבrJYf?̿_J Θ&b pO&10tY*jYSu2w_$w_YW$}m[ǫjkYIuݖf̦?reXn\N[oZg/H[lۼ: f? 
dNWo0 C n oT_?эX[ mCFJi@Ί* K| 21Vů ޠ] \q׌sh„ i!ja"fs5gEuɀ[-Wo͵徝[8-FY_Bd"Ų2`UsdWAƎF 0'&?#Sn 9f裟}~v~~m,Fu]~Rl` 1 f)ueNH=+ $Az*fQTMR5^znCk ,iG_gJK)$Lxz|*SLyWF0*kv{蕃lZH p'=>0R9D֕ J@'Vp^ՔiIT5% I{ZRTHDVivF j-F {,TR5p(L\P)IJy;uMV}ЀRh[ig8fI(rJ  i+JԺ4REE gq dTD|4w>Z-CZDǨqm(Ƿ{!dʖ|kQ{kYhJ™w{VȮZ) E"H$DBi@)(*##CjBZgk O|dv2X;;-ˈx̧jz(uj1@ԹPN1%@iAQ.\1BW(+coqKז-k5Rim&DL F~ (iOzr\,vv/oeٽJV+W*p9ElgbT ~F뮍T--yG7ک0iKhiix#d=Ph(La[ ]B`($6E[|e2^heIQ\5@9,z}(RkI&CNn:R4~C^K`K@ Ԩ[dٱxTDz?ph_Һ'|Iu?@ |h9t#&[\@8y@ Bn^SH!?u,Vc;CrU)uxѼԳRό?N zՀݨg䷅<|;ԝlZޥC<@ }!7qdڥ:"Ϳ"V\lIKM7E9TZR(LM|5 DR &D4~[lyCi4ɧ{W4p:e_y@@_?22-b@Rjr q>@ ,6t g* kǪ +-o ߼o<^_ߣ{ٍ{&(2ՍVŋkx4o ![nκxh!@D"In@ BྐWX8ZM-a i~{q]NpӴ8:D?j=څ7VVTrE,b%&ޫr\q̈́d57k7cީTiI7|ۼp6E/l˅׮]>!]&b@ @ _b}AUJb$Ǧ2<"֜F7('os#7H!)WO9}1}O,~'h8ʫǾel IDATXohEN @ @)2:cٳBU> ^mV?Kix$CZ4&J<<ܭ-ڤlŧo鄐M[~{ 5޾wB-i/_2W O(X:״iSnω}ZH)kl@ B1qGB**^l$?o :P"q/ij-I H$%E6F%)XKsuR) @ @(DFF BH$D"P(iZ |(jZNQ1B\ǧRTj]&YJH.sGWΌS(Y4MsF78ZH @ nj#8%߇Kp즻u3A)VtXrM 43K*sQs:>Ų e]iq8'kPj8$5*VzTlA @ B TwB8p{Y,1-IBF5=[@ @ >ZH*" {e}\ g{@ @ >H@ ZidQ`Em@ xEm@ @ B^!@ @ <$A @ ყHq\"Ee_`KY]=.Nȁ/?!.jk PlUYXȐEߋMFYf!cxV+-W'Z).m=t]ĻϬI8V+ReJ@\BղHpJw{mL)AyV?Uj5*POJP8oSʐ{/`W޵ EPbA&3KUCVS`B;{(2ٻW)J@"q-Yֳr yj[B'xP{3e 2<ӹ`m<m>,O~~O~ +)ܹh)dM{ 6SWS,| =ᗻzsL=VoKwMM>ll~&NNŇnˍچզB.M)^OoDSjL pSuyjy]̦jn(Ƿ)nehO/`_ޅOtR˟tHW3+r!f80GA+nN#xG~+oJ.]90r ߜ2p3hW'ˉF@/QU5mڴi}/{m֫/ ؆d:S`zҵpE;[i|<4Ԯ^y0 g k+)T~ Fӯ͝]KjڔY]bJO%ҚEFn+SE';[CQ8O+CO缫e/𶪙A|zl+G"pVa+R:pQDO%K*U*/X2w]y!HJNVpj1.w߻b'ez?n;r}׻ܨDX@MqcSz\# ?ܽI(ތ#Mp:7oWhFm{:ˈs4aI˞Tz+0 1"LZ7uJ]Bo~ݒ`S0^u[~~t-q^ )"-t|er|ʸ=kٗ1k3|z(.fBrQM}܁{8+RCTgW4G_u#ٱ=צ7k\33\Z:x۾ -6!Z1Bak".Qnм--)r-;KѺccwP>ueT)O^4#ZI)pn7$JYı%{f~xI|raUfeR{z6nH< 2JxإŬ? 66u̫ii`*WkҸkv iQEQej6tt{v).#} EQN&7:|E׎XA0[ :P#s4:e-?2^lIK?3S:''9b]{g֖٪~mZ³oU, 0 )7ߤԃצr,㑱)%eܺ7n.N*=^"Ż7r v+G)1HU|4OJ~ݘ1IJ[eɸU݊y~y("-t|gr{ʸ]\ƍsu3RG\ϋRpRY[^]4dYJ3%5 U#.Y.}U#Qr%)=.K,ULN1)ru;wm]EB4I#O;QkKTmڠI:O/R?.;|#VZSH_nzH~c>E;wNmzj۫{NNEuvJx / ꎭ< +V5vmK_"Ϻ~Ю[mN+2(L^{ҹ-| jdBn#+|~M|PQyz~~m< 3߰dcqꉝ-sum7__v87 |~ҾMc7UL?RHT-Ҟ]b&z%Ou>5&v{_ ӽunϬYׇa0qF(ZaKerXE&eQ}tw`Hv㕬b >7k0n=7SL) PJD,:Gv}JEv([#-Ҟ,mOҨ@ls۲g Yl5f޵wt)eS%m:l]K1ȝyKGGO5~ndNxc% kkF6*O|ʸݹ^%z./JQQa).wY|@&芹$UgM_I˟hŬ4/ŝ R1 o @lLg ڍ381>xOyAtoΆ,%0i_ִ=ݏjE9E|Ll^2?~={Iw}}XcǞ\¤yӑlt#ޠyk׬T?`"TC:cqvg~3/V]tCުY_e-R*gYэO-uod-xoCRɠI3gf|a/.7a/+߿?PϝuEsooAp156S2#;~ͭ9OpduƇт?D[O +x[6^p(,.{ZP߲x\Ǭ C4rzŋB'UR: CrQ%hT]WitX* 4%X4ɞ36(h^r墐m0Du%niܸc>›a{R'i^j#W1%V}ƞ; a}/cl_1{1?|=7-+Ö0#v*7Wkϭy8Ge7ip:OιjdM6բ։ǷQc?_!7lXeA& is _-f~>a(Ò]ʂz|9k)ܟ+3Ngx*9p|  s;tO\&}_nZꒆ\E+bƬd=oM>st~H3[4>t^@xkӢ(ECgYx5biBۑCYW !$Z4R38J %)' ~vD54+UZȽ"1;_< =)xOlbv2Ņn!olAmN#vOŗPEۖ@.-O}%>lH;{Wj]OGԮ<pMlAmX=?]uηp~ ?԰j⾁49#L@}+4`n>#5Eҷr9OpP 9sevM?Frӱ-XTuـkzy!->\ϐeK ;Tfߏ *ˆ'_EgܙW3_rO_QU@'a{Smv?>|z[^{fK(ʵGlJv^y9Yp>)AyxI} :¦|*C߷YxkUiD*O,U޼P/=ߡjȶ-K[k okPʦK/n(a˪yg@6tɷC+@cɠsjze؋\'$26eR{˥[7,>0il$ZѸjB5xU]zg-q5C63X\o?#@]q93Rl՛Yg=1.->@)nsQ 8x'=mf2ڇB@_Gj]BAi>LK͛@tC &p,p{'8u>k[9{ sSK(hJ/$z| YL&ei$gMp ڴGw.;ߛ6bsd* S͸X\D[xG)͏[֭Ai\uPkH} ?ӏf؁)7dd˵oyy է5?F*~FQ|dj"۟H+=qn]fn+x} | ,] }),o_MpŒd|ꇩ 4l]v`uZ\d&{v1z1=z4))1׆,,5[2 ׼[PgLɜӿp ffZQ>U4[Lf_eijن¡p| PvY)d_N5h 'ZQ>U0/ϱ®J  {t˴R SZ"EPeZo:6-±o6o׮6nrOkqMbVN0N(-{WcHܐF)2t :f< fgT+,-x#`QYd0o]˲?x*;yj.S\bHC~ d=k4Nm_Y3{ n˖tS>t9kM eo96qbdgqQN˽scP4h=ݹzzNh E).=:MN*S']x8gOȲ.yK4jv{ e麉+n԰]7_iyWn[:d\_?EջLlfL=h( Ƿ)eh)+[W(tw4y"F-U 65|.*6X9~Mvpϵ"=->+YTbܸ-歉R6ua.$GE21w#h>hԱ|Zs,ҨQ'E}:i;J SHSό?o z`Ɲ\b¹ ŌT544j5ϥ4hLi=$-B~=&ET~JhF2R9R;kRM8htR'yn0Uo<&hxqVQ[cm n&79 @q2UAtI:{SVx̹Whޛm=Oe.whڣ!,dC)T>}5O0R/_ \KP6oSSV yhzf~cYQ!U=2F#.\|<v!7GoMEQʿ :jE@>!".H6ƆB*vpju ?0' KM7- RiI,y&DorɈͱ 8F5N] R3Ҡ9(#T52g Ze38R'>4Bᯜ|V"G&vJ[L˫RN#='FiV ҥ3t=p$5Bav0#Pg@m#c̄72KLZ}ṡXʵW<Tn^A@05Ei+M\u 
[LbZwdZNl?/SsemM??-bgCQ8Oq-C.OYniTc've>@.UT Ԏ>2tin1ﲣV-*xvav5!+sK/f,S8W.> )i'^[ݵ/{JWףU-׎Ug+-o {rN/aF5|pf@(8_}R>q/G+rMdUf~9 X+w諛Ffeb5?.>ԚgwS=}tz_*諛'wiq,B֊}Ǯ)_?05f#.>9 IDAT{V{yP)}UpawbW}{>mړfii4Dp߼.mc~aSǍŜ{Z+Lo=4#%{kvE]a4Wnߎ<5wiܽw#ԏnèU$=8?8k)fy4Tu;u "Upįnq5Cop8!þ=>#7~4˝O^kKZݽz>O"w,yy` #93DJmsT8 h+sf@GO_bRm tn&׭g9_,[ ކmȒtޖU̽'kd$G&8b/KTՒfZ>|Xp6%7syq3xGf43ٰS_4pxܿi_"CTQ1Sb@ޡKO *w(IUk@uZ(_r*a +wnPC=  엥~1MX5 E:>Ų 9>eWV~6HrutL;mpQ]I)4Z܃Ѷv(> P@%o[6pǞyo(jW]989} XOGEQ&=/qոQQ38gUc 5j¸!+ﻥT?w1: jsHMվy[w JtkN,_W^ [ugB_1 7# ֣cǎF6nf*oa[ʱyulرC/f-9OtݓЫCLx(Z )  ? lߥH, 𡢯n:\=r1Kʤzd:ɻdE(Y*VGi]d;>fN̬]ʹF Mkd0sk?#tH7wӨ\%쥞 mgopepu5vae nʑN~sĭ4,s)L]!\ԣQzMRUڤaK2tݺumЪ׌4kaD֖dOi6#Yf;=S .SuEڌG: ltC! ${z7bdE77[co)2g5 ?3R:O tVU4]p44vv׼&\g E:>ů J&+pVj.Y!}<0&CZ =0^A(rm<}@&_TO[v5M26M;vЙ]a@%NRwa(а%w>*(Ԭܠyi \}|j֬ffxus|Me Ҡ-!A5'_+×_NN?ի7#FnlY Y))e+ >0 {d's.R_nU6Syeg_R]n4}Y`V3uEoym2h̀^N^nc!Iʺ;>qFFn??G(?|aS?[|#t.`^~B![\>!0Rn^T)J\.7N*]&KUSBP ظ4]VN qfvFPj- \d2QB!Vx+WPR(rH$M͟ݫb-+ ZE[P鄥mE`^R5kVN b!ms]ZP(H,.Sy]Lʃs./_r=l@^ZR +Slp +8O,z?ļ B+[nhQ.ZdZ&%jDddP(DNNN"H$ Bϧ( Vj 8'=,W;\V qvҒ`Υݬ$/dY %yT#b@HKES6*Inqy8)|ގ7M;3X ^gjT.-y'~,+w/Wf<ǂ8ϑ[2,"8v#pRO˿{ygwK7"53̤}y8@G(':ir=Dn@ BྐW1hed>D[ֵ= 'i;L_uj[7G@ ( 9y#/Q!vC~j%V! A B3̡-|_;~BǧXaA +W,Q!Ba @MZ7@ @qp_+4@ @ <$A @ Ⴧ,Q!:,2>ů?GoS,˰Xf E(ސ@ @ >xH@ @  p@ @!@ @ <$A @ Ⴧ8@ @ |cb ,ı6"sT&@ 2@ @ B18@ @ |@ @ Pѽ:(/YKhdU&a3[& BeE^T*7u"aV?*e65³Z0pl(Ƿ)~eȽ+<](-iA&3ӦR*F$sH$n{pg|s U$B'x@B񆗚tb_)9S&q 2ʇۆ~r2FRi1[% Ifo mMR6yotw5w͚!5ٵsBݒ]Թo [[ByĔS@Eѽ>xwҶA0;̦6d ~%o5E[23Pß~Bo(+r!fڦAE~u\z`9(IKO&|S5,ȓ<_#.-A\5xÈ+|\d>z*70 ϼ-yӑltVs5[ҵdCAio^/~Ikvصe8&{ex5l] xI=&'2Ť;ܫ }s*^N E:>Ű 9?eLKJty^W3ϟ";,-ڴ<0DyuR-rܳџI=w%~XXv(@R*jvuη(A ;`e%"1x}r^Z[x:tq~Eϙ{lOA4Ro#;H1Ef%^ejف;(cjKP̋ AʼƵ~gkF) 2}[CQ8Oq,CO缿߭2eP܋t=\qslrݍߎqUevjVSzEwpқRh@_B1˽ǁ:ZRk,C\ ?$J>=qorw@ߡ=fu /ދmYp!q ͚2ZL(<)OWv J}v~ttԫħ JUVoެ[v /=mY鮁lX.rskݾTXeiG/&L b :_ r Rj"T];wJd RgZ֪*=""4WwE|ܿkmoZR$Bn±(BBǧX!ǧ{ޝ*j׽4!s˼(E,5ڴ<;/_(CPAnukޣ֭U[ujvXG[Ѻ*UVuWL EB2^1?9{ϻQӡGj)#oe\kwno"~oO%'k@g߾l5F?.`89%I7sDg^NүgnRIqҼI:/;qTּ!` J7T+uH{n6"m0s+ޓ#鍚;ܕy =o_?&}ˤ ޾S"va9iQ꧀-ۇ6wE2-d!gﵩn BWj72PT~vʡp)O/>tfV&O !g\S L3\yxG>2"|u^GjsW,)Jf\#r~mGm07OWZRWo<<Q@N9-3sA5nhfN3޴{m5/޶{ H7`#Lzt}E =qurAďշDk.5(JǷ);>ϻkM 6̰5Qj lC!~tu]*4?[ཝ{gહ$NXE͝u1-L~y$_[I~Dk@zԞӿyn |rƺs7?6QQ~vD7ڔr~ڜyt~f]~$T+P {peA^_. ]:qˀ/̊ Hm۷VKxH9n|G>ǑwP>bDho^,۸qe{?Fc(cB24M4X9G5< t󧝧 &U.|9 RvuS)'LڐJQ ݼ}ƥyIv?k*@)>}ڴi&L{J&i'nڹq܉rh*7k{\< ebL?qgMn?nQ(aZ4JOofؼ"G}xEAߏ|ңލLMprb 97ݖl9nppͥ[lzuc(Ðиҁ:lJDjQI=M/_4.i|ʖoy܍%p| 2|OYANSVa-,k4;ThS. @? XJWK@f8yQ'%=}4T5fs밉֐rR׻lK~a״Bgkڰ`.%eG:~=a:*'-XƄRtWUːPͦy A 8?B1]6=',[`k"j4ʛK;2mS~VeGzR?oe¿j.XhUì(9Sǥ>Ʈb.jau3n-xP"4[w:.|U:y٤rE@EJgo\&"iҺCCnbFFĚ*2U.Sͬ#ɂ_PFO=d)-^W rAդ|3|TӝT\:q~]]>``?ѭWE }v֒^-+Xw=4/7/=מ=T4Vw`AZb(o[9. >7E-OfQZͲ=(yoe۽XXV[}ʊ;﬛g]١"žsS\yXOBY+yyvy.9u&ZRlXߒ o|0iǘ6Хk#mIs)0]lcg6O.0<5wmь98K:O;*xR(y٤wiMuٶ*[ɓ["OmkidTP@ZT?  
ħO$%>}-6˫XhUϺ 3zS&͝ƗH̪SNkf*]>mk2;܁)T%דvOd?BDž1]rY[c/FJLJVS…et8&R5+b@w `^qC?WE=yrʶ>ufJjUhp2ARɟ$g'sKeriT2Ѡ>tjXߋ~UNlytpU>Rh-jR>+i]rGZށb*&ƹy̬B}XVcق)2@+,e>>\4j^{ODO%sZJl$PGm嚽΄SGC|LiM.Hhـfy%ZtpΏQ5 J+T?\<!U±e:o<K!D~ +q2)U)f}zZi?䒝-F[ûȏD JŪ3Z@v h=CU)@Ǟ7t5(w@*U3vG`[uUsȼ2jk^4@Sږ- X+*EOWÏC2w[*.ae4S|5gL1ђcbf *D_Q8,o>S[Q# O~cps:8 |i,ѩ Ž-ؽ$T+c,"hQY[Zb2|^׏yeJU5g꾾~'fS3DH۶PF @gx4 ɘ єoNӷ_pjOԬ?Y )M`eQRzۚˠ~pAQ"dS1I@^sxб.yŎÎ)B{[śʕ@"n6 'P^*Ͼ\'Ull쿪WK2OQ-{@Ŀ )*-t|ۇ֟bʻu y+TcSC+ q6u!lT7DOb{A/ IDAT~6x<{U&(EH-XKXPLA:8LŃ1 Rs)`z a%׺E6oo<ߊ>VCQ+ιuo}@\ hMȿ*&I?y@{ LP7QӦU [1-67sf5=P<x/^$lι6]3jE*>Iv0?݃7 qմ ѦN;k!cOM<hd V>Ij$%ʋVk(JǷqfzʊ%6mQ] a BYыfll,$] ?byyaJ֔-i:q(HV Oq~k] i&kDft2\cP]WX3Pd?Lڭ؈*_3o\$Ч?Kj^ᜏ[c̮ݭ8c*P޸|-&%J1֙t]ʧ pp[3'YWP@_wx\sQrN`^zi1KpB&eXlTeهe6%-+EEV/!Z@:8TQ'O\{Ͳoyp1@ + 9y%ك@ > / %NصjAM`n9y@ ŊBn^C @T*% $pp@ ܼB@ wP%!qp@ ܼ,Q!@ C:8@ @ @ @ <@ @ ; @ CNQ! LV&B"0\b8?Go>tLR ଐ@ @ yH@ @ wA @ ᝇA Gg$MtYHi@ @ @ @ N @ C:8@ @ =8gNvͱf#|,*6`䜩M-ɼCޏZ[6+696.%CA ݽ|d3ِv7}'pww 35T*yFrZzzTU|"]xSZci՜;Ja[@`jvSRQfS3>XV[8ٷv]#g $-ac]bfq괂0R@ B۩%IPb{J| 9*k\+x@:8#O? # PʤNfQAI{14p/{^fY>}KZlzli3>?>>^SGrʼn2ZNX8Y۱䌺@:8?ҷ3(vO=(o6;Q( Tq| g=}y6r;&g6=V@3βN+ S8?G cY爅"Ne_obrӜs;%UܴᛟC j%*sC玉ZȼS&P̋>{o2^c ~-WN_/(M̞o6|ؼ|lRd<7*V`_5jWƹ"EJþ|*)7d[aSQ.o|>dλ2;--Q+ef伈ۊ 坯(#t`ϨW/$1KVwcZyPT$'+kXBB5 po/<"E96N<>v覤r%njf<=+υo}'S2qՊsЃ4ϥ;\;M,iS6Oi_*3kNb>/۱"?.'qu1|#5SHRk\}(e$o(JǷqJ|]zᛘ1>f:a%XfeV<=Ѻ~9bHǨ:ì^O-vv3ܜպEBADŠ=kbql9@p~^U+8gN_},WiX&G%P3XA_"mֲbB<)$fRq[-kpJÇq)eeTQ#Ey)du3צ~,ؿW#_{'_Q%p`XxӘ 7΋? ynqi0qӬћiS[xgrԉ"#oExCA_]VukHҞ\؋_oHPT{?~̥ yS QVz6׭F&UJ3zrMT)tq*Mzg؉RI>ORUSot]y/&=\&?bo J7T+uHtHϷn] 0K!).ٙNto}_ݱx3mg׏ d ?{aw"Soܟ3 f5J<-d6^^MdL> $G\Ӧe5APRsBv;%OJE iffW}kUk]ŦFE343T{(E%o-ۛKyXx{|̓ȓt@Ý骖RUumY#Wֵo6xR}yEiF`sZf瞩j̜g .0nMGGճ=ݏj9d h}ysǴe>~1U7^p(B7f-H=n}ݼw #C*5>v+V,?{U3~1n! erJFFյ~{P%8 f˴,>mjw(h>rݺst@7&|G iğ׵IV_hƘ54Ue$7:&@wi2UNڳko}:hQd*=ܽhZ9c( Z/~DԆMϿjBz4ӻ߶NN8;g[#\:WzI92 k.S ȦDX[>ZR>3y?~?~ҥS 5,+R-t|ʐ=eɻRhRTd= 󣳮PA^dOپWf 1IHNk3[c_9-Ȟ_M&H=z|4ɪs~aШh;;C+w8Moި< ӑW4v|&ܟ-d l笐B@t=j0 A;SY?S}a[E׾gw\>{%'7 >*ixcHJ(od8LO6k.s-UD8O^6%{! bU,*faFewxzn=1 [vQߟ0-1~0Eq{\LDҰEuӊo"7ύp^VWJ(}55֝o|9Gb6A> f^1Yv(U2BZ}woy [WQg~3[~%y ׮ct_ҜSgd̠Yaux&%&6UPWjWm]V$-OfQZm]x&ioZD@¦(]BǧSVyg48: nCC_o;.;ֻ4+l!%Uv5|7/ԋ#Cw5wv6梚"ZM(EU'W_u4 h=>YȖU}za_A1rO{3kO82>cJS}0fyjnX{]5D}yY$T+PQ NZ\{c15 U ]'h^~o@JzyJrgҬ3سMTЪuu=Z3oS߇z|Ot3^ݹ#7]Xl `Ō?nW5-ゥ**EvF1UUqNbS7$^͸XGo$Ws]sTqu} 1мb{Xh>\G* p/~3K/ ԇe.R-t|ć[C w JS/oy<*_׀rT*ySy Y`E2ζNcN7rr_\ˈy -̝"l2qRdշ֯xb[=zFibVmdZnE2oKC4Ϳ]}JiV&$O[ޫ=]KB2 jh{  {0# OW`YNNuFP%d_ MF2B h]zAi>\xmկn+ǜ{?F8᳙`3b%iZVRwem n&7 {oO bd*) XcVmRxgo^1Q84}mVM@X!M!8H\5mB:: gW: V>AKHI.:s-a_EQ83SV,yh v7wgQcYgQH+CTxI[zW[M # Z,5%EѢ4 Y7 2#Хw~ax>+ S\ͫ.hF `S?L+۩۔*E%& R^2->Pj0ۇEP'ۦc {gqGd8F5X6=9|b;3nN+*=p`1N c//d䅣Q̴ eVњcw_ޘf񀘒jvWv u/9[WDo;ٹK~a{5|w+.(Z/C/;oТF]\8~_/] &9W3+6TQަ<86f:8|_}Ry5kXʈWSKӢ A&P ͛f696`eS>tBȃF̘0ag Qu[s6pWuT:5#=5IGnKh}5ׇލ[N6_7J\^[*MNJcHNNgvIMZq%Rwa; y%}1Q4'gU>JպIKUod 0?9I|.>,e Eh{H5s YOő>]|2b'PVM+W[8Y>ev坣RP$U uc3]Y^1 S)9c„q_\0tȕ ?Rj^Lgͫ>7[?Kṭ jvW CU#/$6=ì?cI &=Bݕ.q}pŮ g}cF]u@j h9~a!-Z3!j3_GZh]&ǥ >b萋t얞]\9zgfg`}@a3}{/lՈorV xZޓ9wv/G?ŕ"ŧ;e϶g~T˒~ݿf"Q*4i]Etp(hNnm$GW4dxshśW\aEZhMYס]:[(j?bT7H @m]+uߥ+wnWS C=%=8o' PhRkf5vT; +-t|҇,2y}~Gkt}mHmkϽUw:e(b/f ;~tuZaUb[CPU06E;;Chr*mSYFk/c+:4|ߚc XOGIQ&-/ ؛W 38F7IC @WkƮ=q\(h4,Q! |}tt)_b+UZZŭHLG톅3biS~F׭QQ稥OvD `FE9Ğ>FJ<{BzXK(l%͊q| 2x"QDkЇmpXP{W PP?ϝ{vZ\L8y@ ŊBn^C Bɼ>xt>G P8x,J%e65CXt(aB|Ⱦ+["lzl6vF -8RLJ'+D`BPdR)˵I'@pn8S \9-ʼc|25w_WˑяGkxiSƙ6y6nccc:ϙ"k#3sIũ2ݎ%gԝQT$o(`_>W7}lj#]RFۦ5@ :>NC;Z;n,ڦFllgmVHۭn0s~^sFQ±SVO2S}ɾ%}**E@>y5O yuuqPD@pnh!c問;"K*pzէ6?l &hPzTҵ7MuR߯3^Rsͦl

՝ߘ?;=ubPݸBHKX(W,LEg0}jc;.FQPT|W ˚UFjڨ*ftl*uMUuᝊn%F^N@,yer,.’ZKMj 2q (1zUT5wn:){{a^T`W4FQEU^^h\%tYC{ W+5o_-8EXpscTitk- ,WrFҌ*OHj%KOMO](HqCjغ`QC5Fy\)(6x_|hD@Sqvh;к}j4u*0 A/*$qćܔt:#ME V?l,ndpՏuJhmjp,kAIkNE\T5 y')ތj])z 2T-H}bp۾PqkeJ '|pR7:Ɓ2'LE۹f{cbhVK@1uB79F˹3sGu궜Kl,Ub1nlH1'=JS!F,:Gs*Q( }#vY8F\Fs*jMxZ7AOԄ?$׊tKƎ RH>i䙰9!!c}ݼELNt /':TCu0zpBZt*H4Vr(QRw5FqPރRKOuʞ 6AяQ(q:Jf:G˟╫ 6*jh(Q%qKci'X]!HOʽy͇AqY/r5s팶;Ÿie9JިA$ Tv҆ ɵ ɵ C,o|dm]DdPy{ƻp /60qz_O+t `V2d=8XdP%C$bKp¤c4{W۞X`% mDn|@; j6ٵUvX;GNrO=/iHnZlcWm[ۦ[#?Yҡ@jnzpB.smִq}U*I+;=ݺT݌efAwRRM:^C[k}fӒj8vnJtӦuneԂTP5*K| V IDAT2$dQ)E*ْ%:hʌV4m1,m&HK,H+ QPje.XDXv'ԦBMӃh9遅CzR$vχؗTF~Sz7we~_juIiokE{A y ތA]RXKZJS@ ܕ{;Fvq]H~,SV</>5K't _3ĺT1/_ 1|lj%t% \N.X'WG5T7ҽ!tg*8iA*FlgPIXea]+Bn|;X*BI) unJ̴J ~r3R \$wKm*yx̋ny,`KiFTp ⾚n[}s\uNJzgu@@nҁXSIҞhMKڕVPd>R,@(Tf, mydisZv`%'.7 6 h;{ٷffQϤ|&I;@qGrM_2N&=֡uZz$:fmvww\?1ʫ[?Luy&89y*XYk E۵fTU V2<rɤ( ^t{6-?Zzk(zDŸqHJ>gp cB>5] @$@P#Wo3u9WylWpB3$~qvڢ:B?9<5YP /Ghd]#䉰~vlïÂ]$Q W޹wJ񷘳G6lQp`> #+Wߏnu]_䫀&uv E8TqG^Ҡ6cx@70ʁ-{R7kt}oߪ2Z۾q^`E|l[*?~]fO(nVG% @+j\zA4v((Y(YF+MNx]*B]Wpg.]n@GEb _Kngߙ+ΡuLћu_3sC 7Tq$eC:B*ZY\p~M yh"㝇;^_a%PpP|sЎ_7}⒛[Z)y?  ~4oHQrsMdƽKJ/oK/bC* o* ;5-39QÐsb0&T.ܑ]c@DLɎvF:f {KPv_Sg164F,AQ{ `M炐9|pD_ҕ˯\+hEϺ TCk(j Xkx<ʤlnt/6a˅(:3ϴG#-߹L% /%րj@wIz/(̏Y<.%b/r%˷_nZ@ʾ4{Vx̉?MݰM9㼴7.׹5KtFpƸR@2{3[I//%`çٺT1eIK0;2.JNt$kOzxi4ch`gk藖#]k,}WS _'/͒ %q3;st6:ni B' 2^K4'KznA[eon jUcm/7v&.x;Es"ǻ3kqo `uObp-lT<mct1v8DdiR3V,,Yt..BKG}c܂0= 䠬s| StH)`4u醫΢"tgo bi5 `Јb؄n><Rٺ>d1g?m; r/aБƽ&5\m+x %mU̡]3 s>lѩ5s(?:.тr3Leӣ>gFxna ;}Gj>9|PL f: yD{t]Һtg?IP(qc ܨ<$&m{',n# ٸl:M5zFO[Q e5ös(BUz-[Q7CRW)%"`Ҳ[Xog$SU";ۛ?iw0e qS'5jI}`h(*i+D"w[\~T5TR;VɩjtÉ{6,_4i Uay@d߷}s{Ӹ,#BҲn,-HQT dD*@**X +qptNݵ<VQFU%Ѐ:ǣ׍!]p^ @ӥoɖ( uph5JD*sۜ;6p e޹l[(Ue =m!mNq>5KI|e"m~W*5X|<n^Vlg w~Z _cjYgET_Q!Xn}V:Sc17{"J  FU5m)f\.|BlXn؍z%m\.Й OSh %L+6DdGZӫEf#46ݵp zN!cB8OqjyUB %B!Yc,&+WwG-8i"3Hi~CݝtV僣xfn`%bvm;t^A,T~çк@\ema];j(NBz꣓[RxsgϜn]1/uEKz:+$JƙmW8Vmх(YïUDicptnan~Ν=fM͊:V~VJUU坺jePfO hu_h;k B̼]T^Inu=Uhi BH)׃K57LnnfEm1S_Ɉc|.BwOk~cE,cCZA"BMUzΝ7r3JKKRoaAtlB}QP)]jhpQJ{P /T,YhmӾ %}A ȁZYqӦ/m?8}wl?,z^cѤ쬤$%sx$횄u;{iI(Ң^,#v]9q5#kDҝd1%ohwaQ\ꮆE6eqC`TLh~.1&Lɝ$7{Ꝍz,WodL"f81AMp4Q NCo?zaQ ϓITW:oSNX`X'|FurKmup0," u:Lͪkp*X}G8'aY07D5FM /=qsXVfnra+u B]ھFlN{âk0~g3xzQ>Ņ_/߆_y̰HC ceX }.w Ka= ؗ, ^qpZ(aa$n*/'/}l];ho=j|u9G#!^w96Xa˹5tizyX4p 3nR|ɣ^c. 5 >,evya-6G+7뀄>8" MuշQc׀ߤ^(K/}w8魊⚸6'u\)opa+eTEjvڼ_5 Y{/D%zIYψqIV8 ˯ڶ|ҹOuq%4˸"+ڱ9e^Ɔ LͶ v b*] VR B8!bXŨ'EĝKcMe/oNȋMqz=0,F=9,`e",bYӹ4`vn77L{!gCE}[ɐ 07d1,9yrU;"&F^=/WB&P{T( +R9VOo^l@D%E- hH||틍Nq(ո]#|X>.cnz輼-K #p}tU:@r v6v2ԶT@^X`<"Nدwʍ |R?>*;yL[p7ڂ`" Rf`Ÿ~|ku0xӲ)/ US`u~Ju vr"Bwd9;aaXztXJ6QAm]sZ.aa O8Srp^ =KAD6! 
Wj5v@nvf]» I"eAy;S?, 5 N[?7[#tHȇ[=zđB1Tȭxw>UP^ؗ9upwiƺ4l P^tY)sY7Ը\Us2M_/NXhe9 $P`J ˧(]&w '?LĤsǯ0>X!97nhZ('W'D{4~Z 1S| .tE8Ϯ=r!tÑAw7 ܸ8!t;NGw#~qx^_abES й٩~Y@8YCb4pxp݌)z3n#^ }㍿4uJo[jX?OKf~ Us9]kJ@xY6jEo'UaUq?JT5ncg}GN:` T5^i Dpvs% UbW(m'cX,,bŹ;;_ֵXM ŞO_MRK:t4_=3,Cܦ6#3 O{& K  6EyY qD2`߁wy*kң74cʳOs|]+HV+/UE_i'Q"HS` 'pir~wE#FDMU#w Jk?Zgl|v5B'?9t2_=-y e')WWmk->p?ӛ,>-oޡP8bYO# mNbXZxXV\#$°tE$o`܈S 7f$ L9rjxؐ+ &Q~+= yQEa"rӐ cMﭐ [RYSxz0R +GWr.ѯs=ƽܹysZ"JOMj@#c"5?ؕXpA=6SgGk_2:|1O]_ x$>;8|7{\DMd~.tr :;MMer}6/0pX>wZ̺R@pV`+3~~1i3`pE?P5k6) +M^y~:+>;zQW'򙟍k:u&W70녯Z{ܚ#mEeV/~\G2tWr7lR??lVGN Ur–!޼62tW&꡸嬏4&xcXZhXLe%kf~2BkaqX ca=7,{^(hfei&"jIe_Kkk3lNt/:wt--[5ReS^\rK:Y.\P+*@\дQ6ipE׎vWIgYY˞Z֨J5$`0rZKHtQ1M6`˾[-ű́6GZa0{ Hi tAX^\׺b}O~v ްqZ°7,m,̖R K+ϗCDwӫy?65к4u u.ʫ`ʒzw( A}:fv*0&EAwj4 ZDϫh 0(<՞m tš][޽% w`tf4uzruSZ:^Z BǮ5ّ]$fY) @ *0ai 8WY^!nIǷva${ KVVi=SIIҩUciADRWu\ʠ {с]L "°Pi^cX:J{1,LZOZN& K䱃$^Mm "1,D]#8HADDDDDDDKTzr2nGU"1,Dje\ADDDDDDD""""""" 憨F{޺'nKXܹe&NU.JHPjJUݺfb(M'n/AbXDt(\{`6@.'zi~=媈8QW{7t/Sdք]pmwsl@ sk[D5όڼ0Ȕ}{::drWBkc%ͩ`V~6gҭj۱)5w2l!kȺqj^AnuS?)mSGYQ^;5m_AAnW/ ^޲QQYgVSnP _1y\DywAyuYKKGT7ϞQV (/}0q|A;/':\+}fMiUU n^>ODJbиm w˸G.{C3x</FEU^ .񱱊*,…i5bI'k9D;#3#2fv cK?>9EP_1 ZQ݊w#v zjՐɯql[D* vrD[v+#J#ij3{A~1B(:}Mx]x%tJ؝*TH),¢yťU.-zȺyR Kg.0, Kg`YsLZ(9#ժAnJmN};fnNM=1joj}J/y`Q98e{mZ$h[-Q5/2NY͇QC3B$ y: !j27)q mb)8mT5 7鹗DK_Nmzry񱷸&4W @?\~a+eT Z ʺ<Hk>|{/d%af 6?#~%[ lr6/rkWI>Maĕ Ҍ/Zܟ~ή0t >5?OjsG˷#PT@>b}1j/*e?831a{eԗ|sZ*ǪͫQ (PN~6Q d}q j*R.vbcaR;J + ckprCm9DH?29\L/;%~>VUVY/wQa"rŽyNU&ږ*x D6r!_2t@eEŮԏ *xbń<> (?vŴZ]T-wHK"O5v`-?[u1U6^Xwج.m٫Z6 o=-ݾ?k\Oʿ4إx%t|IJ(R %ukO:aaX%4qvYaX`ٴz'+\EPl)L~7MHG/{l #ߙ<~xP 󨭿{78MT`ء| hiܓG/(K*w3XU }S7}yf3g<60S`N7I-0iJaz]awz}y$_ǛǰqqB|?8h+0g-^ jaOc? ΜKGͮo\#B=$w7 I5+HT&ߍA~x NgO6}jfY!;vHղQxa6G.{siۆD=7 f9^ vJ( K'bX8U1, Kږtґ WZ(E߫h0RxAPGh\T ]?ԻO{.ptcseƛfLdtg@oݘBYmT-'Cv|iE7w5 {bԥs߲i7~A' C; W*qT;rҡ`P@g|AgOk'xgw<3/^fF]Y LqbW(mg[̴(cLW {>a|yx%tJ=*TH.,aaXdjư0,"jS?E9+}yw?>w_YʳO>YCMv-BJǏG:X;| coG¡Cnir~3]\i-ˏ6@OGƏ6пVّumƜҢIZ#yHDyx>]qeW| ƛ 9w7$Q1n8-*dK)V-s-1j= j$+aK̰۫8a`i{gkR1, K;diOoX/̩+̩Z\{RNբOj9|+<-,;aa˛nϮPmXIwy%ugG(k#a,r1b9hݸ5PY[b]tm9z'ףkh׮p?4q+,:XW|h񭫝_1wChNC(PۣخZYcdJԒb%$[$ð- KjM 9{w\E(-su4poXA%]"/)^n{M8l#}g93Yǿhn9C\skٜH=mV-}Zx%t|IgJ(R KaXH0,]60, K;%d-|狼Wko|gh 1'y+ V5QSjXaqck-*.iZx]V*V|eђbBd.muRJ3mg\&oJmkޅOrd^Z𴼽*dA0?Lnq"'pgz䔥٧>Z|̶n 5#??7IV '˷m]4t43)@n=DMc-Ɔݸ^^iqRoHr /A=4fwuQ^]` ;g*/mWpj`9>G%J TBbXmu rQY6^&[UT4nfd2, KgbX:^0,ΖZb9-y^sYyCٸپLҼ\"TNY]H IDAT} D9v0`v{&/qfE=*P2X/\TouPo?a\M_\*Cgnx%w&5;fEjzV5''l)0kI?_kkY/|r qkLro{s*Cgn\~em_Kjoqloiͪeu#9Q ^P:P*$xJyeۍXT-0, KTX:^1,NZ%*DԒi"P>g,ٜ<?yO _u;Zj˦2]󰼋_7Wъʾ2njע["gieJXb>zTn}zhhI=k5ZXg$a`yCdM մM^0?%a}ֲV5R 9~4 ܸGyƒ7g/]~S XlVi/`qLȆ~ :.Mu]eCju<4JCPG_JI~~QP]Z% eQj<: w@g5fDGn(޼y` 򴵨j\=~^+ 4uaбkvM{(57Jg@YCU˶8FFWB'k[X ťvzvX:6U R x B'-|mN9 (b1DB'iw%ץPoY[ rt{ޞANz * nP9C N\D7nPUZrJK=JK=Jpޥ3.4 7jJN!VVyӹkDnUTj52uN+ӱJrHI]aWUU.98wR s|`ZUOt`W;B'?puJKaViVViVVi&$DDDDDDD$yDjlcXİ9a! ADDDDDDD"""""""<^B{dDf~DbXİ9}qqI;8HADDDDDDD"""""""Ņ_/߆_yl_UC. 
,zVDKagho[7a ;8 >?}i}[]ݞaVU78Ñ5\<B\xeUnXʎmC{1Vb jS)'l\OB {$qU9q~c`k`PL_YK&zi럎@,PܵNtmwsl@ sk[D5ڼ0Ȕ}{::d b闞 j@ z2Ek?9dTNeen0?cI9,ݪv1,=;8Uj}99%:X3+$h OJ̽͝?7@%[0zn\ZW.:2z[wdfސyC"B̧P._3*҈ĎrkZ-骓&6>)g#x&_׍V/8Ʊ?5ul E#˺pa{zfZM90rC\Z?-7pb~|"sc cϋݩi r<bGv+n* o\FZ_jU{@8%;_?#Z@1aN0v#%iR 1\[(C(°0,k3,'sdwfr5 54ƌTumFf‚ܪڛEA}=/"Ӹjkq'g P掎n3NŪ.,W\]Eײ;b̰tV°DdMDAVT646*pa͌6dղEIōK g\g]|)+67EY{eI|:!46Rs0 _&Jm JD'k>|{/d%afL+mq5=t$@F\ ,2hjޢrv&du_h|/콜26,HX`jwhXDܹ4TQM愼ޔ_ `X )mrq&_4q !)gvAcZ{~Lnk;1Ji~K3-+^9Sai;8&*,/naX@CMEn_l5 hLd>W;ic)@L*+*v~|T{Z}ָ8iK;OLVFPk5w~?YiΎ 7l4_eҩm8S ;y>OLJAQ2T)iO?>еXia?;0x)/ US`u~Ju KUs&*`p!U (aG|lQ濒`c=*u;P>byQ~}Í\Ӆw8_l6A㻊-aiD°Dd"L̺'=Ǐ|/NlAz'+\EP {4~6(8sŸK/y4G.{si0ۆD=7 ۃr@),Peir|WX0rܝ6Nbi{+0g-^ jra>Xq߸ٵG.{82H`Tsi ̉󨭿{n7wN*~q8@_a"t K%=0<57ć-KТj]۫ۉ`X7jcnj0_u5R йhkJ@xY?{K> :@pr-cGJۛH/,=MKuh2, 3쇥`L?4ʪ^0MW\gZحuk:bz[X]-wˉU|&Gwǜ\4bDlOPPQ^q|2#QUՏ\i-`d*ݒVUi}^Eʥ'U-ohU;fvjQ|&G9|S/qeW|mAV'GKo 8p+k`ℸ RKŰ0,68V)KsIٚTeq*mj (ƪzUX]cYQ6Ήy8dbIM1f9n烴#-K-T */̟oWt%@Iwjߌj7ˁkWh|<.qk?29-k7.488;SB U]gRz5WUvweПW?u6ƪ-aiGa!vp5)\}g?;~Ne-!rNg#Ƽ\Dœ" G.J5SW 0߸@;lY.q࿖l|<s`U nop\i{'iE4 k'Z[KVMN ↴ ҁfjq2}}L/_mPg`z.2amaS'FɤUY 16vT[]S%yy\/F5Re\j:Գm@U! ~l̳pmT/֬*̽Ot̆uVr6DgsکWLX^4@g1UQ:mLw `XG Wql 2% 6;6^.^k!"+Ue)?hP|pqL[ܸut1<»I׼ Jɼ/t[w2 ώlz8~7UfϖUh [x6˖# `pDc޿t0:YLbaQ+ƍ?qc&Nđ# iaaX:atcZ}uvHt j].+ʞ.3, K8,(=7 ǎsSPxbW `~1)]{$.-jԵWrmsCovնj'b<j>=..7/LL _,2d /-D5GpM#'Yisˆs1@-;pdPKӈPձn+c۶.:PsKٷK4L Ovzy$!A661HИ}>0]Dy[oLv3f(;^LaiNc}<°tbRjzpWC;Fݧ8E{(k?o[]CBk맿sh`jܪ:^quwS4f+zvX:^0,Ζ`hI͟{/Й^ݰI /p_Z6v)>;zQW:g'Yzۭg';駦#O5''l)0v_KZkonZY/|&y[sd{ۛS:s+kP\rG_Vx`ѩҒ$ad6âEhIJm>.aQYSֲ"Yk{ ,@t"VR^{_"lvvc׮f =x YТ["gi?w̓5v$Ǯmz񐀹GkJ*վQ=jU~{|`CPԋF(KA]a9[W޽$V?`~K,mΙKsߔi,컕 |qP9м|p}p]oc8:XLai_aaX:cal݊gQ]C< s~x WM5\M*W6{өXv 8X1VtFbXZai`0 HzW~z=lk8`cI-2A (y5-J=x)\-mx t;0ȳ4UEƱi>* ꪢ:W_?7 Լ]krqPYV =sv;NaiCaTAh àT"<==]l5Zfdڋji~QD(**7)6(dnM7S<>gsxkU|Z fh4?JEPv]KR%ɰt ňaqCaQGZ6'c^U1,1,F>,VADtw5o,/ @Sj_t^fywIHIkIJHIw;8œœL;qݸViVViגVEViVVi ]:uͲJMQC<=CھjޑViWJSVViIJNaX?iU?i)[B'?0JK=JK*JK=J{6DDDDDDD$y """""""%*DWӅmDdB A QW"""""""fLF$QMP5x\ں_CCpJںVSiq8ΩDk%U4" O eDd.$f"Sy}ef^Y߬v8 Qu0f_˼`o;]D2 Ӎ't2n||Fy'[z̐!Iw$mܠ'Z?a=C`Jn.IDATaYVpL4nT 'f}#3`|sӿU 5iAUcm5kb.,$}v^'UAW{WL_.aghὫbWEŮ}d}t6ba-Wp8i,"]P/7921Vc-Nmu&zދ앂.O菆\qӽa#,:͒.awIg#{CƕuGO6" ݟ)cxJ|Up A-Y5O׿K^x;ũY_>lc˳5UupཱུE ;o,z>ŹLSq#ԨomݜDDfOtщ XIDo9|A5U*sfzw;]Dx;dgǵ"""Үpu5z:.ޔĄn <fc{jٍZx$+sBՙg7DD^4OݚWy6|^ pv/K}(btP-*Tٍ RK9fI7D$ަ~+ϚD%fݾ!m-!֭yEӈAD|4."Պӣv E醈5U~1՗?  M࠺"v]iǴi$Rrr99lcޯb/Z])dUoiӍnL 18^yղ٣cc[w=ZLoaTi0hR}*X7^c5ޓ܄3f1'0>iblZS㵲?~WC'mASvֿ#)JٔC'*v~ :( gquI?/G7*Q\ybYOګDvY,Uz4E7d<}蟞5r6zx8l}Z%|}KUSV H+UGg  $۱렦=:~IWfOګ}R'lCT!#::/(xIĘ}b6snzոup2HA4urv#ʢ%ߗ*w<Р I}vSy*y_T_]SjL@DS0TR"{H8ԝGg9Z$֨c?$n֑DTG߮ؽ<Ҿ}Iy?]Ǻ9j}46G/M^n'8?e^hȕsw|Ie,8ȓ;&Mg^݂sxK̺a3O; .Ϊ@іYnݢtO,Vbeĵ>g]1}#1*fVzIl'q~]7o}{KQzrTYWΒ!n\qNDb½sĆ%+Ut@@5vl7a;:m3'=B2 wqpu(mTXo>^r6;4x,j\+ED/sg[ǘj;.{?gnj,h^qo3D&Ɗث>u;,OոTp +IDD̺ ٻNqEO8oN}8=Ga nQX^39%?].|"]}(V\ؔIo}IDDnْ})Ј4x5nw[7:?QN>#EԒ/g,*Pqt /1rL٦|Gťs32/jyt,ѱ9fѤ*ձh7*QR9=7r^}G%'\NsVfI%":^#=&&v\s]VVpոhm`>û$rhLy "2GB ,WEXչֹAa݁V" \wFg289hOT )L b~~!UR` are now accessible. * Instead of static pages, pages are generated on the fly, allowing users to drill down to find out about a :ref:`specific host `, rather than only having one huge page with too much information. * Ability to store reporting data separately from other server data. Installation ============ Quickstart ---------- :ref:`appendix-guides-web-reports-install` Prerequisites ------------- * sqlite3 * pysqlite2 (if using python 2.4) * `Django `_ >= 1.3 * mod-wsgi .. warning:: There is a known issue when using an sqlite database on an ext4 filesystem. 
You will want to remount the filesystem without barriers (-o barrier=0) in order to speed up the operations of the database. For more information, please see http://phoronix-test-suite.com/pipermail/trondheim-pts_phoronix-test-suite.com/2009-March/000095.html. Install ------- 1. Be sure to include the specified fields included in the example ``bcfg2.conf`` file. These can be specified in either ``/etc/bcfg2.conf``, if it is readable by the webserver user, or ``/etc/bcfg2-web.conf``. Any database supported by `Django `_ can be used. As of version 1.3, `South `_ is used to control schema changes. If your database is not supported by South, any updates will need to be applied manually. Sqlite is configured by default. Please see the :ref:`reporting-databases` section to configure alternative databases. .. warning:: If you are using an sqlite database, the directory containing the database file will need to be writable by the web server. The reason for this is that sqlite will create another file for its journal when it tries to update the database file. .. note:: Distributed environments can share a single remote database for reporting. 2. After configuring your database be sure to run ``bcfg2-admin reports init`` to create the schema. 3. To enable statistics collection in the bcfg2-server, add :ref:`server-plugins-statistics-reporting` to the **plugins** line in your ``bcfg2.conf`` and restart the bcfg2-server. A report collecting daemon should be run to import the collected statistics into the backend. Please see the section :ref:`Report Collector ` for more information. Detailed installation instructions can be found :ref:`here `. .. _dynamic-http-install: Apache configuration for web-based reports ------------------------------------------ .. note:: Reports no longer needs to be installed at the root URL for a given host. Therefore, reports no longer require their own virtual host. In order to make this work, you will need to specify your web prefix by adding a **web_prefix** setting in the [statistics] section of your ``bcfg2.conf``. .. warning:: When running with SELINUX enabled, you can have potential problems with the WSGISocketPrefix. One solution that works without too much trouble is modifying your prefix so that it is located in a standard location:: WSGISocketPrefix /var/run/httpd/wsgi An example site config is included below:: # # Read an alternate configuration file # # SetEnv BCFG2_CONFIG_FILE /etc/bcfg2_testing.conf # # If the root is changed update the static content alias as well # WSGIScriptAlias /bcfg2 "/usr/share/bcfg2/reports.wsgi" WSGISocketPrefix run WSGIDaemonProcess Bcfg2.Server.Reports processes=1 threads=10 WSGIProcessGroup Bcfg2.Server.Reports # # Manually set this to override the static content # #SetEnv bcfg2.media_url /bcfg2/site_media/ # # This should have the same prefix as WSGIScriptAlias # Alias "/bcfg2/site_media/" "/usr/share/bcfg2/site_media/" Options None AllowOverride None order deny,allow deny from all allow from 127.0.0.1 This configuration is suitable for use with the default installation from an RPM or deb package. At this point you should be able to point your web browser to http://localhost/bcfg2 and see the new reports. Upgrading ============ 1. Convert database config Run `tools/upgrade/1.3/migrate_configs.py` Beginning with 1.3 the database configuration moved from [statistics] to [database] in `bcfg2.conf` and `bcfg2-web.conf`. The old settings will be accepted but a deprecation warning will be displayed. 2. 
Replace the DBStats plugin with the Reporting plugin. 3. Migrate historic data. Run ``tools/upgrade/1.3/migrate_dbstats.py`` The reporting schema is now managed using `South `_ instead of a set of custom scripts. This creates the new schema and imports all of the historic data to the new format. .. note:: After the database is upgraded all of the old tables are left intact. To remove them any table starting with **reports\_** can be dropped. 4. `(Optional)` Run the :ref:`Report Collector ` Add "transport = LocalFilesystem" under "[reporting]" in ``bcfg2.conf``. Restart the bcfg2-server and start the bcfg2-report-collector. Configuring =========== Most of the configuration is handled through the ``/etc/bcfg2.conf`` or alternatively ``/etc/bcfg2-web.conf``. An example using the defaults is listed below:: [database] engine = sqlite3 name = /var/lib/bcfg2/etc/bcfg2.sqlite user = password = host = port = [reporting] transport = DirectStore web_prefix = file_limit = 1m Configuration Sections ---------------------- .. _reporting-databases: database ^^^^^^^^ If you choose to use a different database, you'll need to edit ``/etc/bcfg2.conf``. These fields should be updated in the ``[database]`` section: * engine * ex: engine = mysql * ex: engine = postgresql_psycopg2 * name * user * password * host * port (optional) To store reporting data separately from the main server data, use the following options: * reporting_engine * ex: reporting_engine = mysql * ex: reporting_engine = postgresql_psycopg2 * reporting_name * reporting_user * reporting_password * reporting_host * reporting_port (optional) .. warning:: If mysql is used as a backend, it is recommended to use InnoDB for the `storage engine `_. Refer to :ref:`server-database` for a full listing of available options. statistics ^^^^^^^^^^ .. deprecated: 1.3.0 * config: The config file to be read for additional reporting data. This is used to restrict what can be read by the web server. * time_zone: The django TIME_ZONE settings parameter. * web_debug: Set Django's DEBUG and TEMPLATE_DEBUG settings. This is known to cause memory leaks. Use with caution! reporting ^^^^^^^^^ * transport: See :ref:`Transports `. * web_prefix: Prefix to be added to Django's MEDIA_URL * file_limit: The maximum size of a diff or binary data to store in the database. * max_children: Maximum number of children for the reporting collector. Use 0 to disable the limit and spawn a thread as soon as a working file is available. .. _dynamic_transports: Statistics Transports --------------------- A transport is required to pass the data collected from the bcfg2-server to the bcfg2-report-collector. At the time of this writing two transports are available: * LocalFilesystem: Statistics are written to the local file system and collected on the local machine. * RedisTransport: Statistics are sent through a list in redis. * DirectStore: DBStats style threaded imports in the main server process. Future transports will allow multiple servers to pass data to a single or multiple bcfg2-report-collector processes. New installations default to and should use the LocalFilesystem transport. Upgrades will use DirectStore by default in the 1.3 release. .. Note:: If DirectStore is used, the bcfg2-report-collector process will refuse to run since this method is not compatible with an external process. RedisTransport ^^^^^^^^^^^^^^ This transport uses a single redis instance for communication between bcfg2-server and bcfg2-report-collector. 
Multiple servers can write to a single redis instance and multiple report collectors may be run as well. An example configuration with the default values:: [reporting] transport = RedisTransport redis_host = 127.0.0.1 redis_port = 6379 redis_db = 0 bcfg2-admin commands operate slightly differently in this mode. Instead of querying the database directly, rpc commands are issued to the report collectors. This only affects the minestruct and pull commands. .. warning:: At the time of this writing the version of python-redis in EPEL is too old to use with this transport. Current versions of the python-redis package require python >= 2.5. Usage ===== .. _report_collector: Report Collector daemon ----------------------- .. Note:: This section does not apply when the DirectStore transport is used. The bcfg2-report-collector gathers statistics from the bcfg2-server process and records them in the backend database. Options are similar to the bcfg2-server daemon:: -D Daemonize process, storing pid -o Set path of file log -h Print this usage message -E Encoding of cfg files -W Web interface configuration file -Q Server repository path -C Specify configuration file --version Print the version and exit -d Enable debugging output -v Enable verbose output .. Note:: The bcfg2-report-collector is not set to start by default bcfg2-admin reports (command line script) ----------------------------------------- The bcfg2-admin tool provides management and maintenance capabilities for the reporting database. A few useful `Django `_ commands are provided as well. * init: Initialize a new database * update: Apply any updates to the reporting database. Unlike the syncdb command, this will modify existing tables. * purge: Removes unwanted clients and data. * -c --client [client name] - Remove interactions from a single client. * --expired - Remove all data for expired clients. --days is used to exclude clients expired within n days. * --days [n] - Remove interactions older then n days. If not used with any other modifiers, all data older then n days is removed. * scrub: Scrub the database for any orphaned objects. Django commands ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * dbshell: Connects to the backend database. * shell: Starts an interactive python shell with the Django environment setup. * sqlall: Print the sql statements used to create the database. * validate: Validate the database against the current models. bcfg2-reports (command line script) ----------------------------------- bcfg2-reports allows you to retrieve data from the database about clients, and the states of their current interactions. It also allows you to change the expired/unexpired states. The utility runs as a standalone application. It does, however, use the models from ``/src/lib/Server/Reports/reports/models.py``. A number of different options can be used to change what bcfg2-reports displays:: Usage: python bcfg2-reports [option] ... 
Options and arguments (and corresponding environment variables): -a : shows all hosts, including expired hosts -b NAME : single-host mode - shows bad entries from the current interaction of NAME -c : shows only clean hosts -d : shows only dirty hosts -e NAME : single-host mode - shows extra entries from the current interaction of NAME -h : shows help and usage info about bcfg2-reports -m NAME : single-host mode - shows modified entries from the current interaction of NAME -s NAME : single-host mode - shows bad, modified, and extra entries from the current interaction of NAME -t NAME : single-host mode - shows total number of managed and good entries from the current interaction of NAME -x NAME : toggles expired/unexpired state of NAME --badentry=KIND,NAME : shows only hosts whose current interaction has bad entries in of KIND kind and NAME name; if a single argument ARG1 is given, then KIND,NAME pairs will be read from a file of name ARG1 --modifiedentry=KIND,NAME : shows only hosts whose current interaction has modified entries in of KIND kind and NAME name; if a single argument ARG1 is given, then KIND,NAME pairs will be read from a file of name ARG1 --extraentry=KIND,NAME : shows only hosts whose current interaction has extra entries in of KIND kind and NAME name; if a single argument ARG1 is given, then KIND,NAME pairs will be read from a file of name ARG1 --fields=ARG1,ARG2,... : only displays the fields ARG1,ARG2,... (name,time,state,total,good,bad) --sort=ARG1,ARG2,... : sorts output on ARG1,ARG2,... (name,time,state,total,good,bad) --stale : shows hosts which haven't run in the last 24 hours Screenshots =========== Grid Overview ------------- .. image:: GridView.png :alt: Grid overview :width: 850px :height: 530px Detailed Overview ----------------- .. image:: DetailedView.png :alt: Detailed overview :width: 850px :height: 530px .. _reports-calendar-summary: Calendar Summary ---------------- .. image:: CalView.png :alt: Calendar summary :width: 850px :height: 530px .. _reports-item-detail: Client Detail ------------- .. _reports-client-detail: .. image:: ClientDetail.png :alt: Client detail :width: 850px :height: 530px Common Problems --------------- .. image:: CommonProblems.png :alt: Common configuration problems :width: 850px :height: 530px Item Listing ------------ .. image:: BadListing.png :alt: Item listing :width: 850px :height: 530px Item Detail ----------- .. image:: ConfigItem.png :alt: Item detail :width: 850px :height: 530px doc/reports/index.txt000066400000000000000000000014031303523157100151670ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-reports-index: The Bcfg2 Reporting System ========================== Bcfg2's reporting system is its killer feature. It allows administrators to gain a broad understanding of the configuration state of their entire environment. It summarizes * Configuration changes and when they were made * Discrepancies between the specification and current client states * Clients can be grouped by misconfiguration type * Configuration entries that are not specified * Overall client summaries according to these types There are two systems, the old system, which builds static reports based on a series of XSLT stylesheets and a new dynamic reporting system that uses django and a database backend. .. toctree:: :maxdepth: 2 dynamic doc/server/000077500000000000000000000000001303523157100131315ustar00rootroot00000000000000doc/server/acl.txt000066400000000000000000000033201303523157100144270ustar00rootroot00000000000000.. 
-*- mode: rst -*- .. _server-access-control: ================ Access Control ================ .. versionadded:: 1.4.0 Bcfg2 exposes various functions via XML-RPC calls. Some of these are relatively benign (e.g., the calls necessary to generate a client configuration) while others can be used to inspect potentially private data on the server or very easily mount a denial of service attack. As a result, access control lists to limit exposure of these calls is built in. There are two possible ACL methods: built-in, and the :ref:`server-plugins-misc-acl` plugin. The built-in approach simply applies a restrictive default ACL that lets ``localhost`` perform all XML-RPC calls, and restricts all other machines to only the calls necessary to run the Bcfg2 client. Specifically: * If the remote client is ``127.0.0.1``, the call is allowed. Note that, depending on where your Bcfg2 server listens and how it communicates with itself, it likely will not identify to itself as ``localhost``. * If the remote client is not ``127.0.0.1`` and the call is any of the ``set_debug`` or ``toggle_debug`` methods (including ``[toggle|set]_core_debug``), it is rejected. * If the remote client is not ``127.0.0.1`` and the call is ``get_statistics`` (used by ``bcfg2-admin perf``), it is rejected. * If the remote client is not ``127.0.0.1`` and the call includes a ``.`` -- i.e., it is dispatched to any plugin, such as ``Packages.Refresh`` -- then it is rejected. * Otherwise, the call is allowed. The built-in ACL is *only* intended to ensure that Bcfg2 is secure by default; it will not be sufficient in many (or even most) cases. In these cases, it's recommended that you use the :ref:`server-plugins-misc-acl` plugin. doc/server/admin/000077500000000000000000000000001303523157100142215ustar00rootroot00000000000000doc/server/admin/backup.txt000066400000000000000000000006451303523157100162340ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-backup-index: backup ====== .. _Samples repository: https://github.com/solj/bcfg2-repo Create an archive of the whole Bcfg2 :term:`repository`. The archive is stored directly in your Bcfg2 repository (e.g. ``/var/lib/bcfg2/``) and named with the current date and time:: bcfg2-admin backup A backup is recommended before you start using the `Samples repository`_ of Bcfg2. doc/server/admin/client.txt000066400000000000000000000015331303523157100162420ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-client: client ====== Create, delete, list, or modify client entries. :: bcfg2-admin client add attr1=val1 attr2=val2 Allowed attributes are *profile*, *uuid*, *password*, *location*, *secure*, and *address*. A full example is shown below:: bcfg2-admin client add laptop02.example.com profile="basic" For more details please refer to the :ref:`Metadata section `. With ``list`` the file ``clients.xml`` is parsed and all entries are shown:: bcfg2-admin client list server01.example.com laptop02.example.com This is useful for a quick check after adding an entry. If you want more in-depth information about a client, ``bcfg2-info clients`` can provide that. Please refer to the :ref:`bcfg2-info ` section for further details. doc/server/admin/compare.txt000066400000000000000000000003571303523157100164150ustar00rootroot00000000000000.. -*- mode: rst -*- .. 
_server-admin-compare: compare ======= Determine differences between files or directories of client specification instances:: bcfg2-admin compare Or:: bcfg2-admin compare doc/server/admin/index.txt000066400000000000000000000010051303523157100160650ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-index: ===== Admin ===== The ``bcfg2-admin`` command provides you an interface which allows you to interact with your Bcfg2 :term:`repository` in an administrative fashion. To get started, run ``bcfg2-admin help``. You will be presented with a list of different *modes* which each provide various administrative functionality. Available modes are listed below. .. toctree:: :maxdepth: 1 backup client compare init minestruct perf pull viz xcmd doc/server/admin/init.txt000066400000000000000000000022111303523157100157210ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-init: init ==== Interactively initialize a new repository. Most values are automatically detected or a default value is provided. :: bcfg2-admin init Store bcfg2 configuration in [/etc/bcfg2.conf]: Location of bcfg2 repository [/var/lib/bcfg2]: Input password used for communication verification (without echoing; leave blank for a random): What is the server's hostname [conf01.example.com]: Input the server location [https://conf01.example.com:6789]: Input base Operating System for clients: 1: Red Hat/Fedora/RHEL/RHAS/Centos 2: SUSE/SLES 3: Mandrake 4: Debian 5: Ubuntu 6: Gentoo 7: FreeBSD : 1 Generating a 2048 bit RSA private key .....................+++ .....................+++ writing new private key to '/etc/bcfg2.key' ----- Signature ok subject=/C=US/ST=Illinois/L=Argonne/CN=conf01.example.com Getting Private key A toplevel repository structure was created under the provided path. :: /var/lib/bcfg2 |-- Bundler |-- Cfg |-- etc |-- Metadata |-- Pkgmgr |-- Rules `-- SSHbase doc/server/admin/minestruct.txt000066400000000000000000000005741303523157100171650ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-minestruct: minestruct ========== Extract extra entry lists from statistics.:: bcfg2-admin minestruct [-f xml-file] [-g groups] Hierarchy of groups in which to place the extra entries in can be determined with ``-g ``. The ``-f `` option specifies the xml file in which to write the extra entries. doc/server/admin/perf.txt000066400000000000000000000011011303523157100157070ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-perf: perf ==== Query server for performance data.:: bcfg2-admin perf ================ ========== ========== ========== ======= Name Min Max Mean Count ================ ========== ========== ========== ======= RecvStats 0.000378 0.001716 0.001367 5 GetConfig 0.018624 0.039495 0.023589 5 component_lock 0.000002 0.000057 0.000016 20 GetProbes 0.000523 0.000666 0.000591 5 RecvProbeData 0.002260 0.004550 0.002979 doc/server/admin/pull.txt000066400000000000000000000006221303523157100157360ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-pull: pull ==== Integrate configuration information from clients into the server repository. :: bcfg2-admin pull [-v] [-f][-I] [-s] The following options are available: ``-v`` verbose ``-f`` force ``-I`` interactive ``-s`` stdin .. FIXME: No example yet .. A full example is shown below. :: .. bcfg2-admin pull doc/server/admin/viz.txt000066400000000000000000000007171303523157100155770ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-viz: viz === Produce graphviz diagrams of metadata structures. 
Make sure that the graphviz package is installed. The following command will produce a graphviz image which includes hosts, bundles, and a key:: bcfg2-admin viz -H -b -k -o ~/bcfg2.png .. note:: The graphviz package available via DAG/RPMforge has been known to have dependency issues. We recommend installing the package from EPEL. doc/server/admin/xcmd.txt000066400000000000000000000010411303523157100157110ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-admin-xcmd: xcmd ==== XML-RPC Command Interface.:: xcmd For debbuging the following command can help:: bcfg2-admin xcmd Metadata.toggle_debug Those two examples can alos be found in the :ref:`Package section `. To rebuild the packages plugin cache:: bcfg2-admin xcmd Packages.Refresh To perform a soft reload to reread the configuration file and download only missing sources.:: bcfg2-admin xcmd Packages.Reload doc/server/bcfg2-info.txt000066400000000000000000000121731303523157100156120ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-bcfg2-info: ================ Using bcfg2-info ================ ``bcfg2-info`` is a tool for introspecting server functions. It is useful for understanding how the server is interpreting your repository. It consists of the same logic executed by the server to process the repository and produce configuration specifications, just without all of the network communication code. Think of ``bcfg2-info`` as ``bcfg2-server`` on a stick. It is a useful location to do testing and staging of new configuration rules, prior to deployment. This is particularly useful when developing templates, or developing Bcfg2 plugins. Getting Started =============== First, fire up the ``bcfg2-info`` interpreter. .. code-block:: none [0:464] bcfg2-info Loading experimental plugin(s): Packages NOTE: Interfaces subject to change Handled 8 events in 0.006s Handled 4 events in 0.035s Welcome to bcfg2-info Type "help" for more information > At this point, the server core has been loaded up, all plugins have been loaded, and the ``bcfg2-info`` has both read the initial state of the Bcfg2 repository, as well as begun monitoring it for changes. Like *bcfg2-server*, ``bcfg2-info`` monitors the repository for changes, however, unlike *bcfg2-server*, it does not process change events automatically. File modification events can be processed by explicitly calling the **update** command. This will process the events, displaying the number of events processed and the amount of time taken by this processing. If no events are available, no message will be displayed. For example, after a change to a file in the repository: .. code-block:: none > update Handled 1 events in 0.001s > update > This explicit update process allows you to control the update process, as well as see the precise changes caused by repository modifications. ``bcfg2-info`` has several builtin commands that display the state of various internal server core state. These are most useful for examining the state of client metadata, either for a single client, or for clients overall. **clients** Displays a list of clients, along with their profile groups **groups** Displays a list of groups, the inheritance hierarchy, profile status, and category name, if there is one. **showclient** Displays full metadata information for a client, including profile group, group memberships, bundle list, and any connector data, like Probe values or Property info. **config** Displays the configuration of the Bcfg2 server. To leave the interactive shell, just type ``quit`` or ``exit``. 
Debugging Configuration Rules ============================= In addition to the commands listed above for viewing client metadata, there are also commands which can shed light on the configuration generation process. Recall that configuration generation occurs in three major steps: 1) Resolve client metadata 2) Build list of entries for the configuration 3) Bind host-specific version of each entry Step *1* can be viewed with the commands presented in the previous section. The latter two steps can be examined using the following commands. **showentries** displays a list of entries (optionally filtered by type) that appear in a client's configuration specification **buildbundle** Render a single bundle template. This only performs the template rendering step; it does not fully bind all entries in the bundle. This command is very useful when developing bundle templates. **buildfile** Perform the entry binding process on a single entry, displaying its results. This command is very useful when developing configuration file templates. **build** Build the full configuration specification and write it to a file. **mappings** displays the entries handled by the plugins loaded by the server core. This command is useful when the server reports a bind failure for an entry. Debugging and Developing Bcfg2 ============================== ``bcfg2-info`` loads a full Bcfg2 server core, so it provides the ideal environment for developing and debugging Bcfg2. Because it is hard to automate this sort of process, we have only implemented two commands in ``bcfg2-info`` to aid in the process. **profile** The profile command produces python profiling information for other ``bcfg2-info`` commands. This can be used to track performance problems in configuration generation. **debug** The debug command exits the ``bcfg2-info`` interpreter loop and drops to a python interpreter prompt. The Bcfg2 server core is available in this namespace as "self". Full documentation for the server core is out of scope for this document. This capability is most useful to call into plugin methods, often with setup calls or the enabling of diagnostics. It is possible to return to the ``bcfg2-info`` command loop by exiting the python interpreter with ^D. There is built-in support for IPython in ``bcfg2-info``. If IPython is installed, dropping into debug mode in ``bcfg2-info`` will use the IPython interpreter by default. doc/server/caching.txt000066400000000000000000000050461303523157100152730ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-caching: =================== Server-side Caching =================== Metadata Caching ================ .. versionadded:: 1.3.0 Caching (or, rather, cache expiration) is always a difficult problem, but it's particularly vexing in Bcfg2 due to the number of different data sources incorporated. In 1.3.0, we introduced some limited caching of client metadata objects. Since a client metadata object can be generated anywhere from 7 to dozens of times per client run (depending on your templates), and since client metadata generation was made more complex and powerful in 1.3.0, caching these objects provides the easiest performance gain. To enable caching, add a ``[caching]`` section to bcfg2.conf with a client_metadata option containing one of the following modes: * ``off``: No caching of client metadata objects is performed. This is the default. * ``initial``: Only initial metadata objects are cached. 
Initial metadata objects are created only from the data in the :ref:`server-plugins-grouping-metadata` plugin, before additional groups from other plugins are merged in. * ``cautious``: Final metadata objects are cached, but each client's cache is cleared at the start of each client run, immediately after probe data is received. Cache is also cleared as in ``aggressive`` mode. ``on`` is a synonym for ``cautious``. * ``aggressive``: Final metadata objects are cached. Each plugin is responsible for clearing cache when appropriate. These are presented roughly in ascending order of speed, and descending order of reliability. That is, odds are higher that ``aggressive`` mode will result in stale data, but it gives the biggest speed boost. ``off`` will never result in stale data, but it gives no speed boost. In addition to the :ref:`server-plugins-grouping-metadata` plugin, Bcfg2 includes three plugins that can set additional groups, and thus may affect the caching behavior. They are :ref:`server-plugins-grouping-grouppatterns`, :ref:`server-plugins-probes`, and :ref:`server-plugins-connectors-puppetenc`. All of those plugins **except** for PuppetENC fully support all caching levels. PuppetENC is incompatible with ``aggressive``, and may result in some stale data with ``cautious``. If you are not using the PuppetENC plugin, and do not have any custom plugins that provide additional groups, then all four modes should be safe to use. If you are using PuppetENC or have custom Connector plugins that provide additional groups, then you may want to start with ``cautious`` or ``initial``. doc/server/configuration.txt000066400000000000000000000213611303523157100165440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-configuration: ====================== Server Configuration ====================== This page documents various aspects of server configuration. .. _server-dropping-privs: Running as a non-root user ========================== Although the Bcfg2 server runs as root by default, it is possible (and probably encouraged) to run it as an unprivileged user. This may become the default in the future. This can be done in all versions of Bcfg2, although it has become easier in 1.3.0. The steps to do so are described in three sections below: Common steps for all versions; steps for older versions only; and steps for 1.3.0. Many of the steps below may have already been performed by your OS packages. Common Steps ------------ We will assume for the sake of these steps that we are running the Bcfg2 server as the ``bcfg2`` user, who is a member of the ``bcfg2`` group. To create that user and group, you can run: .. code-block:: bash groupadd bcfg2 useradd -g bcfg2 -M -r -s /sbin/nologin -d /var/lib/bcfg2 \ -c "Bcfg2 server user" bcfg2 ``useradd`` arguments can vary wildly on different OSes, so please read ``useradd`` and run a command appropriate for your platform. The Bcfg2 server has to be able to read and write its data, so we need to set ownership on several things. The config file and specification data, of course: .. code-block:: bash chown bcfg2:bcfg2 /etc/bcfg2.conf chmod 0600 /etc/bcfg2.conf chown -R bcfg2:bcfg2 /var/lib/bcfg2/* chmod -R 0700 /var/lib/bcfg2/* Note that this does not change the permissions of ``/var/lib/bcfg2`` itself, which would prevent the ``bcfg2`` user from enabling a new plugin. 
If you depend on this capability (e.g., if your specification is stored in a VCS and checked out onto the Bcfg2 server by a script running as the ``bcfg2`` user), then you would want to ``chown`` and ``chmod`` ``/var/lib/bcfg2`` rather than ``/var/lib/bcfg2/*``. The Bcfg2 server also needs to be able to read its SSL certificate, key and the SSL CA certificate: .. code-block:: bash chown bcfg2:bcfg2 /etc/pki/tls/private/bcfg2.key \ /etc/pki/tls/certs/bcfg2.crt chmod 0600 /etc/pki/tls/private/bcfg2.key chmod 0644 /etc/pki/tls/certs/bcfg2.crt The paths to your SSL key and cert may be quite different, particularly on older versions of Bcfg2. .. note:: This step can be skipped if you are using the CherryPy :ref:`backend `. CherryPy reads in the certificate data before dropping privileges, so you can (and should) keep the keypair owned by root to prevent a compromised Bcfg2 server process from modifying that data. Most of these steps can (and should) be done via Bcfg2 itself. Steps on older versions ----------------------- On older versions of Bcfg2, you must change the location of the PID file. This change has been made the default in newer versions. This can be accomplished in one of two ways. * On systems where ``/var/run`` is world-writable with the sticky bit set, no change needs to be made. * On systems where ``/var/run`` is only writable by root, create a subdirectory for the PID file and configure the Bcfg2 server to write its PID file there: .. code-block:: bash mkdir /var/run/bcfg2-server chown bcfg2:bcfg2 /var/run/bcfg2-server chmod 0644 /var/run/bcfg2-server To change the PID file: * On Debian and derivatives, add ``export PIDFILE=/var/run/bcfg2-server/bcfg2-server.pid`` to ``/etc/default/bcfg2-server`` * On Red Hat Enterprise Linux and derivatives, add ``export PIDFILE=/var/run/bcfg2-server/bcfg2-server.pid`` to ``/etc/sysconfig/bcfg2-server``. This includes recent versions that are using systemd. * On other platforms, take the appropriate steps to change the PID file, which is given to the ``bcfg2-server`` process with the ``-D`` option, in your init system. On older versions of Bcfg2, you must also manually change the init script or process to drop privileges to the ``bcfg2`` user before the daemon is even invoked. * On RHEL and derivatives that are not using systemd, modify the ``bcfg2-server`` init script to run ``daemon --user=bcfg2 $DAEMON ...`` in the ``start()`` function. * On Debian and derivatives, modify the ``bcfg2-server`` init script to run ``start_daemon --user=bcfg2 ${DAEMON} ...`` in the ``start()`` function. * On systems that use systemd as their init system, add ``User=bcfg`` to the ``[Service]`` section of ``/etc/systemd/system/bcfg2-server.service`` * On other platforms, take the appropriate steps to change to the ``bcfg2`` user when spawning the ``bcfg2-server`` daemon. Restart ``bcfg2-server`` and you should see it running as non-root in ``ps`` output:: % ps -ef | grep '[b]cfg2-server' 1000 11581 1 0 07:55 ? 00:00:15 python usr/sbin/bcfg2-server -C /etc/bcfg2.conf -D /var/run/bcfg2-server/bcfg2-server.pid Steps on Bcfg2 1.3.0 -------------------- .. versionadded:: 1.3.0 On Bcfg2 1.3, the default PID file location has been changed, but it is still owned by root since no ``bcfg2`` user is created by default. Consequently, you simply have to run: .. code-block:: bash chown bcfg2:bcfg2 /var/run/bcfg2-server chmod 0755 /var/run/bcfg2-server Additionally, the server daemon itself supports dropping privileges natively in 1.3. 
Simply add the following lines to ``bcfg2.conf``:: [server] ... user = bcfg2 group = bcfg2 Restart ``bcfg2-server`` and you should see it running as non-root in ``ps`` output:: % ps -ef | grep '[b]cfg2-server' 1000 11581 1 0 07:55 ? 00:00:15 python usr/sbin/bcfg2-server -C /etc/bcfg2.conf -D /var/run/bcfg2-server/bcfg2-server.pid .. _server-backends: Server Backends =============== .. versionadded:: 1.3.0 Bcfg2 supports three different server backends: a builtin server based on the Python SimpleXMLRPCServer object; a server that uses CherryPy (http://www.cherrypy.org); and a version of the builtin server that uses the Python :mod:`multiprocessing` module. Each one has advantages and disadvantages. The builtin server: * Is very stable and mature; * Supports certificate authentication; * Works on Python 2.4; * Is slow with larger numbers of clients. The multiprocessing server: * Leverages most of the stability and maturity of the builtin server, but does have some new bits; * Introduces concurrent processing to Bcfg2, which may break in various edge cases; * Supports certificate authentication; * Requires Python 2.6; * Is faster with large numbers of concurrent runs. The CherryPy server: * Is very new and potentially buggy; * Does not support certificate authentication yet, only password authentication; * Requires CherryPy 3.3, which requires Python 2.5; * Is smarter about daemonization, particularly if you are :ref:`server-dropping-privs`; * Is faster with large numbers of clients. Basically, the builtin server should be used unless you have a particular need for performance. The CherryPy server is purely experimental at this point. To select which backend to use, set the ``backend`` option in the ``[server]`` section of ``/etc/bcfg2.conf``. Options are: * ``cherrypy`` * ``builtin`` * ``multiprocessing`` * ``best`` (the default; currently the same as ``builtin``) ``best`` may change in future releases. Multiprocessing core configuration ---------------------------------- If you use the multiprocessing core, there are other bits you may wish to twiddle. By default, the server spawns as many children as the host has CPUs. (This is determined by ``multiprocessing.cpu_count()``.) To change this, set: .. code-block:: ini [server] children = 4 The optimal number of children may vary depending on your workload. For instance, if you are using :ref:`native yum library support `, then a separate process is spawned for each client to resolve its package dependencies, so keeping the children at or below the CPU count is likely a good idea. If you're not using native yum library support, though, you may wish to oversubscribe the core slightly. It's recommended that you test various configurations and use what works best for your workload. Secondly, if ``tmpwatch`` is enabled, you must either disable it or exclude the pattern ``/tmp/pymp-\*``. For instance, on RHEL or CentOS you may have a line like the following in ``/etc/cron.daily/tmpwatch``: .. code-block:: bash /usr/sbin/tmpwatch -x /tmp/.X11-unix -x /tmp/.XIM-unix -x /tmp/.font-unix \ -x /tmp/.ICE-unix -x /tmp/.Test-unix 240 /tmp You would need to add ``-X /tmp/pymp-\*`` to it, like so: .. code-block:: bash /usr/sbin/tmpwatch -x /tmp/.X11-unix -x /tmp/.XIM-unix -x /tmp/.font-unix \ -x /tmp/.ICE-unix -x /tmp/.Test-unix -X /tmp/pymp-\* 240 /tmp See https://bugzilla.redhat.com/show_bug.cgi?id=1058310 for more information. doc/server/configurationentries.txt000066400000000000000000000024161303523157100201360ustar00rootroot00000000000000.. 
-*- mode: rst -*- .. _server-configurationentries: ===================== Configuration Entries ===================== The full semantics of each configuration entry is documented with the :ref:`server-plugins-generators-rules` plugin. .. _boundentries: Bound Entries ============= This feature is a mechanism to specify a full entry at once from a bundle. Traditionally, entries are defined in two stages. First, an abstract entry is defined in a bundle. This entry includes a type (the XML tag) and a name attribute. Then this entry is bound for a client, providing the appropriate instance of that entry for the client. Specifying a bound entry short-circuits this process; the only second stage processing on Bound entries is to remove the "Bound" prefix from the element tag. The use of a bound entry allows the single stage definition of a complete entry. Bound entries can be used for any type. Example: .. code-block:: xml altsrc ====== The ``altsrc`` attribute lets you remap configuration entry names on the server side so you can reuse a single concrete representation for multiple abstract entries. See :ref:`server-plugins-structures-altsrc` for more details. doc/server/database.txt000066400000000000000000000155221303523157100154430ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-database: ======================== Global Database Settings ======================== .. versionadded:: 1.3.0 Several Bcfg2 plugins, including :ref:`server-plugins-grouping-metadata`, :ref:`server-plugins-probes`, and :ref:`server-plugins-statistics-reporting`, can connect use a relational database to store data. They use the global database settings in ``bcfg2.conf``, described in this document, to connect. .. note:: Although SQLite is supported as a database, it may cause significant thread contention (and a performance penalty) if you use SQLite with :ref:`server-plugins-grouping-metadata` or :ref:`server-plugins-probes`. If you are using the database-backed features of either of those plugins, it's recommended that you use a higher performance database backend. Separate Reporting Database =========================== .. versionadded:: 1.4.0 Bcfg2 supports storing the data generated by the :ref:`server-plugins-statistics-reporting` in a separate database from the data generated by the other plugins (e.g. :ref:`server-plugins-grouping-metadata` and :ref:`server-plugins-probes`). To activate this support, set the ``reporting_engine``, ``reporting_name``, ``reporting_user``, etc. options in the ``[database]`` section of the config file. The valid values for the ``reporting_*`` options are the same as for the standard database options. See :ref:`server-database-configuration-options` for a full listing. .. _server-database-configuration-options: Configuration Options ===================== All of the following options should go in the ``[database]`` section of ``/etc/bcfg2.conf``. +--------------------+------------------------------------------------------------+---------------------------------------+ | Option name | Description | Default | +====================+============================================================+=======================================+ | engine | The name of the Django database backend to use. 
See | "sqlite3" | | | https://docs.djangoproject.com/en/dev/ref/settings/#engine | | | | for available options (note that django.db.backends is not | | | | included in the engine name) | | +--------------------+------------------------------------------------------------+---------------------------------------+ | name | The name of the database | "/var/lib/bcfg2/etc/bcfg2.sqlite" | +--------------------+------------------------------------------------------------+---------------------------------------+ | user | The user to connect to the database as | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | password | The password to connect to the database with | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | host | The host to connect to | "localhost" | +--------------------+------------------------------------------------------------+---------------------------------------+ | port | The port to connect to | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | options | Extra parameters to use when connecting to the database. | None | | | Available parameters vary depending on your database | | | | backend. The parameters are supplied as the value of the | | | | django OPTIONS setting. | | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_engine | The name of the Django database backend to use for the | None | | | reporting database. Takes the same values as ``engine``. | | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_name | The name of the reporting database | "/var/lib/bcfg2/etc/reporting.sqlite" | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_user | The user to connect to the reporting database as | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_password | The password to connect to the reporting database with | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_host | The host to connect to for the reporting database | "localhost" | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_port | The port to connect to for the reporting database | None | +--------------------+------------------------------------------------------------+---------------------------------------+ | reporting_options | Extra parameters to use when connecting to the reporting | None | | | database. Available parameters vary depending on your | | | | database backend. The parameters are supplied as the | | | | value of the django OPTIONS setting. | | +--------------------+------------------------------------------------------------+---------------------------------------+ Database Schema Sync ==================== After making changes to the configuration options or adding a plugin that uses the global database, you should run ``bcfg2-admin syncdb`` to resync the database schema. 
doc/server/encryption.txt000066400000000000000000000210171303523157100160650ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-encryption: ===================== Bcfg2 Data Encryption ===================== .. versionadded:: 1.3.0 Bcfg2 supports encrypting some data on the disk, which can help protect sensitive data from other people who need access to the Bcfg2 repository but are perhaps not authorized to see all data. It supports multiple passphrases, which can be used to enforce separations between teams, environments, etc. Use of the encryption feature requires M2Crypto 0.18 or newer. .. note:: This feature is *not* intended to secure the files against a malicious attacker who has gained access to your Bcfg2 server, as the encryption passphrases are held in plaintext in ``bcfg2.conf``. This is only intended to make it easier to use a single Bcfg2 repository with multiple admins who should not necessarily have access to each other's sensitive data. Two basic types of data can be encrypted: * :ref:`server-plugins-generators-cfg` files can be encrypted as whole files. See :ref:`server-plugins-generators-cfg-encryption` for more details. * :ref:`server-plugins-connectors-properties` data can be encrypted on a per-element basis. See :ref:`server-plugins-connectors-properties-encryption` for more details. In general, Properties encryption is preferred for a few reasons: * It plays nicely with your VCS. If you change an encrypted Cfg file, then all you can see in your VCS log is that the file changed, no details about how it changed. With an encrypted Properties file, you can see which element changed (although obviously not the changed content). * It is faster when you have more than one passphrase. When decrypting a Cfg file, Bcfg2 simply brute-forces it with all known passphrases; when decrypting a Properties element, the passphrase is given by name so only one passphrase must be tried. * A Cfg file can only be encrypted with a single passphrase; Properties files can use different passphrases for different elements. If you are using different passphrases to segregate data amongst different teams, this lets teams collaborate more closely on files and other data. Other types of data that can be encrypted are: * Text content of Path tags in :ref:`server-plugins-structures-bundler` * Passphrases in XML description files for generated :ref:`server-plugins-generators-cfg-sshkeys` .. _bcfg2-crypt: bcfg2-crypt =========== Encrypting and decrypting :ref:`server-plugins-generators-cfg` and :ref:`server-plugins-connectors-properties` files can be done with the ``bcfg2-crypt`` tool, which mostly tries to do the right thing. I.e., it encrypts plaintext files, decrypts encrypted files, and automatically discovers if a file is Cfg or Properties. Its usage is thus generally very simple, e.g.:: bcfg2-crypt foo.conf bcfg2-crypt foo.xml Since the behavior of ``bcfg2-crypt`` varies significantly depending on whether you are dealing with a Cfg or Properties files, these are documented separately below. It's also well worthwhile to familiarize yourself with the man page for ``bcfg2-crypt``. Encrypting Cfg Files -------------------- To encrypt a Cfg file, you can simply run:: bcfg2-crypt foo.conf This will write the encrypted data to ``foo.conf.crypt``. Once you are satisfied that the file has been encrypted as you wish, you can remove the plaintext version, or you can use the ``--remove`` flag of ``bcfg2-crypt``. 
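For example, to encrypt a file and remove the plaintext original in one step, you might run (a minimal illustration of the ``--remove`` flag mentioned above)::

    bcfg2-crypt --remove foo.conf

leaving only ``foo.conf.crypt`` behind.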
To decrypt a file, simply run ``bcfg2-crypt`` again:: bcfg2-crypt foo.conf.crypt On Cfg files, ``bcfg2-crypt`` is more-or-less equivalent to the following commands (encryption and decryption, respectively):: openssl enc -aes-256-cbc -k -in foo.conf \ -out foo.conf.crypt -a openssl enc -d -aes-256-cbc -k -in foo.conf.crypt \ -out foo.conf -a Those commands can be used in lieu of ``bcfg2-crypt`` if you hate convenience. Encrypting Properties Files --------------------------- To encrypt or decrypt a properties file, simply run:: bcfg2-crypt foo.xml If the top-level tag of a Properties file is not ````, then you need to use the ``--properties`` flag to ``bcfg2-crypt``:: bcfg2-crypt --properties foo.xml The first time you run ``bcfg2-crypt`` on a Properties file, it will encrypt all character data of all elements. Additionally, it will add ``encrypted=""`` to each element that has encrypted character data. It also adds ``encryption="true"`` to the top-level ```` tag as a flag to the server that it should try to decrypt the data in that file. (If you are using Properties schemas, you will need to make sure to add support for these attributes.) On subsequent runs, only those elements flagged with ``encrypted="*"`` are encrypted or decrypted. To decrypt a Properties file, simply re-run ``bcfg2-crypt``:: bcfg2-crypt foo.xml This decrypts the encrypted elements, but it does *not* remove the ``encrypted`` attribute; this way, you can decrypt a Properties file, modify the contents, and then simply re-run ``bcfg2-crypt`` to encrypt it again. If you added elements that you also want to be encrypted, you can either add the ``encrypted`` attribute to them manually, or run:: bcfg2-crypt --xpath '*' foo.xml You can also use the ``--xpath`` option to specify more restrictive XPath expressions to only encrypt a subset of elements, or to encrypt different elements with different passphrases. Alternatively, you can manally set the ``encrypted`` attribute on various elements and ``bcfg2-crypt`` will automatically do the right thing. You can also run bcfg2-crypt in interactive mode to interactively select which attributes should be encrypted:: bcfg2-crypt -I foo.xml If you want to use different passphrases within a single Properties file, you must manually set the ``encrypted`` attribute. .. _server-encryption-configuration: Configuring Encryption ====================== Passphrases ----------- To configure encryption, add a ``[encryption]`` section to ``bcfg2.conf`` with any number of name-passphrase pairs. For instance:: [encryption] foo_team=P4ssphr4se bar_team=Pa55phra5e .. note:: The name of a passphrase **cannot** be ``algorithm`` or ``decrypt``, which are reserved for other configuration options. This would define two separate encryption passphrases, presumably for use by two separate teams. The passphrase names are completely arbitrary. Note that this does entail a chicken-and-egg problem. In order for the Bcfg2 server to be able to decrypt encrypted files, the passphrases must exist in ``bcfg2.conf`` in plaintext; but, if you're encrypting data, presumably you don't want to include those plaintext passphrases in your Bcfg2 repository, so you'll want to encrypt ``bcfg2.conf``. The best way to solve this is: #. On your Bcfg2 server, manually add the ``[encryption]`` section to ``bcfg2.conf`` and restart the Bcfg2 server. #. Update ``bcfg2.conf`` in your Bcfg2 repository with the passphrases, and encrypt it. The first (manual) step breaks the mutual dependency. 
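As a sketch of that second step, assuming ``bcfg2.conf`` is itself managed by Cfg and that your version of ``bcfg2-crypt`` supports selecting a passphrase by name with ``-p``, you might run::

    bcfg2-crypt -p foo_team Cfg/etc/bcfg2.conf/bcfg2.conf

Adjust the path and passphrase name to match your own repository layout.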
Algorithm --------- By default, Bcfg2 uses the AES-256-CBC cipher algorithm. If you wish to change this, you can set the ``algorithm`` option in the ``[encryption]`` section of ``bcfg2.conf``:: [encryption] algorithm = bf_cbc The value of ``algorithm`` must be a valid OpenSSL cipher algorithm according the naming model of the Python :mod:`M2Crypto` module. To get a list of valid algorithms, you can run:: openssl list-cipher-algorithms | grep -v ' => ' | \ tr 'A-Z-' 'a-z_' | sort -u .. _server-encryption-lax-strict: Lax vs. Strict decryption ------------------------- By default, Bcfg2 expects to be able to decrypt every encrypted datum. Depending on how encryption is implemented at your site, though, that may not be possible. (For instance, if you use encryption to protect data for your production environment from your staging Bcfg2 server, then you would not expect the staging server to be able to decrypt everything.) In this case, you want to enable lax decryption in the ``[encryption]`` section of ``bcfg2.conf``:: [encryption] lax_decryption = true This causes a failed decrypt to produce a warning only, not an error. This can be overridden by individual XML files by setting ``lax_decryption="false"`` on the top-level tag (or, vice-versa; if strict is the default an XML file can specify ``lax_decryption="true"``. Note that you could, for instance, set lax decryption by default, and then disable it on individual files. Encryption API ============== .. automodule:: Bcfg2.Server.Encryption doc/server/index.txt000066400000000000000000000017241303523157100150050ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-index: ================ The Bcfg2 Server ================ The Bcfg2 server is responsible for taking a comprehensive infrastructure description and turning it into a series of configuration specifications for particular clients. It also manages probed data and tracks statistics for clients. The Bcfg2 server takes information from two sources when generating client configuration specifications. The first is a pool of metadata that describes clients as members of an aspect-based classing system. That is, clients are defined in terms of aspects of their behavior. The other is a file system repository that contains mappings from metadata to literal configuration. These are combined to form the literal configuration specifications for clients. .. toctree:: :maxdepth: 2 plugins/index admin/index configurationentries info bcfg2-info selinux configuration database caching encryption xml-common acl doc/server/info.txt000066400000000000000000000030611303523157100146250ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-info: ======== info.xml ======== Various file properties for entries served by most generator plugins, including :ref:`server-plugins-generators-cfg` and :ref:`server-plugins-generators-sshbase`, are controlled through the use of ``info.xml`` files. By default, these plugins are set to write files to the filesystem with owner **root**, group **root**, and mode **644** (read and write for owner, read only for group and other). These options, and a few others, can be overridden through use of ``info.xml`` files. Each config file directory can have a ``info.xml`` file if needed. .. xml:schema:: info.xsd :linktotype: :inlinetypes: InfoType :noautodep: ACLType A sample ``info.xml`` file for CGI script on a web server might look like: .. 
code-block:: xml A more complex example for a template that generates both ``bcfg2.conf`` and ``bcfg2-web.conf`` might look like this: .. code-block:: xml See :ref:`server-selinux` for more information on the ``secontext`` attribute and managing SELinux in general. doc/server/plugins/000077500000000000000000000000001303523157100146125ustar00rootroot00000000000000doc/server/plugins/connectors/000077500000000000000000000000001303523157100167675ustar00rootroot00000000000000doc/server/plugins/connectors/awstags.txt000066400000000000000000000102121303523157100211750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-connectors-awstags: ========= AWSTags ========= The AWSTags plugin is a connector that retrieves tags from instances in EC2, and can optionally assign group membership based on patterns in the tags. See `Using Tags `_ for details on using tags in EC2. AWSTags queries EC2 for instances whose ``private-dns-name`` property matches the hostname of the client. Setup ===== #. Add ``AWSTags`` to the ``plugins`` option in ``/etc/bcfg2.conf`` #. Configure AWS credentials in ``/etc/bcfg2.conf`` (See `Configuration`_ below for details.) #. Optionally, create ``AWSTags/config.xml`` (See `Assigning Groups`_ below for details.) #. Restart the Bcfg2 server. Using Tag Data ============== AWSTags exposes the data in templates as a dict available as ``metadata.AWSTags``. E.g., in a :ref:`Genshi template `, you could do: .. code-block:: genshitext Known tags on ${metadata.hostname}: {% for key, val in metadata.AWSTags.items() %}\ ${key} ${val} {% end %}\ This would produce something like:: Known tags on foo.example.com: Name foo.example.com some random tag the value Assigning Groups ================ AWSTags can assign groups based on the tag data. This functionality is configured in ``AWSTags/config.xml``. Example ------- .. code-block:: xml foo bar $1 In this example, any machine with a tag named ``foo`` would be added to the ``foo`` group. Any machine with a tag named ``bar`` whose value was also ``bar`` would be added to the ``bar`` group. Finally, any machine with a tag named ``bcfg2 group`` would be added to the group named in the value of that tag. Note that both the ``name`` and ``value`` attributes are *always* regular expressions. If a ```` element has only a ``name`` attribute, then it only checks for existence of a matching tag. If it has both ``name`` and ``value``, then it checks for a matching tag with a matching value. You can use backreferences (``$1``, ``$2``, etc.) in the group names. If only ``name`` is specified, then the backreferences will refer to groups in the ``name`` regex. If ``name`` and ``value`` are both specified, then backreferences will refer to groups in the ``value`` regex. If you specify both ``name`` and ``value``, it is not possible to refer to groups in the ``name`` regex. Schema Reference ---------------- .. xml:schema:: awstags.xsd Configuration ============= AWSTags recognizes several options in ``/etc/bcfg2.conf``; at a minimum, you must configure an AWS access key ID and secret key. 
All of the following options are in the ``[awstags]`` section: +-----------------------+-----------------------------------------------------+ | Option | Description | +=======================+=====================================================+ | ``access_key_id`` | The AWS access key ID | +-----------------------+-----------------------------------------------------+ | ``secret_access_key`` | The AWS secret access key | +-----------------------+-----------------------------------------------------+ | ``cache`` | Whether or not to cache tag lookups. See `Caching`_ | | | for details. Default is to cache. | +-----------------------+-----------------------------------------------------+ Caching ======= Since the AWS API isn't always very quick to respond, AWSTags caches its results by default. The cache is fairly short-lived: the cache for each host is expired when it starts a client run, so it will start the run with fresh data. If you frequently update tags on your instances, you may wish to disable caching. That's probably a bad idea, and would tend to suggest that updating tags frequently is perhaps the Wrong Thing. doc/server/plugins/connectors/grouplogic.txt000066400000000000000000000075471303523157100217170ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-connectors-grouplogic: ========== GroupLogic ========== .. versionadded:: 1.3.2 GroupLogic is a connector plugin that lets you use an XML Genshi template to dynamically set additional groups for clients. Usage ===== To use the GroupLogic plugin, first do ``mkdir /var/lib/bcfg2/GroupLogic``. Add ``GroupLogic`` to your ``plugins`` line in ``/etc/bcfg2.conf``. Next, create ``/var/lib/bcfg2/GroupLogic/groups.xml``: .. code-block:: xml ``groups.xml`` is structured very similarly to the :ref:`server-plugins-grouping-metadata` ``groups.xml``. A Group tag that contains no children is a declaration of membership; a Group or Client tag that does contain children is a conditional. Unlike ``Metadata/groups.xml``, GroupLogic supports genshi templating, so you can dynamically create groups. ``GroupLogic/groups.xml`` is rendered for each client, and the groups set in it are added to the client metadata. .. note:: Also unlike ``Metadata/groups.xml``, GroupLogic can not be used to associate bundles with clients directly, or to negate groups. But you can use GroupLogic to assign a group that is associated with a bundle in Metadata. Consider the case where you have four environments -- dev, test, staging, and production -- and four components to a web application -- the frontend, the API, the database server, and the caching proxy. In order to make files specific to the component *and* to the environment, you need groups to describe each combination: webapp-frontend-dev, webapp-frontend-test, and so on. You *could* do this in ``Metadata/groups.xml``: .. code-block:: xml ... ... ... Creating the sixteen groups this way is incredibly tedious, and this is a quite *small* site. GroupLogic can automate this process. Assume that we've declared the groups thusly in ``Metadata/groups.xml``: .. code-block:: xml One way to automate the creation of the groups would be to simply generate the tedious config: .. code-block:: xml But, since ``GroupLogic/groups.xml`` is rendered for each client individually, there's a more elegant way to accomplish the same thing: .. code-block:: xml This gets only the component and environment for the current client, and, if both are set, sets the single appropriate group. 
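Putting the options together, an illustrative ``[awstags]`` section that disables caching might look like this (the credentials below are the standard AWS documentation placeholders)::

    [awstags]
    access_key_id = AKIAIOSFODNN7EXAMPLE
    secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    cache = no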
doc/server/plugins/connectors/properties.txt000066400000000000000000000240271303523157100217310ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-connectors-properties: ========== Properties ========== The Properties plugin is a connector plugin that adds information from XML, JSON, and YAML files into client metadata instances. Enabling Properties =================== First, ``mkdir /var/lib/bcfg2/Properties``. Each property file goes in this directory. Each will automatically be cached by the server, and reread/reparsed upon changes. Add **Properties** to your ``plugins`` line in ``/etc/bcfg2.conf``. Data Structures =============== Properties adds a new dictionary to client metadata instances that maps property file names to PropertyFile instances. A property file can be one of three types: * If the filename ends with ``.xml``, it will be parsed as XML and handled by :class:`Bcfg2.Server.Plugins.Properties.XMLPropertyFile`. See `XML Property Files`_ below. * If the filename ends with ``.json`` and JSON libraries are installed (either ``json`` or ``simplejson``, although ``json`` is highly recommended), it will be parsed as `JSON `_ and handled by :class:`Bcfg2.Server.Plugins.Properties.JSONPropertyFile`. See `JSON Property Files`_ below. * If the filename ends with ``.yaml`` or ``.yml`` and PyYAML is installed, it will be parsed as `YAML `_ and handled by :class:`Bcfg2.Server.Plugins.Properties.YAMLPropertyFile`. See `YAML Property Files`_ below. The XML interface is undoubtably the most powerful, as it natively supports schemas to check the data validity, client- and group-specific data, and data encryption. Usage ===== Common Interface ---------------- Different data types have different interfaces, but there are some usage patterns common to all properties files. Specific property files can be referred to in templates as ``metadata.Properties[]``. The data in property files is accessible via different attributes: +-----------+----------------+ | Data Type | Data Attribute | +===========+================+ | XML | ``xdata`` | +-----------+----------------+ | JSON | ``json`` | +-----------+----------------+ | YAML | ``yaml`` | +-----------+----------------+ For instance, in a :ref:`Genshi template `, you might do:: {% for item in metadata.Properties['foo.json'].json %}\ ${item} {% end %}\ {% for key, value in metadata.Properties['foo.yml'].yaml %}\ ${key} = ${value} {% end %}\ {% for el in metadata.Properties['foo.xml'].xdata.findall("Tag") %}\ ${el.get("name")} = ${el.text} {% end %}\ The raw contents of a properties file as a string are available via the ``data`` attribute, e.g., ``metadata.Properties['prop-file'].data``. .. _server-plugins-connectors-properties-write-back: Writing to Properties files ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 1.2.0 If you need to make persistent changes to properties data, you can use the ``write`` method of the :class:`Bcfg2.Server.Plugins.Properties.PropertyFile` class:: {% python import lxml.etree from genshi.template import TemplateError lxml.etree.SubElement(metadata.Properties['foo.xml'], "Client", name=metadata.hostname) if not metadata.Properties['foo.xml'].write(): raise TemplateError("Failed to write changes back to foo.xml") The interface is the same for YAML or JSON data. If writing XML data, the ``write`` method checks the data in the object against its schema before writing it; see `Data Structures`_ for details. Note that use of the ``write`` method can cause race conditions if you run more than one Bcfg2 server. 
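The sketch below shows what such a ``GroupLogic/groups.xml`` template could look like for this scenario; the ``webapp-`` group prefix and the environment group names are assumptions taken from the example above, so adjust them to your own metadata:

.. code-block:: xml

    <GroupLogic xmlns:py="http://genshi.edgewall.org/">
      <?python
        component = None
        env = None
        for g in metadata.groups:
            if g.startswith("webapp-"):
                component = g
            elif g in ("dev", "test", "staging", "production"):
                env = g
      ?>
      <py:if test="component and env">
        <Group name="${component}-${env}"/>
      </py:if>
    </GroupLogic>

Because the template is rendered per-client, a client in the ``webapp-frontend`` and ``production`` groups would be placed in ``webapp-frontend-production`` without any of the sixteen combinations being written out by hand.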
If you run more than one Bcfg2 server, you can disable Properties write-back by setting the following in ``bcfg2.conf``:: [properties] writes_enabled = false .. _server-plugins-connectors-properties-xml: XML Property Files ------------------ The data in an XML property file can be accessed with the ``xdata`` attribute, an :class:`lxml.etree._Element` object documented `here `_. In addition to the ``xdata`` attribute that can be used to access the raw data, the following access methods are defined: * ``Match()`` parses the Group and Client tags in the file and returns a list of elements that apply to the client described by a set of metadata. For instance:: {% python ntp_servers = [el.text for el in metadata.Properties['ntp.xml'].Match(metadata) if el.tag == "Server"] %} * ``XMLMatch()`` parses the Group and Client tags in the file and returns an XML document containing only the data that applies to the client described by a set of metadata. (The Group and Client tags themselves are also removed, leaving only the tags and data contained in them.) For instance:: {% python ntp_servers = [el.text for el in metadata.Properties['ntp.xml'].XMLMatch(metadata).findall("//Server")] %} ``XMLMatch()`` can be run automatically on properties files by using the :ref:`server-plugins-connectors-properties-automatch` feature. You can also access the XML data that comprises a property file directly in one of several ways: * ``metadata.Properties['prop-file'].xdata`` is an lxml.etree._Element object representing the top-level element in the file. * ``metadata.Properties['prop-file'].data`` is the raw contents of the property file as a string. * ``metadata.Properties['prop-file'].entries`` is a list of lxml.etree._Element objects representing the direct children of the top-level element. (I.e., everything directly under the ```` tag.) The XML data in a property file is arbitrary, but a matching ``.xsd`` file can be created to assign a schema to a property file, which will be checked when running ``bcfg2-lint``. For instance, given:: Properties/dns-config.xml Properties/dns-config.xsd ``dns-config.xml`` will be validated against ``dns-config.xsd``. Although Properties files are technically freeform XML, the top-level XML tag should be ````. JSON Property Files ------------------- .. versionadded:: 1.3.0 The data in a JSON property file can be accessed with the ``json`` attribute, which is the loaded JSON data. The JSON properties interface does not provide any additional functionality beyond the `Common Interface`_. YAML Property Files ------------------- .. versionadded:: 1.3.0 The data in a YAML property file can be accessed with the ``yaml`` attribute, which is the loaded YAML data. Only a single YAML document may be included in a file. The YAML properties interface does not provide any additional functionality beyond the `Common Interface`_. .. _server-plugins-connectors-properties-automatch: Automatch ========= .. versionadded:: 1.3.0 You can enable :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch()` for all XML Property files by setting ``automatch`` to ``true`` in the ``[properties]`` section of ``bcfg2.conf``. This makes ``metadata.Properties`` values :class:`lxml.etree._Element` objects that contain only matching data. (This makes it impossible to do :ref:`server-plugins-connectors-properties-write-back` as a side-effect.) 
In Python terms, setting ``automatch=true`` is the same as doing the following at the top of each template:: {% python for prop in metadata.Properties.values(): prop = prop.XMLMatch(metadata) %} The example above that describes ``XMLMatch()`` would then become simply:: {% python ntp_servers = [el.text for el in metadata.Properties['ntp.xml'].findall("//Server")] %} You can also enable automatch for individual Property files by setting the attribute ``automatch="true"`` on the top-level ```` tag. Conversely, if automatch is enabled by default in ``bcfg2.conf``, you can disable it for an individual Property file by setting ``automatch="false"`` on the top-level ```` tag. If you want to see what ``XMLMatch()``/automatch would produce for a given client on a given Properties file, you can use :ref:`bcfg2-info `:: bcfg2-info automatch props.xml foo.example.com If automatch is not enabled, you can force ``bcfg2-info`` to perform it anyway with ``-f``:: bcfg2-info automatch -f props.xml foo.example.com .. note:: Be sure to notice that enabling automatch changes the type of the data in ``metadata.Properties``; with automatch disabled, the values of the ``metadata.Properties`` dict are :class:`Bcfg2.Server.Plugins.Properties.PropertyFile` objects. With automatch enabled, they are :class:`lxml.etree._Element` objects. .. _server-plugins-connectors-properties-encryption: Encrypted Properties data ========================= .. versionadded:: 1.3.0 You can encrypt selected data in XML Properties files to protect that data from other people who need access to the repository. The data is decrypted transparently on-the-fly by the server; you never need to decrypt the data in your templates. Encryption is only supported on XML properties files. See :ref:`server-encryption` for details on encryption in general, and :ref:`xml-encryption` for details on encryption in XML files. Accessing Properties contents from Genshi Templates =================================================== Access contents of ``Properties/auth.xml``:: ${metadata.Properties['auth.xml'].xdata.find('file').find('bcfg2.key').text} Configuration ============= ``bcfg2.conf`` contains several miscellaneous configuration options for the Properties plugin, which can be set in the ``[properties]`` section. Any booleans in the config file accept the values "1", "yes", "true", and "on" for True, and "0", "no", "false", and "off" for False. It understands the following directives: * ``automatch``: Enable :ref:`server-plugins-connectors-properties-automatch`. Default is false. * ``writes_enabled``: Enable :ref:`server-plugins-connectors-properties-write-back`. Default is true. Module Documentation ==================== .. automodule:: Bcfg2.Server.Plugins.Properties doc/server/plugins/connectors/puppetenc.txt000066400000000000000000000077301303523157100215420ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-connectors-puppetenc: ========= PuppetENC ========= PuppetENC is a connector plugin that adds support for Puppet External Node Classifiers (``_), or ENCs. Output Format ============= The PuppetENC plugin implements the Puppet 2.6.5+ ENC output format with some modifications. The basic output format is described `here `_. The following modifications apply: * ``classes`` are considered to be Bcfg2 groups. 
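For reference, a small XML Properties file like the ``ntp.xml`` used in the ``Match()`` and ``XMLMatch()`` examples above might look like this (the servers and names are purely illustrative):

.. code-block:: xml

    <Properties>
      <Server>0.pool.ntp.org</Server>
      <Group name="fileserver">
        <Server>ntp1.example.com</Server>
      </Group>
      <Client name="foo.example.com">
        <Server>ntp2.example.com</Server>
      </Client>
    </Properties>

For ``foo.example.com``, ``XMLMatch()`` would return a document containing the first and third ``Server`` elements (with the ``Client`` wrapper removed); the ``Server`` inside the ``fileserver`` group would only be included for clients in that group.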
(This is basically just a difference in terminology between Puppet and Bcfg2; Bcfg2 calls "groups" what Puppet calls "classes.") * As an alternative to the Puppet-specific ``classes`` value, you may use ``groups`` if you are writing an ENC from scratch specifically for Bcfg2. * Since Bcfg2 does not have the notion of parameterized classes, any class parameters provided will be merged in with the ``parameters`` dict. * ``parameters`` are presented as connector data. (See Usage below.) * The ``environment`` value is not supported. If present, PuppetENC will issue a warning and skip it. The ``parameters`` from separate ENCs are all merged together, including parameters from any parameterized classes. This is a shallow merge; in other words, only the top-level keys are considered. For instance, assuming you had one ENC that produced:: parameters: ntp_servers: - 0.pool.ntp.org - ntp1.example.com And another that produced:: parameters: ntp_servers: - ntp2.example.com This would result in connector data that included *either* the first value of ``ntp_servers`` *or* the second, but not both; this would depend on the order in which the ENCs were run, which is non-deterministic and should not be relied upon. However, if you add one ENC that produced:: parameters: ntp_servers: - 0.pool.ntp.org - ntp1.example.com And another that produced:: parameters: mail_servers: - mail.example.com Then the connector data would consist of:: {"ntp_servers": ["0.pool.ntp.org", "ntp1.example.com"], "mail_servers": ["mail.example.com"]} Usage ===== To use the PuppetENC plugin, first do ``mkdir /var/lib/bcfg2/PuppetENC``. Add ``PuppetENC`` to your ``plugins`` line in ``/etc/bcfg2.conf``. Now you can place any ENCs you wish to run in ``/var/lib/bcfg2/PuppetENC``. Note that ENCs are run each time client metadata is generated, so if you have a large number of ENCs or ENCs that are very time-consuming, they could have a significant impact on server performance. In that case, it could be worthwhile to write a dedicated Connector plugin. PuppetENC parameters can be accessed in templates as ``metadata.PuppetENC``, which is a dict of all parameter data merged together. For instance, given the following ENC output:: --- classes: common: puppet: ntp: ntpserver: 0.pool.ntp.org aptsetup: additional_apt_repos: - deb localrepo.example.com/ubuntu lucid production - deb localrepo.example.com/ubuntu lucid vendor parameters: ntp_servers: - 0.pool.ntp.org - ntp.example.com mail_server: mail.example.com iburst: true environment: production ``metadata.PuppetENC`` would contain:: 'additional_apt_repos': ['deb localrepo.example.com/ubuntu lucid production', 'deb localrepo.example.com/ubuntu lucid vendor'], 'iburst': True, 'mail_server': 'mail.example.com', 'ntp_servers': ['0.pool.ntp.org', 'ntp.example.com'], 'ntpserver': '0.pool.ntp.org'} (Note that the duplication of NTP server data doesn't make this an especially *good* example; it's just the official Puppet example.) So, in a template you could do something like:: {% for repo in metadata.PuppetENC['additional_apt_repos'] %}\ ${repo} {% end %}\ doc/server/plugins/connectors/templatehelper.txt000066400000000000000000000053141303523157100225460ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-connectors-templatehelper: ============== TemplateHelper ============== The TemplateHelper plugin is a connector plugin that adds Python classes and methods to client metadata instances for use in templates. 
This allows you to easily reuse code that is common amongst multiple templates and add convenience methods. Using TemplateHelper ==================== First, ``mkdir /var/lib/bcfg2/TemplateHelper`` and add **TemplateHelper** to your ``plugins`` line in ``/etc/bcfg2.conf``. Restart ``bcfg2-server``. Now, any ``.py`` file placed in ``/var/lib/bcfg2/TemplateHelper/`` will be read and added to matching client metadata objects. See :ref:`writing-templatehelpers` below for more information on how to write TemplateHelper scripts. TemplateHelper does not support group- or host-specific helpers. All helpers will be available to all clients. .. _writing-templatehelpers: Writing Helpers =============== A helper module is just a Python module with several special conditions: * The filename must end with ``.py`` * The module must have an attribute, ``__export__``, that lists all of the classes, functions, variables, or other symbols you wish to export from the module. * ``data``, ``name``, ``fam``, ``Index``, and ``HandleEvent`` are reserved names. You should not include symbols with a reserved name in ``__export__``. Additionally, including symbols that start with an underscore or double underscore is bad form, and may also produce errors. Additionally, the module *may* have an attribute, ``__default__``, that lists all of the symbols that you wish to include by default in the template namespace. ``name``, ``metadata``, ``source_path``, ``repo``, and ``path`` are reserved names, and should not be included in ``__default__``. See ``examples/TemplateHelper`` for examples of helper modules. Usage ===== Specific helpers can be referred to in templates as ``metadata.TemplateHelper[]``. That returns a HelperModule object which will have, as attributes, all symbols listed in ``__export__``. For example, consider this helper module:: __export__ = ["hello"] __default__ = ["pining"] def hello(metadata): return "Hello, %s!" % metadata.hostname def pining(text): return "It's pinin' for the %s!" % text To use this in a Genshi template, we could do:: ${metadata.TemplateHelper['hello'].hello(metadata)} ${pining("fjords")} The template would produce:: Hello, foo.example.com! It's pinin' for the fjords! Note that the client metadata object is not passed to a helper module in any magical way; if you want to access the client metadata object in a helper function or class, you must pass the object to the function manually. doc/server/plugins/generators/000077500000000000000000000000001303523157100167635ustar00rootroot00000000000000doc/server/plugins/generators/cfg.txt000066400000000000000000000765001303523157100202730ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-generators-cfg: === Cfg === The Cfg plugin provides a repository to describe configuration file contents for clients. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines. The Cfg Repository ================== The Cfg plugin is enabled by including **Cfg** on the **plugins** line of the **[server]** section of your Bcfg2 server config file. The repository itself lives in ``/var/lib/bcfg2/Cfg``, assuming you are using the default repository location of ``/var/lib/bcfg2``. The contents of this directory are a series of directories corresponding to the real-life locations of the files on your clients, starting at the root level. For example:: % ls Cfg bin/ boot/ etc/ opt/ root/ usr/ var/ Specific config files go in like-named directories in this heirarchy. 
For example the password file, ``/etc/passwd``, goes in ``Cfg/etc/passwd/passwd``, while the ssh pam module config file, ``/etc/pam.d/sshd``, goes in ``Cfg/etc/pam.d/sshd/sshd``. The reason for the like-name directory is to allow multiple versions of each file to exist, as described below. Note that these files are exact copies of what will appear on the client machine (except when using templates -- see below). Group-Specific Files ==================== It is often the case that you want one version of a config file for all of your machines except those in a particular group. For example, ``/etc/fstab`` should look alike on all of your desktop machines, but should be different on your file servers. Bcfg2 can handle this case through use of group-specific files. As mentioned above, all Cfg entries live in like-named directories at the end of their directory tree. In the case of fstab, the file at ``Cfg/etc/fstab/fstab`` will be handed out by default to any client that asks for a copy of ``/etc/fstab``. Group-specific files are located in the same directory and are named with the following syntax:: /path/to/filename/filename.GNN_groupname **NN** is a priority number where **00** is lowest and **99** is highest, and **groupname** is the name of a group defined in ``Metadata/groups.xml``. Back to our fstab example, we might have a ``Cfg/etc/fstab/`` directory that looks like this:: fstab fstab.G50_server fstab.G99_fileserver By default, clients will receive the plain fstab file when they request ``/etc/fstab``. Any machine that is in the **server** group, however, will instead receive the ``fstab.G50_server`` file. Finally, any machine that is in the **fileserver** group will receive the ``fstab.G99_fileserver`` file, even if they are also in the **server** group. Host-Specific Files =================== Similar to the case with group-specific files, there are cases where a specific machine should have a different version of a file than all others. This can be accomplished with host-specific files. The format of a host-specific file name is:: /path/to/filename/filename.H_host.example.com Host-specific files have a higher priority than group specific files. Again, the fstab example:: fstab fstab.G50_server fstab.G99_fileserver fstab.H_host.example.com In this case, *host.example.com* will always get the host-specific version, even if it is part of the **server** or **fileserver** (or both) classes. .. note:: If you have the ability to choose between using a group-specific and a host-specific file, it is almost always best to use a group-specific one. That way if a hostname changes or an extra copy of a particular client is built, it will get the same changes as the original. Templates ========= .. _server-plugins-generators-cfg-genshi: Genshi Templates ---------------- Genshi templates allow you to use the `Genshi `_ templating system. Genshi templates should be named with a ``.genshi`` extension, e.g.:: % ls Cfg/etc/motd info.xml motd.genshi See the genshi `documentation `_ for examples of Genshi syntax. Troubleshooting ~~~~~~~~~~~~~~~ When developing a template, you can see what the template would generate on a client with :ref:`bcfg2-info `:: bcfg2-info buildfile E.g.:: bcfg2-info buildfile /etc/foo.conf foo.example.com To generate a file with an :ref:`altsrc ` attribute, you can run:: bcfg2-info buildfile /etc/foo/foo.conf --altsrc=/etc/foo.conf \ foo.example.com Sometimes, it's useful to be able to do more in-depth troubleshooting by running the template manually. 
To do this, run ``bcfg2-info debug``, and, once in the Python interpreter, run:: metadata = self.build_metadata("") source_path = "" name = source_path[len(self.setup['repo']):] Then, run:: import os from genshi.template import TemplateLoader, NewTextTemplate template = TemplateLoader().load(source_path, cls=NewTextTemplate) data = dict(metadata=metadata, source_path=source_path, path=source_path, name=name, repo=self.setup['repo']) print(template.generate(**data).render()) This gives you more fine-grained control over how your template is rendered. E.g., you can tweak the values of the variables passed to the template, or evaluate the template manually, line-by-line, and so on. You can also use this approach to render templates that depend on :ref:`altsrc ` tags by setting ``source_path`` to the path to the template, and setting ``name`` to the path to the file to be generated, e.g.:: metadata = self.build_metadata("foo.example.com") source_path = "/Cfg/etc/sysconfig/network-scripts/ifcfg-template/ifcfg-template.genshi" name = "/etc/sysconfig/network-scripts/ifcfg-bond0" Error handling ~~~~~~~~~~~~~~ Situations may arise where a templated file cannot be generated due to missing or incomplete information. A TemplateError can be raised to force a bind failure and prevent sending an incomplete file to the client. For example, this template:: {% python from genshi.template import TemplateError grp = None for g in metadata.groups: if g.startswith('ganglia-gmond-'): grp = g break else: raise TemplateError, "Missing group" %}\ will fail to bind if the client is not a member of a group starting with "ganglia-gmond-". The syslogs on the server will contain this message:: bcfg2-server[5957]: Genshi template error: Missing group bcfg2-server[5957]: Failed to bind entry: Path /etc/ganglia/gmond.conf ...indicating the bind failure and message raised with the TemplateError. Handling Dollar Signs ~~~~~~~~~~~~~~~~~~~~~ In a Genshi template, ``$`` is a special character and must be escaped by doubling, i.e., ``$$``. For instance, to embed the Subversion ``$Id$`` keyword in a Genshi template, you would have to do ``$$Id$$``. Examples ~~~~~~~~ .. toctree:: :glob: :maxdepth: 1 examples/genshi/* .. _server-plugins-generators-cfg-cheetah: Cheetah Templates ----------------- Cheetah templates allow you to use the `cheetah templating system `_. Cheetah templates should be named with a ``.cheetah`` extension, e.g.:: % ls Cfg/etc/motd info.xml motd.cheetah Examples ~~~~~~~~ .. toctree:: :glob: :maxdepth: 1 examples/cheetah/* Comments and Cheetah ~~~~~~~~~~~~~~~~~~~~ As Cheetah processes your templates it will consider hash "#" style comments to be actual comments in the template and will strip them from the final config file. If you would like to preserve the comment in the final config file you need to escape the hash character '\#' which will tell Cheetah (and Python) that you do in fact want the comment to appear in the final config file.:: # This is a comment in my template which will be stripped when it's processed through Cheetah \# This comment will appear in the generated config file. .. _server-plugins-generators-cfg-jinja2: Jinja2 Templates ----------------- Jinja2 templates allow you to use the `jinja2 templating system `_. Jinja2 templates should be named with a ``.jinja2`` extension, e.g.:: % ls Cfg/etc/motd info.xml motd.jinja2 Examples ~~~~~~~~ .. 
toctree:: :glob: :maxdepth: 1 examples/jinja2/* Inside Templates ---------------- Several variables are pre-defined inside templates: +-------------+--------------------------------------------------------+ | Name | Description | +=============+========================================================+ | metadata | :ref:`Client metadata | | | ` | +-------------+--------------------------------------------------------+ | name | The value of the ``name`` attribute as specified in | | | the Path entry in Bcfg2. | +-------------+--------------------------------------------------------+ | source_path | The path to the template file on the filesystem | +-------------+--------------------------------------------------------+ | repo | The path to the Bcfg2 repository on the filesystem | +-------------+--------------------------------------------------------+ | path | In Genshi templates, ``path`` is a synonym for | | | ``source_path``. In Cheetah templates and Jinja2 | | | templates, it's a synonym for ``name``. For this | | | reason, use of ``path`` is discouraged, and it may be | | | deprecated in a future release. | +-------------+--------------------------------------------------------+ To access these variables in a Genshi template, you can simply use the name, e.g.:: Path to this file: ${name} Similarly, in a Jinja2 template:: Path to this file: {{ name }} In a Cheetah template, the variables are properties of ``self``, e.g.:: Path to this file: $self.name Notes on Using Templates ------------------------ Templates can be host and group specific as well. Deltas will not be processed for any Genshi, Cheetah, or Jinja2 base file. .. note:: If you are using templating in combination with host-specific or group-specific files, you will need to ensure that the ``.genshi`` ``.cheetah`` or ``.jinja2`` extension is at the **end** of the filename. Using the examples from above for *host.example.com* and group *server* you would have the following:: Cfg/etc/fstab/fstab.H_host.example.com.genshi Cfg/etc/fstab/fstab.G50_server.cheetah You can mix Genshi and Cheetah when using different host-specific or group-specific files. For example:: Cfg/etc/fstab/fstab.H_host.example.com.genshi Cfg/etc/fstab/fstab.G50_server.cheetah .. _server-plugins-generators-cfg-encryption: Encrypted Files =============== .. versionadded:: 1.3.0 Bcfg2 allows you to encrypt files stored in ``Cfg/`` to protect the data in them from other people who need access to the repository. See also :ref:`server-plugins-connectors-properties-encryption` for information on encrypting elements in Properties files, which is often more friendly for tracking changes in a VCS. .. note:: This feature is *not* intended to secure the files against a malicious attacker who has gained access to your Bcfg2 server, as the encryption passphrases are held in plaintext in ``bcfg2.conf``. This is only intended to make it easier to use a single Bcfg2 repository with multiple admins who should not necessarily have access to each other's sensitive data. See :ref:`server-encryption` for more details on encryption in Bcfg2 in general. 
Encrypting Files ---------------- An encrypted file should end with ``.crypt``, e.g.:: Cfg/etc/foo.conf Cfg/etc/foo.conf/foo.conf.crypt Cfg/etc/foo.conf/foo.conf.G10_foo.crypt Encrypted Genshi, Cheetah, and Jinja2 templates can have the extensions in either order, e.g.:: Cfg/etc/foo.conf/foo.conf.crypt.genshi Cfg/etc/foo.conf/foo.conf.G10_foo.genshi.crypt Cfg/etc/foo.conf/foo.conf.H_bar.example.com.crypt.cheetah To encrypt or decrypt a file, use :ref:`bcfg2-crypt`. .. _server-plugins-generators-cfg-sshkeys: SSH Keys ======== .. versionadded:: 1.3.0 Cfg can also be used to automatically create and distribute SSH key pairs and the ``authorized_keys`` file. Keys can be created one of two ways: * Host-specific keys, where each client has its own key pair. This is the default. * Group-specific keys. To do this, you must set ``category`` in either ``bcfg2.conf`` (see "Configuration" below) or in ``privkey.xml``. Keys created for a given client will be specific to that client's group in the specified category. Group-specific keys are useful if, for instance, you have multiple distinct environments (development, testing, production, for example) and want to maintain separate keys for each environment. This feature actually creates static keys, much like the :ref:`server-plugins-generators-sshbase` plugin creates SSH certificates. It doesn't generate them on the fly for each request; it generates the key once, then saves it to the filesystem. Creating key pairs ------------------ To create an SSH key pair, you need to define how the private key will be created in ``privkey.xml``. For instance, to create ``/home/foo/.ssh/id_rsa``, you would create ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa/privkey.xml``. This will create *both* the private key and the public key; the latter is created by appending ``.pub`` to the private key filename. It is not possible to change the public key filename. You may *optionally* also create a corresponding ``pubkey.xml``, which will allow the key pair to be created when the public key is requested. (For the example above, you'd create ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa.pub/pubkey.xml``. This can speed up the propagation of SSH keys throughout your managed systems, particularly if you use the ``authorized_keys`` generation feature. ``privkey.xml`` ~~~~~~~~~~~~~~~ ``privkey.xml`` contains a top-level ``PrivateKey`` element, and is structured as follows: .. xml:element:: PrivateKey :linktotype: See :ref:`server-encryption` for more details on encryption in Bcfg2 in general. ``pubkey.xml`` ~~~~~~~~~~~~~~ ``pubkey.xml`` only ever contains a single line: .. code-block:: xml .. xml:element:: PublicKey It acts only as a flag to Bcfg2 that a key pair should be generated, if none exists, using the associated ``privkey.xml`` file. The path to ``privkey.xml`` is determined by removing ``.pub`` from the directory containing ``pubkey.xml``. I.e., if you create ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa.pub/pubkey.xml``, then Bcfg2 will use ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa/privkey.xml`` to create the key pair. Use of ``pubkey.xml`` is optional, but is recommended. If you do not use ``pubkey.xml`` files, you may encounter two problems: * On the first Bcfg2 client run on a given client, the private keys may be present but the public keys may not be. This will be fixed by running ``bcfg2`` again. * If you are including an automatically created public key in ``authorized_keys``, it will not be created until the client the key is for requests the key pair. 
As an example of this latter scenario, suppose that your ``authorized_keys.xml`` allows access to foo.example.com from ``/root/.ssh/id_rsa.pub`` for bar.example.com. If bar.example.com has not run the Bcfg2 client, then no key pair will have been generated, and generating the foo.example.com ``authorized_keys`` file will create a warning. But if you create ``Cfg/root/.ssh/id_rsa.pub/pubkey.xml``, then building ``authorized_keys`` for foo.example.com will create root's keypair for bar.example.com. .. note:: In order to use ``pubkey.xml``, there *must* be a corresponding ``privkey.xml``. You cannot, for instance, populate a directory with manually-generated private SSH keys, drop ``pubkey.xml`` in the related public key directory, and expect Bcfg2 to generate the public keys. It will not. Examples ~~~~~~~~ ``privkey.xml`` can, at its simplest, be very simple indeed: .. code-block:: xml This will create a private key with all defaults. Or it can be more complex: .. code-block:: xml U2FsdGVkX19xACol83uyPELP94s4CmngD12oU6PLLuE= This creates a 1024-bit DSA key for each group in the ``environment`` category, and keys for clients in the ``secure`` group will be protected with the given (encrypted) passphrase. To complete the example, assume that this file was saved at ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa/privkey.xml``. If a client in the ``development`` group, which is a group in the ``environment`` category, requests the private key, then the following files would be created:: /var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa/id_rsa.G50_development /var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa.pub/id_rsa.pub.G50_development ``/var/lib/bcfg2/Cfg/home/foo/.ssh/id_rsa.pub`` would be created if it did not exist. Subsequent clients that were also members of the ``development`` environment would get the keys that have already been generated. ``pubkey.xml`` always contains a single empty tag: .. code-block:: xml Generating ``authorized_keys`` ------------------------------ ``authorized_keys`` can be automatically generated from public SSH keys that exist in the Cfg tree. The keys in question can be generated from ``privkey.xml``, or they can be manually created. If a key doesn't exist when ``authorized_keys`` is generated, the key will only be created if ``pubkey.xml`` exists. If that is not the case, a warning will be produced. To generate ``authorized_keys``, create ``authorized_keys.xml``, e.g.: ``/var/lib/bcfg2/Cfg/root/.ssh/authorized_keys/authorized_keys.xml``. ``authorized_keys.xml`` ~~~~~~~~~~~~~~~~~~~~~~~ ``authorized_keys.xml`` is structured as follows: .. xml:element:: AuthorizedKeys :linktotype: Example ~~~~~~~ .. code-block:: xml ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDw/rgKQeARRAHK5bQQhAAe1b+gzdtqBXWrZIQ6cIaLgxqj76TwZ3DY4A6aW9RgC4zzd0p4a9MfsScUIB4+UeZsx9GopUj4U6H8Vz7S3pXxrr4E9logVLuSfOLFbI/wMWNRuOANqquLYQ+JYWKeP4kagkVp0aAWp7mH5IOI0rp0A6qE2you4ep9N/nKvHDrtypwhYBWprsgTUXXMHnAWGmyuHGYWxNYBV9AARPdAvZfb8ggtuwibcOULlyK4DdVNbDTAN1/BDBE1ve6WZDcrc386KhqUGj/yoRyPjNZ46uZiOjRr3cdY6yUZoCwzzxvm5vle6mEbLjHgjGEMQMArzM9 vendor@example.com .. note:: ``authorized_keys.xml`` allows you to specify the group whose public key should be allowed. This retrieves the public key specific to that group (if it exists), *not* the public key for all hosts in that group. This is due to the performance penalties that would be imposed by that approach. Similarly, it is not possible to allow access from all keys for a given user (i.e., at a given path). 
Hopefully, the performance concerns can be resolved in a future release and these features can be added. .. _server-plugins-generators-cfg-ssl-certificates: SSL Keys and Certificates ========================= Cfg can also create SSL keys and certs on the fly, and store the generated data in the repo so that subsequent requests do not result in repeated key/cert recreation. In the event that a new key or cert is needed, the old file can simply be removed from the repository, and the next time that host checks in, a new file will be created. If that file happens to be the key, any dependent certificates will also be regenerated. See also :ref:`appendix-guides-sslca_howto` for a detailed example that uses the SSL key management feature to automate Bcfg2 certificate authentication. Getting started --------------- In order to use the SSL certificate generation feature, you must first have at least one CA configured on your system. For details on setting up your own OpenSSL based CA, please see http://www.openssl.org/docs/apps/ca.html for details of the suggested directory layout and configuration directives. For SSL cert generation to work, the openssl.cnf (or other configuration file) for that CA must contain full (not relative) paths. #. Add a section to your ``/etc/bcfg2.conf`` called ``sslca_foo``, replacing foo with the name you wish to give your CA so you can reference it in certificate definitions. (If you only have one CA, you can name it ``sslca_default``, and it will be the default CA for all other operations.) #. Under that section, add a ``config`` option that gives the location of the ``openssl.cnf`` file for your CA. #. If necessary, add a ``passphrase`` option containing the passphrase for the CA's private key. If no passphrase is entry exists, it is assumed that the private key is stored unencrypted. #. Optionally, add a ``chaincert`` option that points to the location of your ssl chaining certificate. This is used when preexisting certificate hostfiles are found, so that they can be validated and only regenerated if they no longer meet the specification. If you're using a self signing CA this would be the CA cert that you generated. If the chain cert is a root CA cert (e.g., if it is a self-signing CA), also add an entry ``root_ca = true``. If ``chaincert`` is omitted, certificate verification will not be performed. #. Once all this is done, you should have a section in your ``/etc/bcfg2.conf`` that looks similar to the following:: [sslca_default] config = /etc/pki/CA/openssl.cnf passphrase = youReallyThinkIdShareThis? chaincert = /etc/pki/CA/chaincert.crt root_ca = true #. You are now ready to create key and certificate definitions. For this example we'll assume you've added Path entries for the key, ``/etc/pki/tls/private/localhost.key``, and the certificate, ``/etc/pki/tls/certs/localhost.crt`` to a bundle. #. Within the ``Cfg/etc/pki/tls/private/localhost.key`` directory, create a `sslkey.xml`_ file containing the following: .. code-block:: xml #. This will cause the generation of an SSL key when a client requests that Path. (By default, it will be a 2048-bit RSA key; see `sslkey.xml`_ for details on how to change the key type and size.) #. Similarly, create `sslcert.xml`_ in ``Cfg/etc/pki/tls/certs/localhost.crt/``, containing the following: .. code-block:: xml #. When a client requests the cert path, a certificate will be generated using the key hostfile at the specified key location, using the CA matching the ``ca`` attribute. ie. 
``ca="foo"`` will match ``[sslca_default]`` in your ``/etc/bcfg2.conf`` The :ref:`Bcfg2 bundle example ` contains entries to automate the process of setting up a CA. Configuration ------------- ``bcfg2.conf`` ~~~~~~~~~~~~~~ In ``bcfg2.conf``, you must declare your CA(s) in ``[sslca_]`` sections. At least one is required. Valid options are detailed below, in `Cfg Configuration`_. Only the ``config`` option is required; i.e., the simplest possible CA section is:: [sslca_default] config = /etc/pki/CA/openssl.cnf ``sslcert.xml`` ~~~~~~~~~~~~~~~ .. xml:schema:: sslca-cert.xsd :linktotype: :inlinetypes: CertType Example ^^^^^^^ .. code-block:: xml test.example.com ``sslkey.xml`` ~~~~~~~~~~~~~~ .. xml:schema:: sslca-key.xsd :linktotype: :inlinetypes: KeyType Example ^^^^^^^ .. code-block:: xml .. _server-plugins-generators-cfg-validation: Content Validation ================== To ensure that files with invalid content are not pushed out, you can provide a content validation script that will be run against each file. Create a file called ``:test`` inside the directory for the file you want to test. For example:: Cfg/etc/sudoers/:test You can also create host- and group-specific validators:: Cfg/etc/sudoers/:test.G80_foogroup Cfg/etc/sudoers/:test.H_bar.example.com A validator script has the following attributes: * It must be executable, or specify a valid bangpath; * The entire content of the file is passed to the validator on stdin; * The validator is not called with any flags or arguments; * The validator must return 0 on success and non-zero on failure; and * The validator must output a sensible error message on failure. For ``sudoers``, a very simple validator is:: #!/bin/sh visudo -cf - This uses the ``visudo`` command's built-in validation. If you wish to disable validation, this can be done with the following setting in ``bcfg2.conf``:: [cfg] validation=no If you have a very large number of validators, you may wish to disable validation by default to avoid slowing down the generation of configurations on the server, and use ``bcfg2-test`` (for instance, as a post-commit hook or as part of a code review process) to run validation. You can do this by setting ``validation=no`` in ``bcfg2.conf`` as described above, and then calling ``bcfg2-test`` with the ``--cfg-validation`` flag. File permissions ================ File permissions for entries handled by Cfg are controlled via the use of :ref:`server-info` files. Note that you **cannot** use both a Permissions entry and a Path entry to handle the same file. .. _server-plugins-generators-cfg-configuration: Cfg Configuration ================= The behavior of many bits of the Cfg plugin can be configured in ``bcfg2.conf`` with the following options. In addition to ``privkey.xml`` and ``authorized_keys.xml``, described above, the behavior of the SSH key generation feature can be influenced by several options in the ``[sshkeys]`` section of ``bcfg2.conf``: +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | Section | Option | Description | Values | Default | +=============+================+=========================================================+=======================+============+ | ``cfg`` | ``passphrase`` | Use the named passphrase to encrypt created data on the | String | None | | | | filesystem. (E.g., SSH and SSL keys.) The passphrase | | | | | | must be defined in the ``[encryption]`` section. 
| | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``cfg`` | ``category`` | Generate data (e.g., SSH keys, SSL keys and certs) | String | None | | | | specific to groups in the given category. It is best to | | | | | | pick a category that all clients have a group from. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``cfg`` | ``validation`` | Whether or not to perform `Content Validation`_ | Boolean | True | | | | specific to groups in the given category. It is best to | | | | | | pick a category that all clients have a group from. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sshkeys`` | ``passphrase`` | Override the global Cfg passphrase with a specific | String | None | | | | passphrase for encrypting created SSH private keys. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sshkeys`` | ``category`` | Override the global Cfg category with a specific | String | None | | | | category for created SSH keys. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca`` | ``passphrase`` | Override the global Cfg passphrase with a specific | String | None | | | | passphrase for encrypting created SSL keys. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca`` | ``category`` | Override the global Cfg category with a specific | String | None | | | | category for created SSL keys and certs. | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca_*`` | ``config`` | Path to the openssl config for the CA | String | None | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca_*`` | ``passphrase`` | Passphrase for the CA private key | String | None | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca_*`` | ``chaincert`` | Path to the SSL chaining certificate for verification | String | None | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ | ``sslca_*`` | ``root_ca`` | Whether or not ```` is a root CA (as | Boolean | False | | | | opposed to an intermediate cert) | | | +-------------+----------------+---------------------------------------------------------+-----------------------+------------+ See :ref:`server-encryption` for more details on encryption in Bcfg2 in general. doc/server/plugins/generators/decisions.txt000066400000000000000000000057651303523157100215210ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-generators-decisions: ========= Decisions ========= This page describes the Decisions plugin. The client has support for a centralized set of per-entry installation decisions. This approach is needed when particular changes are deemed "high risk"; this gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available. 
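For example (treating the exact invocation as an illustrative sketch; the decision modes and the relevant client options are described under `Decision Mode`_ below), an administrator providing that supervision could apply a centrally served whitelist interactively::

    # prompt for each correction, applying only entries on the server-side whitelist
    bcfg2 -v -I -l whitelist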
Because collaborative configuration is one of the remaining hard issues in configuration management, these issues typically crop up in environments with several administrators and much configuration variety. In these cases, the client can be configured to run in either a whitelist or blacklist mode, wherein a list of entries is downloaded from the server. The client uses this list to determine which incorrect entries should be corrected during the current run of the installation tool. The Decisions plugin is the only stock plugin that generates entries for client's whitelists or blacklists. .. note:: If the client is not explicitly configured to run in whitelist or blacklist mode, the list of entries is not downloaded and decisions is not used. See `Decision Mode`_ below. The Decisions plugin uses a directory in the Bcfg2 repository called Decisions, which may contain two files: ``whitelist.xml`` and ``blacklist.xml``. These files have a simple format: .. xml:type:: DecisionsType :linktotype: :noautodep: py:genshiElements For example: .. code-block:: xml $ cat Decisions/whitelist.xml This example, included as a whitelist due to its name, enables all services, and the path entry named ``/etc/apt/apt.conf``. All these entries must already be present in your repository, the Decisions plugin just references them. In whitelist mode, only the given items are applied to the client; all other entry installation will be surpressed. In blacklist mode, every entry that is not blacklisted will be installed. When a client asks for its whitelist or blacklist, all of the files pertaining to that client of the correct type are aggregated into a single list. This list is sent to the client. .. note:: Using this plugin does not present additional prompts or safety nets to the administrator running the client, you have to control these via their respective options (``-I`` or ``-n``, for example). Decision Mode ============= The whitelist or blacklist is only generated when a client is run in whitelist or blacklist mode. This can either be set at the command line with the appropriate option (``-l (whitelist|blacklist)``), or in ``bcfg2.conf`` by setting ``decision`` in the ``client`` section to ``whitelist`` or ``blacklist``). Client behavior is not controlled unless the decision mode is set. If you do not use Decisions, all your entries will be installed normally. doc/server/plugins/generators/examples/000077500000000000000000000000001303523157100206015ustar00rootroot00000000000000doc/server/plugins/generators/examples/cheetah/000077500000000000000000000000001303523157100222025ustar00rootroot00000000000000doc/server/plugins/generators/examples/cheetah/crontab.txt000066400000000000000000000021131303523157100243700ustar00rootroot00000000000000.. -*- mode: rst -*- ============================ Writing crontab with Cheetah ============================ This example randomizes the time of cron.daily execution with a stable result. Cron.daily is run at a consistent, randomized time between midnight and 7am.:: #import random #silent random.seed($self.metadata.hostname) # /etc/crontab: system-wide crontab # Unlike any other crontab you don't have to run the `crontab` # command to install the new version when you edit this file. # This file also has a username field, that none of the other crontabs do. 
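# The two random.randrange() substitutions in the cron.daily entry below are
# expanded by Cheetah when the file is built; because the random generator was
# seeded with the client hostname above, each host gets a stable but
# host-specific time for its cron.daily run.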
SHELL=/bin/sh PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin://bin # m h dom mon dow user command 17 * * * * root run-parts --report /etc/cron.hourly $random.randrange(0,59) $random.randrange(0,6) * * * root test -x /usr/sbin/anacron || run-parts --report /etc/cron.daily 47 6 * * 7 root test -x /usr/sbin/anacron || run-parts --report /etc/cron.weekly 52 6 1 * * root test -x /usr/sbin/anacron || run-parts --report /etc/cron.monthly. doc/server/plugins/generators/examples/cheetah/simple.txt000066400000000000000000000021031303523157100242300ustar00rootroot00000000000000.. -*- mode: rst -*- ========================= Basic Cheetah Templates ========================= This simple example demonstrates basic usage of Cheetah templates. ``/var/lib/bcfg2/Cfg/foo/foo.cheetah`` .. code-block:: none Hostname is $self.metadata.hostname Filename is $self.path Template is $self.source_path Groups: #for $group in $self.metadata.groups: * $group #end for Categories: #for $category in $self.metadata.categories: * $category -- $self.metadata.categories[$category] #end for Probes: #for $probe in $self.metadata.Probes: * $probe -- $self.metadata.Probes[$probe] #end for Output ====== .. code-block:: xml Hostname is topaz.mcs.anl.gov Filename is /foo Template is /var/lib/bcfg2/Cfg/foo/foo.cheetah Groups: * desktop * mcs-base * ypbound * workstation * xserver * debian-sarge * debian * a Categories: * test -- a Probes: * os -- debian doc/server/plugins/generators/examples/genshi/000077500000000000000000000000001303523157100220565ustar00rootroot00000000000000doc/server/plugins/generators/examples/genshi/bcfg2-cron.txt000066400000000000000000000013521303523157100245420ustar00rootroot00000000000000.. -*- mode: rst -*- bcfg2-cron ========== As submitted by Kamil Kisiel The following is my ``/etc/cron.d/bcfg2`` file. It uses the python random module seeded with the client hostname to generate a random time for the client to check in. The hostname seed ensures the generated file is the same each time the client checks in. This cron file helps to distribute the load on the Bcfg2 server since not all machines are checking in at the same time.:: {% python from genshi.builder import tag import random random.seed(metadata.hostname) %}\ ${random.randint(0,60)} * * * * root /usr/sbin/bcfg2 &> /dev/null You can apply the same concept to the other time fields by adding another ``${random.randint()}`` call. doc/server/plugins/generators/examples/genshi/clientsxml.txt000066400000000000000000000072161303523157100250070ustar00rootroot00000000000000.. -*- mode: rst -*- clients.xml =========== As submitted by dclark Here is an example of maintaining the bcfg2 server's ``/var/lib/bcfg2/Metadata/clients.xml`` file using Genshi templates. There are two main advantages: #. Password storage is centralized in the ``Properties/passwords.xml`` file this helps maintain consistency, makes changing passwords easier, and also makes it easier to share your configurations with other sites/people. #. You can template the file using Genshi's `{% def %}` syntax, which makes `clients.xml` much more readable. An important thing to note is how the `name` variable is handled - when just referring to it the standard `${name}` syntax is used, but when it is used as a variable in the expression to get the password, `password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"`, it is just referred to as `name`. 
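As a minimal sketch of that pattern (the full template follows below; the ``Client`` attributes shown here are illustrative, and only the quoted password expression is taken verbatim from the paragraph above):

.. code-block:: xml

    {% def dynamic(profile, name) %}\
    <Client profile="${profile}" name="${name}"
            password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"/>\
    {% end %}\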
There is the disadvantage that sometimes 2 passes will be needed to get to a consistent state. Possible improvements: #. Wrapper for bcfg2 client runs on the bcfg2 server, perhaps using a call to `bcfg2-info buildfile`, so clients.xml is always generated before everything else happens (since the state of clients.xml can influence everything else bcfg2-server does). #. We really don't care what the client passwords are, just that they exist, so instead of listing them a master password combined with some kind of one-way hash based on the `name` might make more sense, and make ``Properties/passwords.xml`` easier to maintain. * Cfg/var/lib/bcfg2/Metadata/clients.xml/clients.xml.genshi: .. code-block:: xml {# Doc: http://bcfg2.org/wiki/Authentication #}\ {% def static(profile,name,address) %} \ {% end %}\ {% def dynamic(profile,name) %} \ {% end %}\ \ ${static('group-server-collab','campaigns.example.com','192.168.111.1')} ${static('group-server-collab','info.office.example.com','192.168.111.2')} ${static('group-server-config','config.example.com','192.168.111.3')} ${dynamic('group-project-membercard','membercard')} ${dynamic('group-person-somename','somename.office.example.com')} * Properties/passwords.xml snippit: .. code-block:: xml FAKEpassword1 FAKEpassword2 FAKEpassword3 FAKEpassword4 FAKEpassword5 doc/server/plugins/generators/examples/genshi/ganglia.txt000066400000000000000000000121231303523157100242200ustar00rootroot00000000000000.. -*- mode: rst -*- ganglia ======= Another interesting example of Genshi templating is to automatically generate ``gmond``/``gmetad`` configuration files. The idea is that each cluster is headless: it communicates with the rest of the cluster members on an isolated multicast IP address and port. Any of the cluster members is therefore isolated on that particular ip/port pair. Additionally, each ``gmond`` instance **also** listens on UDP. This allows for any of the cluster members to be polled for information on the entire cluster! The second part of the trick is in ``gmetad.conf``. Here, we dynamically generate a list of clusters (based on profiles names) and a list of members to poll (based on the clients in said profiles). As the number of profiles and client grows, this list will grow automatically as well. When a new host is added, ``gmetad`` will receive an updated configuration and act accordingly. There **is** one caveat though. The ``gmetad.conf`` parser is hard coded to read 16 arguments per ``data_source`` line. If you have more than 15 nodes in a cluster, you will see a warning in the logs. You can either ignore it, or truncate the list to the first 15 members. In our environment, a profile is a one to one match with the role of that particular host. You can also do this based on groups, or any other client property. Bundler/ganglia.xml ------------------- .. code-block:: xml Rules/services-ganglia.xml -------------------------- .. code-block:: xml Cfg/etc/ganglia/gmetad.conf/gmetad.conf.genshi ---------------------------------------------- .. 
code-block:: none {% python client_metadata = metadata.query.all() profile_array = {} seen = [] for item in client_metadata: if item.profile not in seen: seen.append(item.profile) profile_array[item.profile]=[] profile_array[item.profile].append(item.hostname) seen.sort() %}\ gridname "Our Grid" {% for profile in seen %} data_source "${profile}" \ {% for host in profile_array[profile] %}\ ${host} \ {% end %}\ {% end %} rrd_rootdir "/var/lib/ganglia/rrds" Cfg/etc/ganglia/gmond.conf/gmod.conf.genshi ------------------------------------------- .. code-block:: none {% python import random random.seed(metadata.profile) last_octet=random.randint(2,254) %}\ /* $$Id$$ $$HeadURL$$ */ /* This configuration is as close to 2.5.x default behavior as possible The values closely match ./gmond/metric.h definitions in 2.5.x */ globals { daemonize = yes setuid = yes user = nobody debug_level = 0 max_udp_msg_len = 1472 mute = no deaf = no host_dmax = 1800 /* 30 minutes */ cleanup_threshold = 604800 /*secs=1 week */ gexec = no send_metadata_interval = 0 } /* If a cluster attribute is specified, then all gmond hosts are wrapped inside * of a tag. If you do not specify a cluster tag, then all will * NOT be wrapped inside of a tag. */ cluster { name = "${metadata.profile}" owner = "user@company.net" latlong = "unspecified" url = "unspecified" } /* The host section describes attributes of the host, like the location */ host { location = "unspecified" } /* Feel free to specify as many udp_send_channels as you like. Gmond used to only support having a single channel */ udp_send_channel { host = ${metadata.hostname} port = 8649 } udp_send_channel { mcast_join = 239.2.11.${last_octet} port = 8649 ttl = 1 } /* You can specify as many udp_recv_channels as you like as well. */ udp_recv_channel { port = 8649 bind = ${metadata.hostname} } udp_recv_channel { mcast_join = 239.2.11.${last_octet} bind = 239.2.11.${last_octet} port = 8649 } /* You can specify as many tcp_accept_channels as you like to share an xml description of the state of the cluster */ tcp_accept_channel { port = 8649 } /* Each metrics module that is referenced by gmond must be specified and loaded. If the module has been statically linked with gmond, it does not require a load path. However all dynamically loadable modules must include a load path. */ modules { /* [snip] */ doc/server/plugins/generators/examples/genshi/grubconf.txt000066400000000000000000000025651303523157100244340ustar00rootroot00000000000000.. -*- mode: rst -*- grub.conf ========= Automate the build of grub.conf based on probe data. In this case, we take the results from three probes, serial-console-speed, grub-serial-order, and current-kernel to fill in a few variables. In addition, we want at least two entries set up for the kernel: a multiuser and a single user option. .. code-block:: none # grub.conf generated by Bcfg2 # # Note that you do not have to rerun grub after making changes to this file # NOTICE: You have a /boot partition. This means that # all kernel and initrd paths are relative to /boot/, eg. 
# root (hd0,0) # kernel /vmlinuz-version ro root=/dev/VolGroup00/LogVol00 # initrd /initrd-version.img #boot=/dev/sda default=0 timeout=5 serial --unit=0 --speed=${metadata.Probes['serial-console-speed']} terminal --timeout=5 ${metadata.Probes['grub-serial-order']} {% for kernbootoption in ["", "single"] %}\ title Red Hat Enterprise Linux Server (${metadata.Probes['current-kernel']})) ${kernbootoption} root (hd0,0) kernel /vmlinuz-${metadata.Probes['current-kernel']} ro root=/dev/VolGroup00/LogVol00 console=ttyS0,${metadata.Probes['serial-console-speed']}n8 console=tty0 rhgb quiet ${kernbootoption} initrd /initrd-${metadata.Probes['current-kernel']}.img {% end %}\ doc/server/plugins/generators/examples/genshi/hosts.txt000066400000000000000000000010231303523157100237530ustar00rootroot00000000000000.. -*- mode: rst -*- hosts ===== This is an example of creating ``/etc/hosts`` based on metadata.hostname:: # Do not remove the following line, or various programs # that require network functionality will fail. 127.0.0.1 localhost.localdomain localhost ::1 localhost6.localdomain6 localhost6 {% python import socket import re ip = socket.gethostbyname(metadata.hostname) shortname = re.split("\.", metadata.hostname) %}\ ${ip} ${metadata.hostname} ${shortname[0]} doc/server/plugins/generators/examples/genshi/iptables.txt000066400000000000000000000222231303523157100244230ustar00rootroot00000000000000.. -*- mode: rst -*- ========== iptables ========== * Setup a Genshi base iptables file that contains the basic rules you want every host to have * To be safe you should have a client side IptablesDeadmanScript if you intend on having bcfg2 bounce iptables upon rule updates .. note:: When updating files in the ``includes`` directory, you will need to `touch` the Genshi template to regenerate the template contents. /repository/Cfg/etc/sysconfig/iptables/iptables.genshi ====================================================== .. code-block:: none {% python from genshi.builder import tag import os,sys import Bcfg2.Options opts = { 'repo': Bcfg2.Options.SERVER_REPOSITORY } setup = Bcfg2.Options.OptionParser(opts) setup.parse('--') repo = setup['repo'] basedir = '%s' % (repo) # for instance: bcfg2BaseDir = basedir + name + '/' def checkHostFile(hostName, type): fileName = bcfg2BaseDir + type + '.H_' + hostName if os.path.isfile(fileName)==True : return fileName else: return fileName def checkGroupFile(groupName, type): fileName = bcfg2BaseDir + type + '.G_' + groupName if os.path.isfile(fileName)==True : return fileName else: return fileName %}\ # BCFG2 GENERATED IPTABLES # DO NOT CHANGE THIS # $$Id$$ # Templates live in ${bcfg2BaseDir} # Manual customization of this file will get reverted. 
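# The include directives below pull in per-group and per-host fragments (when
# present) whose file names are built by the checkGroupFile() and
# checkHostFile() helpers defined above, for example
# custom-filter.G_<groupname> or custom-filter.H_<hostname> alongside this
# template.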
# ----------------------------- FILTER --------------------------------- # # Default CHAINS for FILTER: *filter :INPUT DROP [0:0] :FORWARD DROP [0:0] :OUTPUT ACCEPT [0:0] :NO-SMTP - [0:0] #Default rules #discard malicious packets -A INPUT -p tcp --tcp-flags ALL ACK,RST,SYN,FIN -j DROP -A INPUT -p tcp --tcp-flags SYN,FIN SYN,FIN -j DROP -A INPUT -p tcp --tcp-flags SYN,RST SYN,RST -j DROP #Allow incoming ICMP -A INPUT -p icmp -m icmp -j ACCEPT #Accept localhost traffic -A INPUT -i lo -j ACCEPT # Allow already established sessions to remain -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT # Deny inbound SMTP delivery (still allows outbound connections) -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --dport 25 -j NO-SMTP -A NO-SMTP -j LOG --log-prefix " Incoming SMTP (denied) " -A NO-SMTP -j DROP # Allow SSH Access :SSH - [0:0] -A INPUT -p tcp -m state --state NEW -m tcp --tcp-flags FIN,SYN,RST,ACK SYN --dport 22 -j SSH -A SSH -s 192.168.0.0/255.255.0.0 -j ACCEPT # Allow Ganglia Access -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --src 192.168.1.1 --dport 8649 -j ACCEPT # Gmetad access to gmond -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --src 192.168.1.1 --dport 8649 -j ACCEPT # Gmond UDP multicast -A INPUT -m state --state NEW -m udp -p udp --dport 8649 -j ACCEPT {% if metadata.groups %}\ # group custom FILTER rules: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'custom-filter')} %}\ {% end %}\ {% end %}\ # host-specific FILTER rules: {% include ${checkHostFile(metadata.hostname, 'custom-filter')} %}\ COMMIT # ------------------------------- NAT ---------------------------------- # *nat # Default CHAINS for NAT: :PREROUTING ACCEPT [0:0] :OUTPUT ACCEPT [0:0] :POSTROUTING ACCEPT [0:0] {% if metadata.groups %}\ # group NAT for PREROUTING: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'nat-prerouting')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group NAT for OUTPUT: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'nat-output')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group NAT for POSTROUTING: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'nat-postrouting')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group custom NAT rules: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'custom-nat')} %}\ {% end %}\ {% end %}\ # host-specific NAT ruls: {% include ${checkHostFile(metadata.hostname, 'custom-nat')} %}\ COMMIT # ----------------------------- MANGLE -------------------------------- # *mangle # Default CHAINS for MANGLE: :PREROUTING ACCEPT [0:0] :INPUT ACCEPT [0:0] :FORWARD ACCEPT [0:0] :OUTPUT ACCEPT [0:0] :POSTROUTING ACCEPT [0:0] {% if metadata.groups %}\ # group MANGLE for PREROUTING: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'mangle-prerouting')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group MANGLE for INPUT: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'mangle-input')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group MANGLE for FORWARD: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'mangle-forward')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group MANGLE for OUTPUT: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'mangle-output')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group MANGLE for POSTROUTING 
rules: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'mangle-postrouting')} %}\ {% end %}\ {% end %}\ {% if metadata.groups %}\ # group custom MANGLE rules: {% for group in metadata.groups %}\ {% include ${checkGroupFile(group,'custom-mangle')} %}\ {% end %}\ {% end %}\ # host-specific MANGLE rules: {% include ${checkHostFile(metadata.hostname, 'custom-mangle')} %}\ COMMIT Cfg/etc/sysconfig/iptables/custom-filter.G_mysql-server ------------------------------------------------------- .. code-block:: none :MYSQL - [0:0] -A INPUT -p tcp -m state --state NEW -m tcp --dport 3306 --tcp-flags FIN,SYN,RST,ACK SYN -j MYSQL -A MYSQL -s 192.168.0.0/255.255.0.0 -j ACCEPT For a host that is in the mysql-server group you get an iptables file that looks like the following:: # BCFG2 GENERATED IPTABLES # DO NOT CHANGE THIS # $Id: template.newtxt 5402 2009-08-19 22:50:06Z unixmouse$ # Templates live in /var/lib/bcfg2/Cfg/etc/sysconfig/iptables/ # Manual customization of this file will get reverted. # ----------------------------- FILTER --------------------------------- # # Default CHAINS for FILTER: *filter :INPUT DROP [0:0] :FORWARD DROP [0:0] :OUTPUT ACCEPT [0:0] :NO-SMTP - [0:0] #Default rules #discard malicious packets -A INPUT -p tcp --tcp-flags ALL ACK,RST,SYN,FIN -j DROP -A INPUT -p tcp --tcp-flags SYN,FIN SYN,FIN -j DROP -A INPUT -p tcp --tcp-flags SYN,RST SYN,RST -j DROP # Allow incoming ICMP -A INPUT -p icmp -m icmp -j ACCEPT # Accept localhost traffic -A INPUT -i lo -j ACCEPT # Allow already established sessions to remain -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT # Deny inbound SMTP delivery (still allows outbound connections) -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --dport 25 -j NO-SMTP -A NO-SMTP -j LOG --log-prefix " Incoming SMTP (denied) " -A NO-SMTP -j DROP # Allow SSH Access :SSH - [0:0] -A INPUT -p tcp -m state --state NEW -m tcp --tcp-flags FIN,SYN,RST,ACK SYN --dport 22 -j SSH -A SSH -s 192.168.0.0/255.255.0.0 -j ACCEPT # Allow Ganglia Access -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --src 192.168.1.1 --dport 8649 -j ACCEPT #Gmetad access to gmond -A INPUT -m state --state NEW -m tcp -p tcp --tcp-flags FIN,SYN,RST,ACK SYN --src 192.168.1.1 --dport 8649 -j ACCEPT #Gmond UDP multicast -A INPUT -m state --state NEW -m udp -p udp --dport 8649 -j ACCEPT # group custom FILTER rules: :MYSQL - [0:0] -A INPUT -p tcp -m state --state NEW -m tcp --dport 3306 --tcp-flags FIN,SYN,RST,ACK SYN -j MYSQL -A MYSQL -s 192.168.0.0/255.255.0.0 -j ACCEPT # host-specific FILTER rules: COMMIT # ------------------------------- NAT ---------------------------------- # *nat # Default CHAINS for NAT: :PREROUTING ACCEPT [0:0] :OUTPUT ACCEPT [0:0] :POSTROUTING ACCEPT [0:0] # group NAT for PREROUTING: # group NAT for OUTPUT: # group NAT for POSTROUTING: # group custom NAT rules: # host-specific NAT rules: COMMIT # ----------------------------- MANGLE -------------------------------- # *mangle # Default CHAINS for MANGLE: :PREROUTING ACCEPT [0:0] :INPUT ACCEPT [0:0] :FORWARD ACCEPT [0:0] :OUTPUT ACCEPT [0:0] :POSTROUTING ACCEPT [0:0] # group MANGLE for PREROUTING: # group MANGLE for INPUT: # group MANGLE for FORWARD: # group MANGLE for OUTPUT: # group MANGLE for POSTROUTING rules: # group custom MANGLE rules: # host-specific MANGLE rules: COMMIT doc/server/plugins/generators/examples/genshi/motd.txt000066400000000000000000000056301303523157100235660ustar00rootroot00000000000000.. 
-*- mode: rst -*- ====== motd ====== The following template automatically generates a MOTD (message of the day) file that describes the system in terms of its Bcfg2 metadata and probe responses. It conditionally displays groups, categories, and probe responses, if there exists any data for them. Cfg/etc/motd/motd.genshi ======================== .. code-block:: none ------------------------------------------------------------------------ GOALS FOR SERVER MANGED BY BCFG2 ------------------------------------------------------------------------ Hostname is ${metadata.hostname} Groups: {% for group in metadata.groups %}\ * ${group} {% end %}\ {% if metadata.categories %}\ Categories: {% for category in metadata.categories %}\ * ${category} {% end %}\ {% end %}\ {% if metadata.Probes %}\ Probes: {% for probe, value in metadata.Probes.iteritems() %}\ * ${probe} \ ${value} {% end %}\ {% end %}\ ------------------------------------------------------------------------- ITOPS MOTD ------------------------------------------------------------------------- Please create a Ticket for any system level changes you need from IT. This template gets the hostname, groups membership of the host, categories of the host (if any), and result of probes on the host (if any). The template formats this in with a header and footer that makes it visually more appealing. Output ====== One possible output of this template would be the following:: ------------------------------------------------------------------------ GOALS FOR SERVER MANGED BY BCFG2 ------------------------------------------------------------------------ Hostname is cobra.example.com Groups: * oracle-server * centos5-5.2 * centos5 * redhat * x86_64 * sys-vmware Categories: * os-variant * os * database-server * os-version Probes: * arch x86_64 * network intranet_network * diskspace Filesystem Size Used Avail Use% Mounted on /dev/mapper/VolGroup00-LogVol00 18G 2.1G 15G 13% / /dev/sda1 99M 13M 82M 13% /boot tmpfs 3.8G 0 3.8G 0% /dev/shm /dev/mapper/mhcdbo-clear 1.5T 198M 1.5T 1% /mnt/san-oracle * virtual vmware ------------------------------------------------------------------------- IT MOTD ------------------------------------------------------------------------- Please create a Ticket for any system level changes you need from IT. One way to make this even more useful, is to only include the result of certain probes. It would also be a nice feature to be able to include customer messages on a host or group level. doc/server/plugins/generators/examples/genshi/mycnf.txt000066400000000000000000000012471303523157100237370ustar00rootroot00000000000000.. -*- mode: rst -*- my.cnf ====== The following template generates a ``server-id`` based on the last two numeric parts of the IP address. The "slave" portion of the configuration only applies to machines in the "slave" group:: {% python import socket parts = socket.gethostbyname(metadata.hostname).split('.') server_id = parts[2] + parts[3] %}\ [mysqld] # [snip] server-id = ${server_id} # Replication configuration {% if "slave" in metadata.groups %}\ relay-log = /data01/mysql/log/mysql-relay-bin log-slave-updates = 1 {% end %}\ sync-binlog = 1 #read-only = 1 #report-host = # [snip] doc/server/plugins/generators/examples/genshi/test.txt000066400000000000000000000122501303523157100235760ustar00rootroot00000000000000.. -*- mode: rst -*- test ==== As submitted by dclark This file just shows you what's available. It assumes a ``/var/lib/bcfg2/Properties/test.xml`` file with an entry like this: .. 
code-block:: xml fakeBCFG2password :: Hostname is ${metadata.hostname} Groups: {% for group in metadata.groups %}\ ${group} \ {% end %}\ {% if metadata.categories %}\ Categories: {% for category in metadata.categories %}\ ${category} \ {% end %}\ {% end %}\ {% if metadata.Probes %}\ Probes: {% for probe, value in metadata.Probes.iteritems() %}\ $probe $value {% end %}\ {% end %}\ Two main ways to get the same property value: ${metadata.Properties['test.xml'].xdata.find('password').find('bcfg2').text} ${metadata.Properties['test.xml'].xdata.xpath('password/bcfg2')[0].text} One way to get information about metadata and properties: dir(metadata): {% for var in dir(metadata) %}\ ${var} \ {% end %} dir(metadata.Properties.xdata): {% for var in dir(metadata.Properties.xdata) %}\ ${var} \ {% end %} dir(metadata.Properties.xdata.entries): {% for var in dir(metadata.Properties.xdata.entries) %}\ ${var} \ {% end %} dir(metadata.Properties.xdata.label): {% for var in dir(metadata.Properties.xdata.label) %}\ ${var} \ {% end %} dir(metadata.Properties.xdata.name): {% for var in dir(metadata.Properties.xdata.name) %}\ ${var} \ {% end %} dir(metadata.Properties.xdata.properties): {% for var in dir(metadata.Properties.xdata.properties) %}\ ${var} \ {% end %} When the above file is saved as ``Cfg/test/test.genshi`` and generated with ``bcfg2-info buildfile /test test.hostname.org``, the results look like this (below reformatted a little bit to fit in 80 columns):: Hostname is test.hostname.org Groups: bcfg2-server Two main ways to get the same property value: fakeBCFG2password fakeBCFG2password One way to get information about metadata and properties: dir(metadata): __class__ __delattr__ __dict__ __doc__ __getattribute__ __hash__ __init__ __module__ __new__ __reduce__ __reduce_ex__ __repr__ __setattr__ __str__ __weakref__ all bundles categories get_clients_by_group get_clients_by_profile groups hostname inGrouppassword probes uuid dir(metadata.Properties.xdata): HandleEvent Index __class__ __delattr__ __dict__ __doc__ __getattribute__ __hash__ __identifier__ __init__ __iter__ __module__ __new__ __reduce__ __reduce_ex__ __repr__ __setattr__ __str__ __weakref__ entries label name properties dir(metadata.Properties.xdata.entries): __add__ __class__ __contains__ __delattr__ __delitem__ __delslice__ __doc__ __eq__ __ge__ __getattribute__ __getitem__ __getslice__ __gt__ __hash__ __iadd__ __imul__ __init__ __iter__ __le__ __len__ __lt__ __mul__ __ne__ __new__ __reduce__ __reduce_ex__ __repr__ __reversed__ __rmul__ __setattr__ __setitem__ __setslice__ __str__ append count extend index insert pop remove reverse sort dir(metadata.Properties.xdata.label): __add__ __class__ __contains__ __delattr__ __doc__ __eq__ __ge__ __getattribute__ __getitem__ __getnewargs__ __getslice__ __gt__ __hash__ __init__ __le__ __len__ __lt__ __mod__ __mul__ __ne__ __new__ __reduce__ __reduce_ex__ __repr__ __rmod__ __rmul__ __setattr__ __str__ capitalize center count decode encode endswith expandtabs find index isalnum isalpha isdigit islower isspace istitle isupper join ljust lower lstrip partition replace rfind rindex rjust rpartition rsplit rstrip split splitlinesstartswith strip swapcase title translate upper zfill dir(metadata.Properties.xdata.name): __add__ __class__ __contains__ __delattr__ __doc__ __eq__ __ge__ __getattribute__ __getitem__ __getnewargs__ __getslice__ __gt__ __hash__ __init__ __le__ __len__ __lt__ __mod__ __mul__ __ne__ __new__ __reduce__ __reduce_ex__ __repr__ __rmod__ __rmul__ __setattr__ __str__ capitalize 
center count decode encode endswith expandtabs find index isalnum isalpha isdigit islower isspace istitle isupper join ljust lower lstrip partition replace rfind rindex rjust rpartition rsplit rstrip split splitlinesstartswith strip swapcase title translate upper zfill dir(metadata.Properties.xdata.properties): __class__ __contains__ __copy__ __deepcopy__ __delattr__ __delitem__ __delslice__ __doc__ __getattribute__ __getitem__ __getslice__ __hash__ __init__ __iter__ __len__ __new__ __nonzero__ __reduce__ __reduce_ex__ __repr__ __reversed__ __setattr__ __setitem__ __setslice__ __str__ _init addnext addprevious append attrib clear extend find findall findtext get getchildren getiterator getnext getparent getprevious getroottree index insert items iterancestors iterchildren iterdescendants itersiblings keys makeelement nsmap prefix remove replace set sourceline tag tail text values xpath doc/server/plugins/generators/examples/jinja2/000077500000000000000000000000001303523157100217565ustar00rootroot00000000000000doc/server/plugins/generators/examples/jinja2/extends.txt000066400000000000000000000023211303523157100241670ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst =========================== Extending Jinja2 Templates =========================== Jinja2 templates can use the {% extends %} directive to inherit file fragments which might be common to many configuration files. Use the "jinja2_include" suffix for file fragments you will extend. ``/var/lib/bcfg2/Cfg/foo/common.jinja2_include`` .. code-block:: none [global] setting1 = true setting2 = false {% block setting3 %}setting3 = "default value"{% endblock %} {% block section1 -%} [section1] setting4 = true setting5 = false {%- endblock %} {% block section2 -%} [section2] setting6 = true setting7 = false {%- endblock %} ``/var/lib/bcfg2/Cfg/foo/foo.H_hostname.jinja2`` .. code-block:: none {% extends "common.jinja2_include" %} {% block setting3 %}setting3 = "new value"{% endblock %} {% block section1 -%} [section1] setting4 = false setting5 = false {%- endblock %} Output ====== .. code-block:: none [global] setting1 = true setting2 = false setting3 = "new value" [section1] setting4 = false setting5 = false [section2] setting6 = true setting7 = false doc/server/plugins/generators/examples/jinja2/include.txt000066400000000000000000000017561303523157100241530ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst =========================== Including Jinja2 Templates =========================== Jinja2 templates can use the {% include %} directive to include file fragments which might be common to many configuration files. Use the "jinja2_include" suffix for file fragments you will include. ``/var/lib/bcfg2/Cfg/foo/foo.jinja2`` .. code-block:: none [global] setting1 = true setting2 = false {% for x in metadata.groups %}{% include x + '.jinja2_include' ignore missing %} {% endfor %} ``/var/lib/bcfg2/Cfg/foo/group1.jinja2_include`` .. code-block:: none [group1] setting3 = true setting4 = false ``/var/lib/bcfg2/Cfg/foo/group3.jinja2_include`` .. code-block:: none [group3] setting7 = true setting8 = false Output ====== .. code-block:: none [global] setting1 = true setting2 = false [group1] setting3 = true setting4 = false [group3] setting7 = true setting8 = false doc/server/plugins/generators/examples/jinja2/simple.txt000066400000000000000000000021221303523157100240050ustar00rootroot00000000000000.. 
-*- mode: rst -*- ========================= Basic Jinja2 Templates ========================= This simple example demonstrates basic usage of Jinja2 templates. ``/var/lib/bcfg2/Cfg/foo/foo.jinja2`` .. code-block:: none Hostname is {{ metadata.hostname }} Filename is {{ name }} Template is {{ source_path }} Groups: {% for group in metadata.groups -%} * {{ group }} {% endfor %} Categories: {% for category in metadata.categories -%} * {{ category }} -- {{ metadata.categories[category] }} {% endfor %} Probes: {% for probe in metadata.Probes -%} * {{ probe }} -- {{ metadata.Probes[probe] }} {% endfor %} Output ====== .. code-block:: xml Hostname is topaz.mcs.anl.gov Filename is /foo Template is /var/lib/bcfg2/Cfg/foo/foo.jinja2 Groups: * desktop * mcs-base * ypbound * workstation * xserver * debian-sarge * debian * a Categories: * test -- a Probes: * os -- debian doc/server/plugins/generators/nagiosgen.txt000066400000000000000000000144611303523157100215040ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-generators-nagiosgen: ========= NagiosGen ========= This page describes the installation and use of the `NagiosGen`_ plugin. .. _NagiosGen: https://github.com/Bcfg2/bcfg2/blob/maint/src/lib/Bcfg2/Server/Plugins/NagiosGen.py Update ``/etc/bcfg2.conf``, adding NagiosGen to plugins:: plugins = Bundler,Cfg,...,NagiosGen Create the NagiosGen directory:: $ mkdir /var/lib/bcfg2/NagiosGen Create default host, and group specs in: ``/var/lib/bcfg2/NagiosGen/default-host.cfg``:: define host{ name default check_command check-host-alive check_interval 5 check_period 24x7 contact_groups admins event_handler_enabled 1 flap_detection_enabled 1 initial_state o max_check_attempts 10 notification_interval 0 notification_options d,u,r notification_period workhours notifications_enabled 1 process_perf_data 0 register 0 retain_nonstatus_information 1 retain_status_information 1 retry_interval 1 } ``/var/lib/bcfg2/NagiosGen/default-group.cfg``:: define service{ name default-service active_checks_enabled 1 passive_checks_enabled 1 obsess_over_service 0 check_freshness 0 notifications_enabled 1 event_handler_enabled 1 flap_detection_enabled 1 process_perf_data 0 retain_status_information 1 retain_nonstatus_information 1 is_volatile 0 check_period 24x7 max_check_attempts 4 check_interval 5 retry_interval 1 contact_groups admins notification_options w,u,c,r notification_interval 0 notification_period workhours } Create group configuration files (Named identical to Bcfg2 groups) and add services, and commands specific to the hostgroup (Bcfg2 group) in ``/var/lib/bcfg2/NagiosGen/base-group.cfg``:: define hostgroup{ hostgroup_name base alias base notes Notes } define service{ service_description NTP check_command check_ntp! use default-service hostgroup_name base } define command{ command_name check_ssh command_line $USER1$/check_ssh $ARG1$ $HOSTADDRESS$ } define service{ service_description SSH check_command check_ssh! use default-service hostgroup_name base } ``/var/lib/bcfg2/NagiosGen/web-server-group.cfg``:: define hostgroup{ hostgroup_name web-server alias Port 80 Web Servers notes UC/ANL Teragrid Web Servers Running on Port 80 } define command{ command_name check_http_80 command_line $USER1$/check_http $HOSTADDRESS$ } define service{ service_description HTTP:80 check_command check_http_80! use default-service hostgroup_name web-server } Create a nagios Bcfg2 bundle ``/var/lib/bcfg2/Bundler/nagios.xml`` .. code-block:: xml Assign clients to nagios groups in ``/var/lib/bcfg2/Metadata/groups.xml`` .. 
code-block:: xml Note that some of these files are built on demand, each time a client in group "nagios-server" checks in with the Bcfg2 server. Local nagios instances can be configured to use the NagiosGen directory in the Bcfg2 repository directly. Fine-Grained Configuration ========================== NagiosGen can be configured in excruciating detail by editing ``NagiosGen/config.xml``, which will let you set individual Nagios options for hosts or groups. E.g.: .. code-block:: xml Obviously the sort of fine-grained control you get from this overlaps to some degree with Nagios' own templating, so use it wisely and in moderation. ``NagiosGen/config.xml`` replaces the files ``Properties/NagiosGen.xml`` and ``NagiosGen/parents.xml`` in older versions of Bcfg2; your old configs can be migrated using the ``nagiosgen-convert.py`` tool. doc/server/plugins/generators/packages.txt000066400000000000000000000703211303523157100213050ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-generators-packages: ======== Packages ======== .. versionadded:: 1.0.0 This page documents the Packages plugin. Packages is an alternative to :ref:`Pkgmgr ` for specifying package entries for clients. Where Pkgmgr explicitly specifies package entry information, Packages delegates control of package version information to the underlying package manager, installing the latest version available through those channels. Limiting sources to groups ========================== ``Packages/sources.xml`` processes ```` and ```` tags just like Bundles. In addition to any groups or clients specified that way, clients must be a member of the appropriate architecture group as specified in a Source stanza. Memberships in architecture groups is needed so that Packages can map software sources to clients. There is no other way to handle this than to impose membership in the appropriate architecture group. When multiple sources are specified, clients are associated with each source to which they apply (based on group memberships, as described above). Packages and dependencies are resolved from all applicable sources. Setup ===== Three basic steps are required for Packages to work properly. #. Create Packages/`sources.xml`_. This file should look approximately like the example below, and describes both which software repositories should be used, and which clients are eligible to use each one. #. Ensure that clients are members of the proper groups. Each client should be a member of all of the groups listed in the ``sources.xml`` (like ubuntu-intrepid or centos-5.2 in the following examples), and one of the architecture groups listed in the source configuration (i386, amd64 or x86_64 in the following examples). '''Failure to do this will result in the source either not applying to the client, or only architecture independent packages being made available to the client.''' #. Add Package entries to bundles. #. Sit back and relax, as dependencies are resolved, and automatically added to client configurations. sources.xml ----------- ``sources.xml`` is where all package sources are configured for the Packages plugin. It processes ```` and ```` tags just like Bundles. The primary element in ``sources.xml`` is the Source tag: .. xml:element:: Source :noautodep: py:genshiElements Handling GPG Keys ----------------- .. versionadded:: 1.2.0 If you have yum libraries installed, Packages can automatically handle GPG signing keys for Yum and Pulp repositories. 
(You do not need to use the native yum resolver; if yum libraries are available, GPG signing keys can be handled automatically.) Simply specify the URL to the GPG key(s) for a repository with :xml:element:`GPGKey` elements: .. code-block:: xml x86_64 http://mirror.example.com/keys/RPM-GPG-KEY-CentOS-6 More than one ```` tag can be specified per Source. With the keys specified thusly, Packages will include the keys in the generated yum config file, and will ensure that the keys are imported on the client. There is no need to specify ```` tags for :ref:`Pulp sources `; that data is pulled directly from the Pulp REST API. Arbitrary Repo Options ---------------------- .. versionadded:: 1.2.3 You can specify arbitrary options to be added to the repository config on the server side, if you are using the native yum libraries, and on the client side if you are using the ability of Packages to automatically generate your Yum config. To do this, add an :xml:element:`Options` tag to a :xml:element:`Source`; all of its attributes will be added verbatim to the repository in the generated config. For instance: .. code-block:: xml x86_64 If you are using native yum libraries and need to set options only on the Bcfg2 server, you can set the :xml:attribute:`RepoOptionsType:serveronly` attribute to "true"; or, if you need to set options only on the client, you can set the :xml:attribute:`RepoOptionsType:clientonly` attribute to "true". For instance, if your Bcfg2 server needed to use a proxy to access a repo, and you wanted to expire metadata caches very quickly on the client, you could do: .. code-block:: xml x86_64 Prerequisite Resolution ======================= Packages provides a prerequisite resolution mechanism which has no analogue in Pkgmgr. During configuration generation, all structures are processed. After this phase, but before entry binding, a list of packages and the client metadata instance is passed into Packages' resolver. This process determines a superset of packages that will fully satisfy dependencies of all package entries included in structures, and reports any prerequisites that cannot be satisfied. Disabling dependency resolution ------------------------------- .. versionadded:: 1.1.0 Dependency resolution can be disabled by adding the following setting to ``bcfg2.conf`` in the ``packages`` section:: [packages] resolver=0 All metadata processing can be disabled as well:: [packages] metadata=0 This setting implies disabling the resolver. Blacklisting faulty dependencies -------------------------------- If you encounter an issue with faulty dependency resolution due to Packages, please file a bug report so that we can fix the problem in future releases. In the meantime, you can work around this issue by blacklisting the offending Package in your Sources. The :xml:element:`Blacklist` element should immediately follow the Component section of your source and should look like the following: .. code-block:: xml unwanted-packagename If you use the built-in :ref:`Yum config generator `, blacklisted packages will be added to the ``exclude`` list for the source. .. _packages-exampleusage: Example usage ============= Create a _`sources.xml` file in the Packages directory that looks something like this: .. code-block:: xml main universe i386 amd64 .. note:: .. versionadded:: 1.1.0 The default behavior of the Packages plugin is to not make any assumptions about which packages you want to have added automatically [#f1]_. 
For that reason, neither **Recommended** nor **Suggested** packages are added as dependencies by default. You will notice that the default behavior for apt is to add Recommended packages as dependencies. You can configure the Packages plugin to add recommended packages by adding the :xml:attribute:`SourceType:recommended` attribute, e.g.: .. code-block:: none .. [#f1] Bcfg2 will by default add **Essential** packages to the client specification. You can disable this behavior by setting the :xml:attribute:`SourceType:essential` attribute to *false*: .. code-block:: none Yum sources can be similarly specified: .. code-block:: xml os updates extras i386 x86_64 http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-5 For sources with a :xml:attribute:`SourceType:url` attribute, the :xml:attribute:`SourceType:version` attribute is also necessary. :ref:`Pulp sources ` are very simple to specify due to the amount of data that can be queried from Pulp itself: .. code-block:: xml .. note:: There is also a rawurl attribute for specifying sources that don't follow the conventional layout. .. code-block:: xml x86_64 x86_64 x86_64 .. code-block:: xml amd64 i386 Configuration Updates ===================== Packages will reload its configuration upon an explicit command via bcfg2-admin:: [0:3711] bcfg2-admin xcmd Packages.Refresh True During this command (which will take some time depending on the quantity and size of the sources listed in the configuration file), the server will report information like:: Packages: Updating http://mirror.anl.gov/ubuntu//dists/jaunty/main/binary-i386/Packages.gz Packages: Updating http://mirror.anl.gov/ubuntu//dists/jaunty/main/binary-amd64/Packages.gz Packages: Updating http://mirror.anl.gov/ubuntu//dists/jaunty/universe/binary-i386/Packages.gz Packages: Updating http://mirror.anl.gov/ubuntu//dists/jaunty/universe/binary-amd64/Packages.gz ... Packages: Updating http://mirror.centos.org/centos/5/extras/x86_64/repodata/filelists.xml.gz Packages: Updating http://mirror.centos.org/centos/5/extras/x86_64/repodata/primary.xml.gz One line per file download needed. ``Packages/sources.xml`` will be reloaded at this time, so any source specification changes (new or modified sources in this file) will be reflected by the server at this point. This process is much, much faster if you use the :ref:`native yum library support `. Soft reload ----------- .. versionadded:: 1.2.0 A soft reload can be performed to reread the configuration file and download only missing sources.:: [0:3711] bcfg2-admin xcmd Packages.Reload True This is done automatically any time `sources.xml`_ is updated. Availability ============ Support for the following clients is currently available. Support for other package managers (Portage, Zypper, IPS, etc) remain to be added. apt --- All dpkg based clients (for example Debian, Ubuntu or Nexenta) could be handled with the apt module: .. code-block:: xml main universe i386 amd64 pac --- For Arch Linux or Parabola GNU/Linux-libre you could use the pac module for packages. You do not need to supply a version attribute as the mirrors are rolling release and does not supply different versions. .. code-block:: xml core extra community i686 x86_64 pkgng ----- The support for the Next Generation package management tool for FreeBSD is called pkgng. It downloads the packagesite file from the mirror and parses the dependencies out of it. 
It currently does not use the DNS SRV record lookup mechanism to get the correct mirror and does not verify the signature inside the packagesite file. .. code-block:: xml latest x86:64 x86:32 yum --- Rpm based clients (for example RedHat, CentOS or Fedora) could be handled with the yum module: .. code-block:: xml os updates extras i386 x86_64 Package Checking and Verification ================================= In order to do disable per-package verification, you will need to use :ref:`BoundEntries `, e.g.: .. code-block:: xml .. _generating-client-configs: Generating Client APT/Yum Configurations ======================================== The Packages plugin has native support for generating Yum and Apt configs. Simply add entries like these to the appropriate bundles: .. code-block:: xml If you want to change the path to either of those files, you can set ``yum_config`` or ``apt_config`` in ``bcfg2.conf`` to the path to the config files you want to generate:: [packages] yum_config=/etc/yum.repos.d/all.repo apt_config=/etc/apt/sources.d/all If you need to distribute a config to different places on different hosts, you can use the :ref:`server-plugins-structures-altsrc` attribute, e.g.: .. code-block:: xml See :ref:`configuration` for more details on these options. .. note:: Support for generating Yum configs was added in 1.2.0, and Apt configs was added in 1.3.0. Before that, you could use :ref:`server-plugins-generators-cfg-genshi` or :ref:`server-plugins-generators-cfg-cheetah` to generate your configs. .. _native-yum-libraries: Package Groups ============== Some packaging systems provide package groups. To include a package group, use the :xml:attribute:`PackageStructure:group` attribute of the :xml:element:`Package` tag. pac --- .. versionadded:: 1.4.0 Pacman `groups `_ are supported: .. code-block:: xml yum --- Yum package groups are supported by both the native Yum libraries and Bcfg2's internal dependency resolver. You can use either the short group ID or the long group name: .. code-block:: xml By default, only those packages considered the "default" packages in a group will be installed. You can change this behavior using the :xml:attribute:`PackageStructure:type` attribute: .. code-block:: xml Valid values of "type" are: * ``mandatory``: Only install mandatory packages in the group. * ``default``: Install default packages from the group (the default). * ``optional`` or ``all``: Install all packages in the group, including mandatory, default, and optional packages. See :xml:type:`PackageStructure` for details. You can view the packages in a group by category with the ``yum groupinfo`` command. More information about the different levels can be found at http://fedoraproject.org/wiki/How_to_use_and_edit_comps.xml_for_package_groups#Installation Using Native Yum Libraries ========================== .. versionadded:: 1.2.0 By default, Bcfg2 uses an internal implementation of Yum's dependency resolution and other routines so that the Bcfg2 server can be run on a host that does not support Yum itself. If you run the Bcfg2 server on a machine that does have Yum libraries, however, you can enable use of those native libraries in Bcfg2 by setting ``use_yum_libraries`` to ``1`` in the ``[packages:yum]`` section of ``bcfg2.conf``. Benefits to this include: * Much lower memory usage by the ``bcfg2-server`` process. * Much faster ``Packages.Refresh`` behavior. * More accurate dependency resolution. * Better use of multiple processors/cores. 
Drawbacks include: * Resolution of package dependencies is slower and more resource-intensive. At times it can be much slower, particularly after running ``Packages.Refresh``. * More disk I/O. This can be alleviated by putting ``/var/lib/bcfg2/Packages/cache`` on tmpfs, but that offsets the lower memory usage. In some cases, you may have to raise the open file limit for the user who runs your Bcfg2 server process, particularly if you have a lot of repositories. Configuring the Yum Helper -------------------------- Due to poor memory management by the Yum API, the long-lived bcfg2-server process uses an external short-lived helper, ``bcfg2-yum-helper``, to do the actual Yum API calls for native yum library support. By default, Bcfg2 looks for this helper in ``$PATH``, or, failing that, at ``/usr/sbin/bcfg2-yum-helper``. If you have installed the helper elsewhere, you will need to configure that location with the ``helper`` option in the ``[packages:yum]`` section, e.g.:: [packages:yum] use_yum_libraries = 1 helper = /usr/local/sbin/bcfg2-yum-helper Setting Yum Options ------------------- In ``bcfg2.conf``, any options you set in the ``[packages:yum]`` section other than ``use_yum_libraries`` and ``helper`` will be passed along verbatim to the configuration of the Yum objects used in the Bcfg2 server. The following options are set by default, and should not generally be overridden: * ``cachedir`` is set to a hashed value unique to each distinct Yum configuration. Don't set this unless you know what you're doing. * ``keepcache`` is set to ``0``; there is no benefit to changing this. * ``sslverify`` is set to ``0``; change this if you know what you're doing. * ``reposdir`` is set to ``/dev/null`` to prevent the server's Yum configuration from being read; do not change this. Abstract Package Tags --------------------- If you are using the native Yum libraries, the abstract Package tag supports several attributes in addition to the standard :xml:attribute:`PackageStructure:name`: .. xml:type:: PackageStructure .. _pulp-source-support: Pulp Support ============ .. versionadded:: 1.2.0 Bcfg2 contains explicit support for repositories managed by Pulp (http://pulpproject.org/). .. note:: Only the Pulp 1.x API is supported at this time. When the Pulp 2.x API is finalized support will be added for it. Due to the amount of data about a repository that can be retrieved directly from Pulp, the only thing necessary to configure a Pulp repo is the repo ID, in :xml:attribute:`SourceType:pulp_id`: .. code-block:: xml Pulp sources require some additional configuration. First, the Bcfg2 server must have a valid ``/etc/pulp/consumer/consumer.conf`` that is readable by the user your Bcfg2 server runs as; the Pulp server, URLs, and so on, are determined from this. Secondly, in ``bcfg2.conf`` you must set the following options in the ``[packages:pulp]`` section: * ``username`` and ``password``: The username and password of a Pulp user that will be used to register new clients and bind them to repositories. Membership in the default ``consumer-users`` role is sufficient. Bcfg2 clients using Pulp sources will be registered to the Pulp server as consumers, and will be bound to the appropriate repositories. Debugging unexpected behavior ============================= .. versionadded:: 1.2.1 Using bcfg2-info ---------------- The dependency resolver used in Packages can be run in debug mode:: $ bcfg2-info packageresolve foo.example.com bcfg2-server zlib ... 
2 initial packages bcfg2-server zlib 54 new packages added sqlite less libxml2 expat ... 1 unknown packages libglib-2.0.so.0()(64bit) This will show why the resolver is acting as it is. Replace ``foo.example.com`` and ``bcfg2-server`` with a client name and list of packages, respectively. Note that resolving a partial package list (as above) may result in more unknown entries than you'd have otherwise; some of the package drivers (Yum in particular) consider the full package list when resolving multiple providers, and will not be able to properly resolve some dependencies without a full package list. You can also view the sources applicable to a client:: $ bcfg2-info packagesources foo.example.com ... Name: centos-6-x86_64-updates Type: yum URL: http://mirror.example.com/centos-6-x86_64-updates GPG Key(s): http://mirror.example.com/centos-6-x86_64-updates/RPM-GPG-KEY-CentOS-6 Name: centos-6-x86_64-os Type: yum URL: http://mirror.example.com/centos-6-x86_64-os GPG Key(s): http://mirror.example.com/centos-6-x86_64-os/RPM-GPG-KEY-CentOS-6 Using bcfg2-server ------------------ Once the server is started, enable debugging via bcfg2-admin:: $ bcfg2-admin xcmd Packages.toggle_debug TODO list ========= * Zypper support * Portage support .. _configuration: Configuration ============= ``bcfg2.conf`` contains miscellaneous configuration options for the Packages plugin. Any booleans in the config file accept the values "1", "yes", "true", and "on" for True, and "0", "no", "false", and "off" for False. For historical reasons, ``resolver`` and ``metadata`` also accept "enabled" and "disabled". It understands the following directives: [packages] section ------------------ +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | Name | Description | Values | Default | +=============+======================================================+==========+===================================================================+ | backends | List of backends that should be loaded for the | List | Yum,Apt,Pac,Pkgng | | | dependency resolution. | | | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | resolver | Enable dependency resolution | Boolean | True | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | metadata | Enable metadata processing. Disabling ``metadata`` | Boolean | True | | | implies disabling ``resolver`` as well. | | | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | yum_config | The path at which to generate Yum configs. | String | /etc/yum.repos.d/bcfg2.repo | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | apt_config | The path at which to generate APT configs. | String | /etc/apt/sources.list.d/bcfg2-packages-generated-sources.list | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | gpg_keypath | The path on the client RPM GPG keys will be copied | String | /etc/pki/rpm-gpg | | | to before they are imported on the client. 
| | | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | version | Set the version attribute used when binding Packages | any|auto | auto | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | cache | Path where Packages will store its cache | String | /Packages/cache | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ [packages:yum] section ---------------------- +-------------------+----------------------------------------------------------+---------+-----------+ | Name | Description | Values | Default | +===================+==========================================================+=========+===========+ | use_yum_libraries | Whether or not to use the | Boolean | False | | | :ref:`native yum library support ` | | | +-------------------+----------------------------------------------------------+---------+-----------+ | helper | Path to ``bcfg2-yum-helper`` | String | See below | +-------------------+----------------------------------------------------------+---------+-----------+ To find ``bcfg2-yum-helper`` if none is specified, Bcfg2 looks first in ``$PATH`` and then in ``/usr/sbin/bcfg2-yum-helper`` for the helper. All other options in the ``[packages:yum]`` section will be passed along verbatim to the Yum configuration if you are using the native Yum library support. [packages:pulp] section ----------------------- +----------+-----------------------------------------------------+--------+---------+ | Name | Description | Values | Default | +==========+=====================================================+========+=========+ | username | The username of a Pulp user that will be used to | String | None | | | register new clients and bind them to repositories. | | | +----------+-----------------------------------------------------+--------+---------+ | password | The password of the Pulp user | String | None | +----------+-----------------------------------------------------+--------+---------+ The user should be a member of the default ``consumer-users`` role. doc/server/plugins/generators/pkgmgr.txt000066400000000000000000000366611303523157100210270ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-generators-pkgmgr: ====== Pkgmgr ====== The Pkgmgr plugin resolves the Abstract Configuration Entity "Package" to a package specification that the client can use to detect, verify and install the specified package. For a package specification to be included in the Literal configuration the name attribute from an abstract Package tag (from Bundler) must match the name attribute of a Package tag in Pkgmgr, along with the appropriate group associations of course. Each file in the Pkgmgr directory has a priority. This allows the same package to be served by multiple files. The priorities can be used to break ties in the case that multiple files serve data for the same package. Usage of Groups in Pkgmgr ========================= Groups are used by the Pkgmgr plugin, along with host metadata, for selecting the package entries to include in the clients literal configuration. 
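For illustration, here is a small Pkgmgr file sketch (the repository URI, group names, and package version are hypothetical) that serves a package only to clients that are members of both the ``centos-5`` and ``x86_64`` groups:

.. code-block:: xml

    <PackageList uri='http://mirror.example.com/centos-5-x86_64/RPMS/' type='yum' priority='0'>
      <Group name='centos-5'>
        <Group name='x86_64'>
          <Package name='example-package' version='1.0-1.el5.x86_64'/>
        </Group>
      </Group>
    </PackageList>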
They can be thought of as:: if client is a member of group1 then assign to literal config Nested groups are conjunctive (logical and).:: if client is a member of group1 and group2 then assign to literal config Group membership may be negated. Tag Attributes in Pkgmgr ======================== .. xml:schema:: pkglist.xsd :linktotype: :noautodep: PackageType Pkgmgr Directory ================ The Pkgmgr/ directory keeps the XML files that define what packages are available for a host or image and where to find those packages. All the files in the directory are processed. The names of the XML files have no special meaning to Bcfg2; they are simply named so it's easy for the administrator to know what the contents hold. All Packages could be kept in a single file if so desired. Bcfg2 simply uses the Groups in the files and priorities to determine how to assign Packages to a host's literal configuration. Listed detailed below is one possible structure for the Pkgmgr directory. The files are structured to contain particular portions of distribution repositories. The files in the directory are:: $ ls Pkgmgr/ centos-4-noarch-updates.xml centos-4-x86_64-updates.xml centos-4-x86_64.xml backup.example.com.xml fedora-core-4-noarch-updates.xml fedora-core-4-x86-updates.xml fedora-core-4-x86.xml rhel-as-4-noarch-updates.xml rhel-as-4-x86-updates.xml rhel-as-4-x86.xml rhel-es-4-noarch-updates.xml rhel-es-4-x86-updates.xml rhel-es-4-x86.xml rhel-ws-4-noarch-udpates.xml rhel-ws-4-x86_64-updates.xml rhel-ws-4-x86_64.xml rhel-ws-4-x86-updates.xml rhel-ws-4-x86.xml As can be seen the file names have been selected to indicate what the contents are and have been split by Vendor, product and repository area. A partial listing of the centos-4-x86_64.xml is below .. code-block:: xml $ cat centos-4-x86_64.xml ... .. code-block:: xml $ cat centos-4-x86_64-updates.xml ... Here it can be seen that the data is encapsulated in a !PackageList Tag which describes the URI of the files described, the type of package, and the priority of the files in this list. The priority is used to decide which specific file to use when there are multiple files that could be used for a particular host. The highest priority file is the one that is used. Using this system, it is possible to have a file that contains all the Packages from the original installation, centos-4-x86_64.xml in this case, and then create a new file that contains updates that are made available afterwards, centos-4-x86_64-updates.xml and centos-4-noarch-updates.xml in this case. The priority of the update PackageLists just needs to be higher so that they will be selected instead of the original installation Packages. The backup.example.com.xml contains a packalist for a specific host which is qualified by the Client tag. Its Packages have a higher priority than the update Packages. This is because this particular host requires special Packages that are older than the ones available in the updates. .. code-block:: xml ... Simplifying Multi-Architecture Environments with :ref:`Altsrc ` ================================================================================================= Frequently multi-architecture environments (typically x86_64) will run into problems needing to specify different architectures on different groups for clients. For example, desktop machines may install 32-bit compatibility packages in addition to 64-bit ones, while servers may install only 64-bit packages. 
Specifying this in the Pkgmgr was onerous, because different package targets (64bit, 32+64, etc) needed to be specified on a package by group basis. Two features have been implemented that should ease this situation considerably. * The :ref:`Altsrc ` feature adds the ability to add a "bind as" directive to entries. For example, the following entry, in a bundle: .. code-block:: xml would bind as if it were named bar, while the entry would still appear named "foo" in the client configuration specification. * Pkgmgr now builds virtual package targets for any package with Instance client elements. This means that if a client attempts to bind: .. code-block:: xml It will only include the instances listed in the package. By using these features together, a bundle can include: .. code-block:: xml This in conjunction with a Pkgmgr entry that looks like: .. code-block:: xml Will result in a bound entry that looks like: .. code-block:: xml Altogether, this should move policy decisions about package architectures to bundles/base. Automated Generation of Pkgmgr Configuration Files ================================================== The two utilities detailed below are provided in the tools directory of the source tarball. Also see the general :ref:`Pkgmgr ` and :ref:`server-plugins-structures-altsrc` pages. pkgmgr_gen.py ^^^^^^^^^^^^^ pkgmgr_gen will generate a Pkgmgr file from a list of directories containing RPMs or from a list of YUM repositories.:: [root@bcfg2 Pkgmgr]# pkgmgr_gen.py --help usage: pkgmgr_gen.py [options] options: -h, --help show this help message and exit -aARCHS, --archs=ARCHS Comma separated list of subarchitectures to include. The highest subarichitecture required in an architecture group should specified. Lower subarchitecture packages will be loaded if that is all that is available. e.g. The higher of i386, i486 and i586 packages will be loaded if -a i586 is specified. (Default: all). -dRPMDIRS, --rpmdirs=RPMDIRS Comma separated list of directories to scan for RPMS. Wilcards are permitted. -eENDDATE, --enddate=ENDDATE End date for RPM file selection. -fFORMAT, --format=FORMAT Format of the Output. Choices are yum or rpm. (Default: yum) -gGROUPS, --groups=GROUPS List of comma separated groups to nest Package entities in. -iINDENT, --indent=INDENT Number of leading spaces to indent nested entries in the output. (Default:4) -oOUTFILE, --outfile=OUTFILE Output file name. -P, --pkgmgrhdr Include PackageList header in output. -pPRIORITY, --priority=PRIORITY Value to set priority attribute in the PackageList Tag. (Default: 0) -rRELEASE, --release=RELEASE Which releases to include in the output. Choices are all or latest. (Default: latest). -sSTARTDATE, --startdate=STARTDATE Start date for RPM file selection. -uURI, --uri=URI URI for PackageList header required for RPM format ouput. -v, --verbose Enable verbose output. -yYUMREPOS, --yumrepos=YUMREPOS Comma separated list of YUM repository URLs to load. NOTE: Each URL must end in a '/' character. .. note:: The startdate and enddate options are not yet implemented. pkgmgr_update.py ^^^^^^^^^^^^^^^^ pkgmgr_update will update the release (meaning the epoch, version and release) information in an existing Pkgrmgr file from a list of directories containing RPMs or from a list of YUM repositories. 
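A typical invocation (the file and directory names here are hypothetical) rescans a local RPM directory and rewrites the existing file in place; the full option list is shown below::

    pkgmgr_update.py -c Pkgmgr/centos-4-x86_64.xml \
                     -d /srv/mirror/centos-4-x86_64/RPMS \
                     -o Pkgmgr/centos-4-x86_64.xml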
All Tags and other attributes in the existing file will remain unchanged.:: [root@bcfg2 Pkgmgr]# pkgmgr_update.py --help usage: pkgmgr_update.py [options] options: -h, --help show this help message and exit -cCONFIGFILE, --configfile=CONFIGFILE Existing Pkgmgr configuration file name. -dRPMDIRS, --rpmdirs=RPMDIRS Comma separated list of directories to scan for RPMS. Wilcards are permitted. -oOUTFILE, --outfile=OUTFILE Output file name or new Pkgrmgr file. -v, --verbose Enable verbose output. -yYUMREPOS, --yumrepos=YUMREPOS Comma separated list of YUM repository URLs to load. NOTE: Each URL must end in a '/' character. Pkgmgr Configuration Examples ============================= verify_flags ^^^^^^^^^^^^ This entry was used for the Centos test client used during RPM development. .. code-block:: xml Multiple Instances ^^^^^^^^^^^^^^^^^^ .. code-block:: xml Kernel ^^^^^^ .. note:: Multiple instances with the same architecture must be in the installOnlyPkgs list. .. code-block:: xml Per Instance Ignore ^^^^^^^^^^^^^^^^^^^ .. note:: In this case a per instance ignore is actually a bad idea as the verify failure is because of multiarch issues where the last package installed wins. So this would be better as a Package level ignore. Ignore tag entries only work with the RPM driver. They do not appear to be supported in YUM as of 1.0pre5. .. code-block:: xml pkg_checks ^^^^^^^^^^ If pkg_checks = false the version information is not required. If pkg_checks = true the full information is needed as normal. For YUM a minimal entry is .. code-block:: xml In fact for YUM, with pkg_checks = false, any combination of the nevra attributes that will build a valid yum package name (see the Misc heading on the yum man page) is valid. .. code-block:: xml For RPM a minimal entry is .. code-block:: xml verify_fail_action ^^^^^^^^^^^^^^^^^^ The way I have Bcfg2 configured for my development systems. This way it reports bad, but doesn't do anything about it. .. code-block:: xml doc/server/plugins/generators/rules.txt000066400000000000000000000370321303523157100206630ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-generators-rules: ===== Rules ===== The Rules plugin resolves the following Abstract Configuration Entities: * Service * Package * Path * Action * All SELinux entries * POSIXUser * POSIXGroup to literal configuration entries suitable for the client drivers to consume. For an entity specification to be included in the Literal configuration the name attribute from an abstract entity tag (from Bundler) must match the name attribute of an entity tag in Rules, along with the appropriate group associations of course. Each file in the Rules directory has a priority. This allows the same Entities to be served by multiple files. The priorities can be used to break ties in the case that multiple files serve data for the same entity. Tag Attributes in Rules ======================= Running ``bcfg2-lint`` will check your configuration specification for the presence of any mandatory attributes that are necessary for the entry specified. Rules Tag --------- .. xml:element:: Rules :linktotype: :noautodep: :inlinetypes: RContainerType Package Tag ----------- .. xml:type:: PackageType Action Tag ---------- .. xml:type:: ActionType See also :ref:`client-tools-actions`. Service Tag ----------- .. xml:type:: ServiceType Service mode specification ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
versionadded:: 1.3.0 In the 1.3.0 release, the "mode" attribute has been replaced by a pair of attributes, :xml:attribute:`ServiceType:restart` and :xml:attribute:`ServiceType:install`, which control how a service is handled more granularly than the old "mode" attribute. The old "mode" attribute values are equivalent as follows: +-----------------------------+------------------------------------------+ | Mode attribute | Equivalent | +=============================+==========================================+ | ``mode="default"`` | ``restart="true" install="true"`` | +-----------------------------+------------------------------------------+ | ``mode="interactive_only"`` | ``restart="interactive" install="true"`` | +-----------------------------+------------------------------------------+ | ``mode="supervised"`` | ``restart="true" install="true"`` | +-----------------------------+------------------------------------------+ | ``mode="manual"`` | ``restart="false" install="false"`` | +-----------------------------+------------------------------------------+ The default is ``restart="true" install="true"`` Previously, "supervised" could be used to start a service during the verification phase; this is no longer supported. Services that have been stopped on a client will be started during the install phase. Path Tag -------- The Path tag has different values depending on the *type* attribute of the path specified in your configuration. Below is a set of tables which describe the attributes available for various Path types. Note that ``secontext`` below expects a full context, not just the type. For instance, "``system_u:object_r:etc_t:s0``", not just ``etc_t``. You can also specify "``__default__``", which will restore the context of the file to the default set by policy. If a file has no default context rule, and you don't wish to set one, you can specify ``secontext=''`` (i.e., an empty ``secontext``), in which case the client will not try to manage the SELinux context of the file at all. See :ref:`server-selinux` for more information. Attributes common to all Path tags: .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: name,type augeas ^^^^^^ Run `Augeas `_ commands. See :ref:`client-tools-augeas` for more details. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: owner,group,mode,secontext,lens :requiredattrs: owner,group,mode device ^^^^^^ Manage devices. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: dev_type,owner,group,mode,secontext,major,minor :requiredattrs: dev_type,owner,group,mode directory ^^^^^^^^^ Entry represents a directory. :xml:attribute:`PathType:prune` can be set to remove all contents from the directory that are not explicitly specified in Bcfg2. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: owner,group,mode,secontext,prune :requiredattrs: owner,group,mode file ^^^^ Distribute an file with content explicitly specified in-line (i.e., as opposed to using :ref:`server-plugins-generators-cfg` for this file). If the file has no content, :xml:attribute:`PathType:empty` *must* be set to ``true``. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :onlyattrs: owner,group,mode,secontext,empty :requiredattrs: owner,group,mode hardlink ^^^^^^^^ Manage a hard link. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: owner,group,mode,secontext,to :requiredattrs: owner,group,mode,to .. 
_path-ignore: ignore ^^^^^^ ``ignore`` lets you flag files that are distributed by system software packages, but have been modified locally, to be ignored by package verification routines. This is useful for, e.g., a package that installs an initial version of a file and then modifies it automatically. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: name :requiredattrs: name nonexistent ^^^^^^^^^^^ Remove the specified file or directory. If :xml:attribute:`PathType:recursive` is set, remove the directory recursively (i.e., ``rm -rf``). .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: recursive permissions ^^^^^^^^^^^ Merely set permissions on the specified path, which is presumed to already exist. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: owner,group,mode,secontext,recursive :requiredattrs: owner,group,mode symlink ^^^^^^^ Manage symlinks. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: to :requiredattrs: to .. _server-plugins-generators-rules-vcs: vcs ^^^ Check out the specified VCS repository to the given path. See :ref:`client-tools-vcs` for more details. .. xml:type:: PathType :nochildren: :noattributegroups: :nodoc: :notext: :onlyattrs: vcstype,revision,sourceurl :requiredattrs: vcstype,revision,sourceurl .. _server-plugins-generators-rules-acls: ACLs ^^^^ .. versionadded:: 1.3.0 ACLs on a Path entry are specified not by attributes on the tag but by child ```` tags. For instance: .. code-block:: xml .. xml:element:: ACL It is not currently possible to manually set an effective rights mask; the mask will be automatically calculated from the given ACLs when they are applied. For directories either no default ACL entries or at least an entry for the owner, owning group and other must be defined. Note that it is possible to set ACLs that demand different permissions on a file than those specified in the ``perms`` attribute on the ``Path`` tag. For instance: .. code-block:: xml In this case, we've specified permissions of ``0644``, but the effective rights mask will be "rwx," so setting the ACL will change the permissions to ``0674``. When this happens, Bcfg2 will change the permissions and set the ACLs on every run and the entry will be eternally marked as bad. SELinux Entries --------------- .. versionadded:: 1.3.0 .. note:: In order to use these entries, the client also needs to be at least version 1.3.0 since they require a client tool which is unavailable in previous versions. Below is a set of tables which describe the attributes available for various SELinux types. The entry types (except for ``module``) correspond to ``semanage`` subcommands. Note that the ``selinuxtype`` attribute takes only an SELinux type, not a full context; e.g., "``etc_t``", not "``system_u:object_r:etc_t:s0``". As it can be very tedious to create a baseline of all existing SELinux entries, you can use ``selinux_baseline.py`` located in the ``tools/`` directory to do that for you. See :ref:`server-selinux` for more information. SEBoolean Tag ^^^^^^^^^^^^^ .. xml:type:: SEBooleanType SEPort Tag ^^^^^^^^^^ .. xml:type:: SEPortType SEFcontext Tag ^^^^^^^^^^^^^^ .. xml:type:: SEFcontextType SENode Tag ^^^^^^^^^^ .. xml:type:: SENodeType SELogin Tag ^^^^^^^^^^^ .. xml:type:: SELoginType SEUser Tag ^^^^^^^^^^ .. xml:type:: SEUserType SEInterface Tag ^^^^^^^^^^^^^^^ .. xml:type:: SEInterfaceType SEPermissive Tag ^^^^^^^^^^^^^^^^ .. 
xml:type:: SEPermissiveType SEModule Tag ^^^^^^^^^^^^ .. xml:type:: SEModuleType See also :ref:`server-plugins-generators-semodules`. .. _server-plugins-generators-rules-posixuser-tag: POSIXUser Tag ------------- .. versionadded:: 1.3.0 .. note:: In order to use this, the client also needs to be at least version 1.3.0 since they require a client tool which is unavailable in previous versions. .. xml:type:: POSIXUserType For example: .. code-block:: xml Using Regular Expressions in Rules ================================== If you wish, you can configure the Rules plugin to support regular expressions. This entails a small performance and memory usage penalty. To do so, add the following setting to ``bcfg2.conf``:: [rules] regex = yes With regular expressions enabled, you can use a regex in the ``name`` attribute to match multiple abstract configuration entries. Regular expressions are anchored at both ends, so ```` will *not* match a Service named ``bcfg2-server``; you'd have to explicitly specify ````. Note that only one Rule can apply to any abstract entry, so you cannot specify multiple regexes to match the same rule. Replacing the name of the Entry in Attributes ============================================= If you are using regular expressions to match the abstract configuration entries, you may need the concrete name of the entry in some attributes. To use this feature, you have to enable it. It is only useful, if used together with regex matching. :: [rules] regex = yes replace_name = yes You now can write something like that in your xml file: .. code-block:: xml ``%{name}`` will be correctly replaced with the username for each POSIXUser. doc/server/plugins/generators/semodules.txt000066400000000000000000000024621303523157100215300ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-generators-semodules: ========= SEModules ========= .. versionadded:: 1.3.0 .. automodule:: Bcfg2.Server.Plugins.SEModules :no-members: Usage ===== To use the SEModules plugin, first do ``mkdir /var/lib/bcfg2/SEModules``. Add ``SEModules`` to your ``plugins`` line in ``/etc/bcfg2.conf`` and restart bcfg2-server. The SEModules directory contains modules in a layout similar to the Cfg plugin: at the top level, SEModules should contain directories named after the modules you want to install, and each of those directories can contain a global module, plus any number of group- and host-specific modules. For instance:: $ ls -F SEModules foo.pp/ bar.pp/ $ ls SEModules/foo.pp/ foo.pp foo.pp.G50_server foo.pp.H_baz.example.com For more information on this directory layout, see :ref:`server-plugins-generators-cfg`. Entries ======= SEModules handles ```` entries. For instance: .. code-block:: xml The ``.pp`` extension is optional. .. note:: If you use a ``BoundSEModule`` tag, you must *not* include the ``.pp`` extension. This is not recommended, though. You can also install a disabled module: .. code-block:: xml doc/server/plugins/generators/sshbase.txt000066400000000000000000000147241303523157100211640ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-generators-sshbase: ======= SSHbase ======= SSHbase is a purpose-built Bcfg2 plugin for managing ssh host keys. It is responsible for making ssh keys persist beyond a client rebuild and building a proper ``ssh_known_hosts`` file, including a correct localhost record for the current system. 
It has two functions: * Generating new ssh keys -- When a client requests a key (v1, rsa, ecdsa, etc.), and there is no existing key in the repository, one is generated. * Maintaining the ``ssh_known_hosts`` file -- all current known public keys (and extra public key stores) are integrated into a single ``ssh_known_hosts`` file, and a localhost record for the current client is added. The ``ssh_known_hosts`` file data is updated whenever any keys change, are added, or deleted. Interacting with SSHbase ======================== * Pre-seeding with existing keys -- Currently existing keys will be overwritten by new, sshbase-managed ones by default. Pre-existing keys can be added to the repository by putting them in ``/SSHbase/.H_`` * Pre-seeding can also be performed using ``bcfg2-admin pull Path /name/of/ssh/key`` * Revoking existing keys -- deleting ``/SSHbase/\*.H_`` will remove keys for an existing client. Aliases ======= SSHbase has support for Aliases listed in :ref:`clients.xml `. The address for the entries are specified either through DNS (e.g. a CNAME), or via the address attribute to the Alias. Getting started =============== #. Add SSHbase to the **plugins** line in ``/etc/bcfg2.conf`` and restart the server. This enables the SSHbase plugin on the Bcfg2 server. #. Add Path entries for ``/etc/ssh/ssh_known_hosts``, ``/etc/ssh/ssh_host_dsa_key``, ``/etc/ssh/ssh_host_dsa_key.pub``, etc., to a bundle. #. Enjoy. At this point, SSHbase will generate new keys for any client without a recorded key in the repository, and will generate an ``ssh_known_hosts`` file appropriately. Supported key formats ===================== SSHbase currently supports the following key formats: * RSA1 (``ssh_host_key``, ``ssh_host_key.pub``) * RSA2 (``ssh_host_rsa_key``, ``ssh_host_rsa_key.pub``) * DSA (``ssh_host_dsa_key``, ``ssh_host_dsa_key.pub``) * ECDSA (``ssh_host_ecdsa_key``, ``ssh_host_ecdsa_key.pub``) * Ed25519 (``ssh_host_ed25519_key``, ``ssh_host_ed25519_key.pub``) Group-specific keys =================== .. versionadded:: 1.2.0 In addition to host-specific keys, SSHbase also supports group-specific keys, e.g., for a high-availability cluster or similar application. Group-specific keys must be pre-seeded; SSHbase cannot create group-specific keys itself. To use group-specific keys, simply create ``SSHbase/.Gxx_``. For instance, ``ssh_host_dsa_key.pub.G65_foo-cluster``. Adding public keys for unmanaged hosts ====================================== If you have some hosts which are not managed by Bcfg2, but you would still like to have their public ssh keys available in ``ssh_known_hosts``, you can add their public keys to the ``SSHbase`` directory with a *.static* ending. Example: ``a.static``:: TEST1 ``b.static``:: TEST2 The generated ``ssh_known_hosts`` file:: TEST1 TEST2 Static ssh_known_hosts file =========================== .. versionadded:: 1.2.0 You can also distribute a fully static ``ssh_known_hosts`` file on a per-host or per-group basis by creating ``SSHbase/ssh_known_hosts.H_`` or ``SSHbase/ssh_known_hosts.Gxx_``. Those files will be entirely static; Bcfg2 will not add any host keys to them itself. Permissions and Metadata ======================== .. versionadded:: 1.2.0 SSHbase supports use of an :ref:`info.xml ` file to control the permissions and other metadata for the keys and ``ssh_known_hosts`` file. 
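For instance, a minimal ``info.xml`` sketch placed in the ``SSHbase/`` directory could mark entries as sensitive and owner-readable only; note that a single ``Info`` tag like this applies to everything SSHbase serves, including ``ssh_known_hosts``, so treat it purely as a starting point:

.. code-block:: xml

    <FileInfo>
      <Info owner='root' group='root' mode='0600' sensitive='true'/>
    </FileInfo>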
You can use the ```` directive in ``info.xml`` to change the metadata for different keys, e.g.:: Default permissions are as follows: +------------------------------------------+-------+-------+------+-----------+----------+----------+ | File | owner | group | mode | sensitive | paranoid | encoding | +==========================================+=======+=======+======+===========+==========+==========+ | ssh_known_hosts | root | root | 0644 | false | false | None | +------------------------------------------+-------+-------+------+-----------+----------+----------+ | ssh_host_key | root | root | 0600 | false | false | base64 | +------------------------------------------+-------+-------+------+-----------+----------+----------+ | ssh_host_key.pub | root | root | 0644 | false | false | base64 | +------------------------------------------+-------+-------+------+-----------+----------+----------+ | ssh_host_[rsa|dsa|ecdsa|ed25519]_key | root | root | 0600 | false | false | None | +------------------------------------------+-------+-------+------+-----------+----------+----------+ | ssh_host_[rsa|dsa|ecdsa|ed25519]_key.pub | root | root | 0644 | false | false | None | +------------------------------------------+-------+-------+------+-----------+----------+----------+ Note that the ``sensitive`` attribute is false, even for private keys, in order to permit :ref:`pulling with bcfg2-admin `. You should almost certainly set ``sensitive`` to "true" in ``info.xml``. .. _server-plugins-generators-sshbase-encryption: Encryption ========== SSHbase can optionally encrypt the private keys that it generates. To enable this feature, set the ``passphrase`` option in the ``[sshbase]`` section of ``bcfg2.conf`` to the name of the passphrase that should be used to encrypt all SSH keys. (The passphrases are enumerated in the ``[encryption]`` section.) See :ref:`server-encryption` for more details on Bcfg2 encryption in general. Blog post ========= http://www.ducea.com/2008/08/24/using-the-bcfg2-sshbase-plugin/ .. note:: The linked post uses deprecated ConfigFile entries. Path entries have since replaced these. See :ref:`server-configurationentries`. doc/server/plugins/grouping/000077500000000000000000000000001303523157100164445ustar00rootroot00000000000000doc/server/plugins/grouping/grouppatterns.txt000066400000000000000000000053071303523157100221270ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-grouping-grouppatterns: ============= GroupPatterns ============= The GroupPatterns plugin is a connector that can assign clients group membership pased on patterns in client hostnames. Two basic methods are supported: - regular expressions (NamePatterns) - ranges (NameRange) Hosts that match the specification are placed in the group or groups specified by the pattern. Setup ===== #. Enable the GroupPatterns plugin #. Create the ``GroupPatterns/config.xml`` file (similar to the example below). #. Client groups will be augmented based on the specification Pattern Types ============= NamePatterns use regular expressions to match client hostnames. All matching clients are placed in the resulting groups. NamePatterns also have the ability to use regular expression matched groups to dynamically create group names. The first two examples below are NamePatterns. The first adds client hostname to both groups *gp-test1* and *gp-test2*. The second matches the hostname as a group and places the client in a group called *group-*. NameRange patterns allow the use of the application of numeric ranges to host names. 
The final pattern below matches any of *node1-node32* and places them all into the *rack1* group. Dynamically generated group names are not supported with NameRange. Examples ======== .. code-block:: xml hostname gp-test1 gp-test2 (.*) group-$1 node[[1-32]] rack1 Cluster Example --------------- Functional aspects are extracted from hostname strings, and dynamic groups are created. Expected hostname to group mapping:: xnfs1.example.com -> nfs-server xnfs2.example.com -> nfs-server xlogin1.example.com -> login-server xlogin2.example.com -> login-server xpvfs1.example.com -> pvfs-server xpvfs2.example.com -> pvfs-server xwww.example.com -> www-server GroupPatterns configuration: .. code-block:: xml x(\w[^\d\.]+)\d*\. $1-server Regex explanation: #. ``x`` Match any hostname that begins with "x" #. ``(\w[!^\d|\.]+)`` followed by one or more word characters that are not a decimal digit or "." and save the string to $1 #. ``\d*`` followed by 0 or more decimal digit(s) #. ``\.`` followed by a literal "." doc/server/plugins/grouping/ldap.txt000066400000000000000000000202601303523157100201250ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-grouping-ldap: ==== Ldap ==== .. warning:: This plugin is considered experimental and has known issues (see below). Purpose ------- This plugin makes it possible to fetch data from an LDAP directory, process it and attach it to your metadata. Installation ------------ __ http://www.python-ldap.org/ First, you need to install the `python-ldap library`__. On debian-based systems this is accomplished by:: aptitude install python-ldap To enable the plugin, add "Ldap" to the plugins line in your ``bcfg2.conf``. Then add a new directory called "Ldap" to the root of your Bcfg2 repository and define your queries in a file called ``config.py`` using the information in the next section. Configuration ------------- As processing LDAP search results can get pretty complex, the configuration has to be written in Python. Here is a minimal example to get you started:: from Bcfg2.Server.Plugins.Ldap import LdapConnection, LdapQuery __queries__ = ['ExampleQuery'] conn_default = LdapConnection( binddn="uid=example,ou=People,dc=example,dc=com", bindpw = "foobat") class ExampleQuery(LdapQuery): base = "ou=People,dc=example,dc=com" scope = "one" attrs = ["cn", "uid"] connection = conn_default def prepare_query(self, metadata): self.filter = "(personalServer=" + metadata.hostname + ")" def process_result(self, metadata): if not self.result: admin_uid = None admin_name = "This server has no admin." return { "admin_uid" : self.result[0][1]["uid"], "admin_name" : self.result[0][1]["cn"] } The first line provides the two required classes for dealing with connections and queries. In this example our LDAP directory has a number of user objects in it. Each of those may have a personal server they administer. Whenever metadata for this machine is being generated by the Bcfg2 server, the UID and name of the admin are retrieved from LDAP. In your bundles and config templates, you can access this data via the metadata object:: ${metadata.Ldap["ExampleQuery"]["admin_name"]} Connection retry ++++++++++++++++ If the LDAP server is down during a request, the LDAP plugin tries to reconnect after a short delay. By default, it waits 3 seconds during the retries and tries to reconnect up to three times. If you wish, you could customize these values in your ``bcfg2.conf``:: [ldap] retries = 3 retry_delay = 3.0 Class reference --------------- LdapConnection ++++++++++++++ .. 
class:: LdapConnection This class represents an LDAP connection. Every query must be associated with exactly one connection. .. attribute:: LdapConnection.binddn DN used to authenticate against LDAP (required). .. attribute:: LdapConnection.bindpw Password for the previously mentioned **binddn** (required). .. attribute:: LdapConnection.host Hostname of the host running the LDAP server (defaults to "localhost"). .. attribute:: LdapConnection.port Port where the LDAP server is listening (defaults to 389). You may pass any of these attributes as keyword arguments when creating the connection object. LdapQuery +++++++++ .. class:: LdapQuery This class defines a single query that may adapt itself depending on the current metadata. .. attribute:: LdapQuery.attrs Can be used to retrieve only a certain subset of attributes. May either be a list of strings (attribute names) or ``None``, meaning all attributes (defaults to ``None``). .. attribute:: LdapQuery.base This is the search base. Only LDAP entries below this DN will be included in your search results (required). .. attribute:: LdapQuery.connection Set this to an instance of the LdapConnection class (required). .. attribute:: LdapQuery.filter LDAP search filter used to narrow down search results (defaults to ``(objectClass=*)``). .. attribute:: LdapQuery.name This will be used as the dictionary key that provides access to the query results from the metadata object: ``metadata.Ldap["NAMEGOESHERE"]`` (defaults to the class name). .. attribute:: LdapQuery.scope Set this to one of "base", "one" or "sub" to specify LDAP search depth (defaults to "sub"). .. method:: LdapQuery.is_applicable(self, metadata) You can override this method to indicate whether this query makes sense for a given set of metadata (e.g. you need a query only for a certain bundle or group). (defaults to returning True) .. method:: LdapQuery.prepare_query(self, metadata, \**kwargs) Override this method to alter the query prior to execution. This is useful if your filter depends on the current metadata, e.g.:: self.filter = "(cn=" + metadata.hostname + ")" (defaults to doing nothing) .. method:: LdapQuery.process_result(self, metadata, \**kwargs) You will probably override this method in every query to reformat the results from LDAP. The raw result is stored in ``self.result``; you must return the altered data. Note that LDAP search results are presented in this structure:: ( ("DN of first entry returned", { "firstAttribute" : 1, "secondAttribute" : 2, } ), ("DN of second entry returned", { "firstAttribute" : 1, "secondAttribute" : 2, } ), ) Therefore, to return just the value of the firstAttribute of the second entry returned, you'd write:: return self.result[1][1]["firstAttribute"] (defaults to returning ``self.result`` unaltered) .. method:: LdapQuery.get_result(self, metadata, \**kwargs) This executes the query. First it will call ``prepare_query()`` for you, then it will try to execute the query with the specified connection, and finally it will call ``process_result()`` and return that return value. If you use an LdapQuery class by itself, you can pass additional keyword arguments to ``get_result()``. It will call ``prepare_query()`` and ``process_result()`` for you and pass these additional arguments on to those methods.
Here is an example:: __queries__ = ['WebPackageQuery'] class WebSitesQuery(LdapQuery): filter = "(objectClass=webHostingSite)" attrs = ["dc"] connection = conn_default def prepare_query(self, metadata, base_dn): self.base = base_dn def process_result(self, metadata, **kwargs): [...] # build sites dict from returned dc attributes return sites class WebPackagesQuery(LdapQuery): base = "dc=example,dc=com" attrs = ["customerId"] connection = conn_default def prepare_query(self, metadata): self.filter = "(&(objectClass=webHostingPackage)(cn:dn:=" + metadata.hostname + "))" def process_result(self, metadata): customers = {} for customer in self.result: dn = customer[0] cid = customer[1]["customerId"][0] customers[cid]["sites"] = WebSitesQuery().get_result(metadata, base_dn=dn) return customers This example assumes that we have a number of webhosting packages that contain various sites. We need the ``WebPackagesQuery`` to get a list of the packages our customers have and another query for each of those to find out what sites are contained in each package. The magic happens in the second class where ``WebSitesQuery.get_result()`` is called with the additional ``base_dn`` parameter that allows our LdapQuery to only search below that DN. You do not need to add all LdapQueries to the ``__queries__`` list. Only add those to that list, that should be called automatically and whose results should be added to the client metadata. Known Issues ------------ * At this point there is no support for SSL/TLS. * This module could not know, if a value changed on the LDAP server. So it could not expire the client metadata cache sanely. If you are using aggressive caching mode, this plugin will expire the metadata cache for a single client at the start of a client run. If you are using LDAP data from another client in a template, you will probably get the cached values from the last client run of that other client. doc/server/plugins/grouping/metadata.txt000066400000000000000000000166261303523157100210000ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-grouping-metadata: ======== Metadata ======== The metadata mechanism has two types of information, client metadata and group metadata. The client metadata describes which top level group a client is associated with.The group metadata describes groups in terms of what bundles and other groups they include. Group data and clients' memberships are reflected in the `groups.xml`_ and `clients.xml`_ files, respectively. Usage of Groups in Metadata =========================== Clients are assigned membership of groups in the Metadata descriptions. Clients can be directly assigned to *'profile'* or *'public'* groups. Client membership of all other groups is by those groups being associated with the profile or public groups. This file can be indirectly modified from clients through use of the ``-p`` flag to ``bcfg2``. Clients are associated with profile groups in `clients.xml`_ as shown below. .. _server-plugins-grouping-metadata-clients-xml: clients.xml =========== The ``clients.xml`` file contains the mappings of Profile Groups to clients. The file is just a series of ```` tags, each of which describe one host. A sample file is below: .. code-block:: xml .. xml:schema:: clients.xsd For detailed information on client authentication see :ref:`appendix-guides-authentication` .. _server-plugins-grouping-metadata-clients-database: Clients Database ---------------- .. 
versionadded:: 1.3.0 It is also possible to store client records in a database rather than writing back to `clients.xml`_. This provides several advantages: * `clients.xml`_ will never be written by the server, removing an area of contention between the user and server. * `clients.xml`_ can be removed entirely for many sites. * The Bcfg2 client list can be queried by other machines without obtaining and parsing `clients.xml`_. * A single client list can be shared amongst multiple Bcfg2 servers. In general, storing clients in the database works almost the same as `clients.xml`_. `groups.xml`_ is parsed identically. If `clients.xml`_ is present, it is parsed, but ```` tags in `clients.xml`_ *do not* assert client existence; they are only used to set client options *if* the client exists (in the database). That is, the two purposes of `clients.xml`_ -- to track which clients exist, and to set client options -- have been separated. With the improvements in `groups.xml`_ parsing in 1.3, client groups can now be set directly in `groups.xml`_ with ```` tags. (See :xml:type:`clientType` for more details.) As a result, `clients.xml`_ is only necessary if you need to set options (e.g., aliases, floating clients, per-client passwords, etc.) on clients. To use the database backend instead of `clients.xml`_, set ``use_database`` in the ``[metadata]`` section of ``bcfg2.conf`` to ``true``. You will also need to configure the :ref:`Global Server Database Settings `. The `clients.xml`_-based model remains the default. .. _server-plugins-grouping-metadata-groups-xml: groups.xml ========== The ``groups.xml`` file contains Group and Profile definitions. Here's a simple ``groups.xml`` file: .. code-block:: xml A Group tag that does not contain any child tags is a declaration of membership; a Group or Client tag that does contain children is a conditional. So the example above does not assign either the ``rhel5`` or ``rhel6`` groups to machines in the ``mail-server`` group, but conditionally assigns the ``sendmail-server`` or ``postfix-server`` groups depending on the OS of the client. (Presumably in this example the OS groups are set by a probe.) Consequently, a client that is RHEL 5 and a member of the ``mail-server`` profile group would also be a member of the ``apache-server``, ``nfs-client``, ``server``, and ``sendmail-server`` groups; a RHEL 6 client that is a member of the ``mail-server`` profile group would be a member of the ``apache-server``, ``nfs-client``, ``server``, and ``postfix-server`` groups. Client tags in `groups.xml`_ allow you to supplement the profile group declarations in `clients.xml`_ and/or client group assignments with the :ref:`server-plugins-grouping-grouppatterns` plugin. They should be used sparingly. (They are more useful when you are using the database backend for client records.) You can also declare that a group should be negated; this allows you to set defaults and override them efficiently. Negation is applied after other group memberships are calculated, so it doesn't matter how many times a client is assigned to a group or how many times it is negated; a single group negation is sufficient to remove a client from that group. For instance, in the following example, ``foo.example.com`` is **not** a member of ``selinux-enabled``, even though it is a member of the ``foo-server`` and ``every-server`` groups: .. code-block:: xml Negated groups can also be used to declare other Group assignments, but not to declare Bundle assignments. .. 
note:: Nested Group conditionals, Client tags, and negated Group tags are all new in 1.3.0. .. xml:schema:: metadata.xsd Metadata Caching ================ .. versionadded:: 1.3.0 Client metadata can be cached in order to improve performance. This is particularly important if you have lots of templates that use metadata from other clients (e.g., with the `MetadataQuery`_ interface described below). See :ref:`server-caching` for a full description of the caching features available. .. _server-plugins-grouping-metadata-clientmetadata: ClientMetadata ============== A special client metadata class is available to :ref:`server-plugins-generators-cfg-genshi` and :ref:`server-plugins-generators-cfg-cheetah`. .. autoclass:: Bcfg2.Server.Plugins.Metadata.ClientMetadata MetadataQuery ------------- .. autoclass:: Bcfg2.Server.Plugins.Metadata.MetadataQuery doc/server/plugins/index.txt000066400000000000000000000046461303523157100164720ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-index: ======= Plugins ======= Plugins are the source of all logic used in building a config. They can perform one of several tasks: #. Generating configuration inventory lists for clients #. Generating configuration entry contents for clients #. Probing client-side state (like hardware inventory, etc) -- the generic client probing mechanism is described at :ref:`server-plugins-probes`. #. Automating administrative tasks (e.g. :ref:`server-plugins-generators-sshbase` which automates ssh key management) #. Generating client per-entry installation decision-lists Enabling Plugins ================ In order for the Bcfg2 server to use a plugin, it needs to be listed on the *plugins* line in ``bcfg2.conf``. Default Plugins =============== The `Bcfg2 repository`_ contains all the plugins currently distributed with Bcfg2. .. _Bcfg2 repository: https://github.com/Bcfg2/bcfg2/tree/maint/src/lib/Bcfg2/Server/Plugins Metadata (Grouping) ------------------- .. toctree:: :maxdepth: 1 :glob: grouping/* Each of these plugins has a corresponding subdirectory with the same name in the Bcfg2 repository. Abstract Configuration (Structures) ----------------------------------- .. toctree:: :maxdepth: 1 :glob: structures/bundler/index structures/* Each of these plugins has a corresponding subdirectory with the same name in the Bcfg2 repository. Literal Configuration (Generators) ---------------------------------- .. toctree:: :maxdepth: 1 :glob: generators/* Each of these plugins has a corresponding subdirectory with the same name in the Bcfg2 repository. Connector Plugins ----------------- .. toctree:: :maxdepth: 1 :glob: connectors/* Statistics Plugins ------------------ .. toctree:: :maxdepth: 1 :glob: statistics/* Reporting can be enabled by adding it to the plugins line in ``/etc/bcfg2.conf``. Version Plugins --------------- .. toctree:: :maxdepth: 1 :glob: version/* Miscellaneous Plugins --------------------- .. toctree:: :maxdepth: 1 :glob: misc/* Plugin Roles (in 1.0) ===================== In version 1.0, plugins have been refactored into a series of roles. These are fine-grained plugin capabilities that govern how the server core interacts with plugins. More details can be found in :ref:`server-plugins-plugin-roles`. .. toctree:: :hidden: plugin-roles probes/index doc/server/plugins/misc/000077500000000000000000000000001303523157100155455ustar00rootroot00000000000000doc/server/plugins/misc/acl.txt000066400000000000000000000162301303523157100170470ustar00rootroot00000000000000.. -*- mode: rst -*- ..
_server-plugins-misc-acl: === ACL === The ACL plugin lets you set client communication ACLs to prevent clients from accessing the full range of exposed XML-RPC methods. You can get a list of all exposed methods by running:: bcfg2-admin xcmd listMethods Note that this will only list methods that are available to the client this is run from; that is, if the ACL plugin is in place, ``listMethods`` will reflect the ACLs. ACLs can be set in two different ways: * IP-based ACLs allow you to set ACLs based on client IP address or CIDR range. * Metadata-based ACLs allow you to set ACLs based on client hostname, group membership, or complex combinations thereof. IP-based ACLs are much faster, but metadata-based ACLs are often easier and better. If you are not going to use any ACLs, it is recommended that you disable this plugin because using it can incur a slight performance hit. If you are using IP-based ACLs but *not* metadata-based ACLs, it is similarly recommended that you ensure that your IP-based ACL file ends with an explicit Deny for all clients; this will ensure that metadata-based ACLs are never checked. If you are using metadata-based ACLs, :ref:`server-caching` can alleviate most of the performance penalty. Enabling the ACL plugin ======================= First, create ``/var/lib/bcfg2/ACL/``. Then, add ``ACL`` to your ``plugins`` list in ``bcfg2.conf``:: plugins = Bundler, Cfg, ..., Packages, ACL Finally, create ``/var/lib/bcfg2/ACL/ip.xml`` (for `IP-based ACLs`_), ``/var/lib/bcfg2/ACL/metadata.xml`` (for `Metadata-based ACLs`_), or both. IP-based ACLs ============= IP-based ACLs allow you to set ACLs based on client IP address or CIDR range. IP-based ACLs are very fast. If you are using IP-based ACLs but *not* metadata-based ACLs, it is recommended that you ensure that your IP-based ACL file ends with an explicit Deny for all clients; this will ensure that metadata-based ACLs are never checked. IP-based ACLs are defined in ``ACL/ip.xml``. The file is parsed sequentially; the first matching rule applies. Each rule is either Allow (to allow the client access), Deny (to deny the client access), or Defer (to defer to `Metadata-based ACLs`_). The last rule in ``ip.xml`` is an implicit default allow for 127.0.0.1, and an implicit default defer for all other machines. If no ``ip.xml`` file exists, then ACL checking will be deferred to metadata-based ACLs. Example ------- .. code-block:: xml In this example: * The machine at 192.168.1.10 (perhaps the Bcfg2 server) can call all plugin XML-RPC methods; * Machines in the 192.168.2.0/24 network cannot assert their own profiles; * The machine at 192.168.1.12 (perhaps the Git server) can call the Git.Update method; * All machines can call core methods (except 192.168.2.0/24, which can call all core methods except AssertProfile). Implicitly, all machines (except localhost) except 192.168.1.10 are disallowed access to the plugin methods. You can also provide a minimal configuration to try to weed out some obvious bad requests before doing the more expensive `Metadata-based ACLs`_. For instance: .. code-block:: xml In this example: * All machines can call all core methods without checking metadata ACLs; * Plugin method calls from machines in 192.168.1.0/24 are deferred to metadata ACLs; and * All other plugin method calls are denied. The only time metadata ACLs would be checked in this example would be plugin method calls by machines in 192.168.1.0/24. Reference --------- .. 
xml:type: IPACLContainerType Metadata-based ACLs =================== Metadata-based ACLs let you set ACLs based on client hostname or group membership, which is much more flexible and maintainable than `IP-based ACLs`_. The downside is that it is slower, because it requires generating client metadata for each machine that tries to authenticate. Without :ref:`server-caching`, using metadata-based ACLs will double the number of client metadata builds per client run, which could be a sizeable performance penalty. In order to limit the performance penalty, it's highly recommended to: * Enable :ref:`server-caching` in ``cautious`` or ``aggressive`` mode; and * Deny as many clients as possible with `IP-based ACLs`_. Metadata-based ACLs are defined in ``ACL/metadata.xml``. Only Allow and Deny rules are supported, not Defer rules. The file is parsed sequentially; the first matching rule applies. The last rule in ``metadata.xml`` is an implicit default allow for machines called ``localhost`` or ``localhost.localdomain``, and an implicit default deny for all other machines. If no ``metadata.xml`` file exists, then all requests are implicitly allowed. Example ------- This example is functionally identical to the `IP-based ACLs` example above, but more maintainable in several ways: .. code-block:: xml In this case, if you add a Bcfg2 server or Git server, or one of those servers changes IP address, you don't need to rewrite your ACLs. Similarly, you could add a new subnet of user workstations. Reference --------- .. xml:type: MetadataACLContainerType .. _server-plugins-misc-acl-wildcards: Wildcards ========= The ACL descriptions allow you to use '*' as a wildcard for any number of characters *other than* ``.``. That is: * ``*`` would match ``DeclareVersion`` and ``GetProbes``, but would *not* match ``Git.Update``. * ``*.*`` would match ``Git.Update``, but not ``DeclareVersion`` or ``GetProbes``. Since all plugin methods are scoped to their plugin (i.e., they are all ``.``), and all core methods have no scope, this lets you easily allow or deny core or plugin methods. You could also do something like ``*.toggle_debug`` to allow a host to enable or disable debugging for all plugins. No other bash globbing is supported. Examples ======== The :ref:`default ACL list ` can be described in ``ip.xml`` fairly simply: .. code-block:: xml A basic configuration that is still very secure but perhaps more functional could be given in ``metadata.xml``: .. code-block:: xml doc/server/plugins/misc/guppy.txt000066400000000000000000000020741303523157100174550ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-misc-guppy: ===== Guppy ===== This plugin is used to trace memory leaks within the bcfg2-server process using Guppy_. By default the remote debugger is started when this plugin is enabled. The debugger can be disabled in a running process using ``bcfg2-admin xcmd Guppy.Disable`` and enabled using ``bcfg2-admin xcmd Guppy.Enable``. .. _Guppy: http://pypi.python.org/pypi/guppy/0.1.8 Setup ===== - Install the Guppy_ package first. - Add *Guppy* to the **plugins** line in ``bcfg2.conf``. Use cases ========= To attach the console run:: python -c "from guppy import hpy;hpy().monitor()" Example ======= .. code-block:: sh # python -c "from guppy import hpy;hpy().monitor()" *** Connection 1 opened *** lc CID PID ARGV 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid'] sc 1 Remote connection 1. To return to Monitor, type or . int Remote interactive console. To return to Annex, type '-'. 
>>> hp.heap() doc/server/plugins/misc/trigger.txt000066400000000000000000000013561303523157100177560ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-misc-trigger: ======= Trigger ======= Trigger is a plugin that calls external scripts (on the server) at the end of each client run. Setup ===== First, add Trigger to the **plugins** line in ``bcfg2.conf``. Then do the following:: mkdir /var/lib/bcfg2/Trigger printf '#!/bin/sh\necho $1\n' > /var/lib/bcfg2/Trigger/test.sh chmod +x /var/lib/bcfg2/Trigger/test.sh Use cases ========= #. Completing network builds (i.e., resetting from the build target to the boot PXE target) #. Integration with external systems Trigger Arguments ================= Triggers are run with a series of arguments. #. client hostname #. -p #. client profile #. -g #. group1:group2:..:groupN (all client groups) doc/server/plugins/plugin-roles.txt000066400000000000000000000027551303523157100200040ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-plugin-roles: ============ Plugin Roles ============ * Metadata * Initial metadata construction * Connector data accumulation * ClientMetadata instance delivery * Introspection interface (for bcfg2-info & co) * Connector * Provide additional data for ClientMetadata instances * Probing * Send executable probes to clients and receive data responses * Structure * Produce a list of configuration entries that should be included in client configurations * Each structure plugin produces a list of structures * Core verifies that each bundle listed has been constructed * StructureValidator * Validate a client entry list's internal consistency, modifying if needed * Generator * GoalValidator * Validate client goals, modifying if needed * PullSource * Plugin can provide entry information about clients * PullTarget * Plugin can accept entry data and merge it into the specification * Version * Plugin can read revision information from VCS of choice * Will provide an interface for producing commits made by the bcfg2-server * Decision * ClientRunHooks * Provides hooks executed at the start and end of each client run Configuration of plugins ======================== A single list of plugins (including plugins of all capabilities) is specified upon startup (either via bcfg2.conf or equivalent). All plugins included in the startup list are initialized, and each is enabled in all roles that it supports. doc/server/plugins/probes/000077500000000000000000000000001303523157100161045ustar00rootroot00000000000000doc/server/plugins/probes/current-kernel.txt000066400000000000000000000003171303523157100216060ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-current-kernel: current-kernel ============== Probe the currently running kernel. .. code-block:: sh # PROBE_NAME : current-kernel echo `uname -r` doc/server/plugins/probes/fileprobes.txt000066400000000000000000000042201303523157100207750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-fileprobes: ========== FileProbes ========== The FileProbes plugin allows you to probe a client for a file, which is then added to the :ref:`server-plugins-generators-cfg` specification. If the file changes on the client, FileProbes can either update it in the specification or allow Cfg to replace it. FileProbes will not probe a file if there's already a file in Cfg that will apply to the client.
So if, for instance, you have a generic file in ``Cfg/etc/foo.conf/foo.conf`` that applies to all hosts, FileProbes will not retrieve ``/etc/foo.conf`` from the client (unless ``update`` is enabled; see Configuration_ below). When a new config file is first probed, an ``info.xml`` file is also written to enforce the permissions from that client. Subsequent probes from other clients will not modify or overwrite the data in ``info.xml``. (This ensures that any manual changes you make to ``info.xml`` for that file are not circumvented.) Configuration ============= FileProbes is configured in ``FileProbes/config.xml``, which might look something like: .. code-block:: xml This will result in ``/etc/foo.conf`` being retrieved from all clients; if it changes on a client, it will be overwritten by the version that was retrieved initially. Clients in the ``blah-servers`` group will be probed for ``/etc/blah.conf``; if it changes on a client, those changes will be written into the Bcfg2 specification. If the file is deleted from a client, it will be rewritten from Bcfg2. ``bar.example.com`` will be probed for ``/var/lib/bar.gz``, which contains non-ASCII characters and so needs to use base64 encoding when transferring the file. The paths probed by FileProbes must also be included as Path entries in your bundles in order to be handled properly by Cfg. Permissions are handled as usual, with ``info.xml`` files in Cfg. doc/server/plugins/probes/group.txt000066400000000000000000000075161303523157100200120ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-group: group ===== Probe used to dynamically set client groups based on OS/distro. .. note:: Some parts of this script may depend on having lsb-release installed. .. code-block:: sh #!/bin/bash OUTPUT="" if [ -e /etc/release ]; then # Solaris OUTPUT="$OUTPUT\ngroup:solaris" elif [ -e /etc/debian_version ]; then # debian based OUTPUT="$OUTPUT\ngroup:deb" if [ -e /etc/lsb-release ]; then # variant . /etc/lsb-release OS_GROUP=$DISTRIB_CODENAME DEBIAN_VERSION=$(echo "$DISTRIB_ID" | tr '[A-Z]' '[a-z]') case "$OS_GROUP" in "lucid") OUTPUT="$OUTPUT\ngroup:${DISTRIB_CODENAME}" OUTPUT="$OUTPUT\ngroup:${DEBIAN_VERSION}" ;; esac else # debian OS_GROUP=`cat /etc/debian_version` OUTPUT="$OUTPUT\ngroup:debian" case "$OS_GROUP" in 5.*) OUTPUT="$OUTPUT\ngroup:lenny" ;; "sid") OUTPUT="$OUTPUT\ngroup:sid" ;; esac fi elif [ -e /etc/redhat-release ]; then # redhat based if [ -x /bin/rpm ]; then OUTPUT="${OUTPUT}\ngroup:rpm" OS_GROUP=`/bin/rpm -q --qf "%{NAME}" --whatprovides redhat-release | grep -vi 'freeing read locks for locker' | sed 's/-release.*//' | tr '[A-Z]' '[a-z]'` REDHAT_VERSION=`/bin/rpm -q --qf "%{VERSION}" --whatprovides redhat-release` case "$OS_GROUP" in "centos" | "fedora" | "sl") OUTPUT="${OUTPUT}\ngroup:${OS_GROUP}" OUTPUT="${OUTPUT}\ngroup:${OS_GROUP}-${REDHAT_VERSION}" ;; "redhat") REDHAT_RELEASE=`/bin/rpm -q --qf "%{RELEASE}" --whatprovides redhat-release| cut -d. 
-f1` OUTPUT="${OUTPUT}\ngroup:${OS_GROUP}" OUTPUT="${OUTPUT}\ngroup:${OS_GROUP}-${REDHAT_VERSION}" OUTPUT="${OUTPUT}\ngroup:${OS_GROUP}-${REDHAT_RELEASE}" ;; esac fi elif [ -e /etc/gentoo-release ]; then # gentoo OUTPUT="$OUTPUT\ngroup:gentoo" elif [ -x /usr/sbin/system_profiler ]; then # os x ### NOTE: Think about using system_profiler SPSoftwareDataType here OUTPUT="$OUTPUT\ngroup:osx" OSX_VERSION=`sw_vers | grep 'ProductVersion:' | egrep -o '[0-9]+\.[0-9]+'` if [ "$OSX_VERSION" == "10.6" ]; then OUTPUT="$OUTPUT\ngroup:osx-snow" elif [ "$OSX_VERSION" == "10.5" ]; then OUTPUT="$OUTPUT\ngroup:osx-leo" fi echo $OUTPUT else exit 0 fi # get the proper architecture ARCH=`uname -m` case "$ARCH" in "x86_64") if [ "$OS_GROUP" == 'centos' -o "$OS_GROUP" == 'sl' -o "$OS_GROUP" == 'redhat' ]; then OUTPUT="$OUTPUT\ngroup:${ARCH}" else OUTPUT="$OUTPUT\ngroup:amd64" fi ;; "i386" | "i686") OUTPUT="$OUTPUT\ngroup:i386" ;; "sparc64") OUTPUT="$OUTPUT\ngroup:sparc64" ;; esac # output the result of all the group probing # (interpreting the backslashed newlines) echo -e $OUTPUT doc/server/plugins/probes/grub-serial-order.txt000066400000000000000000000025511303523157100221750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-grub-serial-order: grub-serial-order ================= A basic hardware probe to determine if you should change the default serial ordering in grub.conf. This pre-supposes that you know your hardware is broken. You can tell something is wrong with your hardware if it takes lots of time to iterate through the "Press a key" option and present you with the grub menu. In some cases, I've seen this take as long as 20 minutes. .. code-block:: sh #!/bin/sh # # # We need to modify the order of the --serial line in grub # in order to fix silly hardware bugs. In some cases, having # this in the wrong order causes grub to take an inordinate # amount of time to do anything before it actually auto-picks # the default menu option to boot. # PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH # let's figure out what product type this is os=`uname -s` productname="product-no-dmidecode" if [ $os = "Linux" ] ; then productname=`dmidecode -s system-product-name 2>&1` case $productname in "PowerEdge M600") echo "console serial" ;; *) echo "serial console" ;; esac fi if [ $os = "SunOS" ] ; then # Bcfg2 server is unhappy with null output from probes echo "console" fi doc/server/plugins/probes/index.txt000066400000000000000000000225701303523157100177620ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-probes: ====== Probes ====== At times you need to gather information from a client machine before you can generate its configuration. For example, if some of your machines have both a local scratch disk and a system disk while others only have the system disk, you would want to know this information to correctly generate an `/etc/auto.master` autofs config file for each type. Here we will look at how to do this. Probes also allow dynamic group assignment for clients, see :ref:`server-plugins-probes-dynamic-groups`. First, create a ``Probes`` directory in our toplevel repository location:: mkdir /var/lib/bcfg2/Probes This directory will hold any small scripts we want to use to grab information from client machines. These scripts can be in any scripting language; the shebang line (the ``#!/usr/bin/env some_interpreter_binary`` line at the very top of the script) is used to determine the script's interpreter. .. 
note:: Bcfg2 uses python mkstemp to create the Probe scripts on the client. If your /tmp directory is mounted **noexec**, you will likely need to modify the :envvar:`TMPDIR` environment variable so that the bcfg2 client creates the temporary files in a directory from which it can execute. .. note:: .. versionadded:: 1.3.0 A probe script must exit with a return value of 0. If it exits with a non-0 return value, the client will abort its run. This behavior can be disabled by setting ``exit_on_probe_failure = 0`` in the ``[client]`` section of ``bcfg2.conf``. Now we need to figure out what exactly we want to do. In this case, we want to hand out an ``/etc/auto.master`` file that looks like:: /software /etc/auto.software --timeout 3600 /home /etc/auto.home --timeout 3600 /hometest /etc/auto.hometest --timeout 3600 /nfs /etc/auto.nfs --timeout 3600 /scratch /etc/auto.scratch --timeout 3600 for machines that have a scratch disk. For machines without an extra disk, we want to get rid of that last line:: /software /etc/auto.software --timeout 3600 /home /etc/auto.home --timeout 3600 /hometest /etc/auto.hometest --timeout 3600 /nfs /etc/auto.nfs --timeout 3600 So, from the Probes standpoint we want to create a script that counts the number of SCSI disks in a client machine. To do this, we create a very simple ``Probes/scratchlocal`` script: .. code-block:: bash grep -c Vendor /proc/scsi/scsi Running this on a node with *n* disks will return the number *n+1*, as it also counts the controller as a device. To differentiate between the two classes of machines we care about, we just need to check the output of this script for numbers greater than 2. We do this in the template. .. note:: This example uses :ref:`server-plugins-generators-cfg-cheetah`, but Cheetah templates are **not** required in order for Probes to operate properly. For the template we will want to create a ``Cfg/etc/auto.master`` directory to hold the template of the file in question. Inside of this template we will need to check the result of the Probe script that got run and act accordingly. The ``Cfg/etc/auto.master/auto.master.cheetah`` file looks like:: /software /etc/auto.software --timeout 3600 /home /etc/auto.home --timeout 3600 /hometest /etc/auto.hometest --timeout 3600 /nfs /etc/auto.nfs --timeout 3600 #if int($self.metadata.Probes["scratchlocal"]) > 2 /scratch /etc/auto.scratch --timeout 3600 #end if Any Probe script you run will store its output in ``$self.metadata.Probes["scriptname"]``, so we get to our `scratchlocal` script's output as seen above. (See `Handling Probe Output`_, below, for more information on how this is done.) Note that we had to wrap the output in an `int()` call; the script output is treated as a string, so it needs to be converted before it can be tested numerically. With all of these pieces in place, the following series of events will happen when the client is run: #. Client runs #. Server hands down our ``scratchlocal`` probe script #. Client runs the ``scratchlocal`` probe script and hands its output back up to the server #. Server generates ``/etc/auto.master`` from its template, performing any templating substitutions/actions needed in the process. #. Server hands ``/etc/auto.master`` down to the client #. Client puts file contents in place. Now we have a nicely dynamic ``/etc/auto.master`` that can gracefully handle machines with different numbers of disks. All that's left to do is to add the ``/etc/auto.master`` to a Bundle: .. code-block:: xml .. 
_server-plugins-probes-dynamic-groups: Dynamic Group Assignment ======================== The output lines of the probe matching "group:" are used to dynamically assign hosts to groups. These dynamic groups need not already exist in ``Metadata/groups.xml``. If a dynamic group is defined in ``Metadata/groups.xml``, clients that include this group will also get all included groups and bundles. Consider the following output of a probe:: group:debian-wheezy group:amd64 This assigns the client to the groups debian-wheezy and amd64. To prevent clients from manipulating the probe output and choosing unexpected groups (and receiving the potentially sensitive files associated with those groups), you can use the ``allowed_groups`` option in the ``[probes]`` section of ``bcfg2.conf`` on the server. This whitespace-separated list of anchored regular expressions (each must match the complete group name) controls dynamic group assignments. Only matching groups are allowed. The default allows all groups. .. versionadded:: 1.3.4 Example: .. code-block:: ini [probes] allowed_groups = debian-(squeeze|wheezy|sid) i386 This allows the groups `debian-squeeze`, `debian-wheezy`, `debian-sid` and `i386`. With the probe output from above, this setting would disallow the group `amd64`. Handling Probe Output ===================== Bcfg2 stores output from probes in the ``Probes`` property of a client's metadata object. To access this data in :ref:`server-plugins-generators-cfg-genshi`, for instance, you could do:: ${metadata.Probes['script-name']} This is not the full output of the probe; any lines that start with "group:" have been stripped from the output. The data is a string-like object that has some interesting and salient features: * If the data is a valid XML document, then ``metadata.Probes['script-name'].xdata`` will be an ``lxml.etree._Element`` object representing the XML data. * If the data is a valid JSON document, and either the Python ``json`` or ``simplejson`` module is installed, then ``metadata.Probes['script-name'].json`` will be a data structure representing the JSON data. * If the data is a valid YAML document, and either the Python ``yaml`` or ``syck`` module is installed, then ``metadata.Probes['script-name'].yaml`` will be a data structure representing the YAML data. If these conditions are not met, then the named properties will be ``None``. In all other fashions, the probe data objects should act like strings. Host- and Group-Specific probes =============================== Bcfg2 has the ability to alter probes based on client hostname and group membership. These files work similarly to files in Cfg. If multiple files with the same basename apply to a client, the most specific one is used. Only one instance of a probe is served to a given client, so if a host-specific version and a generic version both apply, only the host-specific one will be used. If you want to detect information about the client operating system, the :ref:`server-plugins-probes-ohai` plugin can help. .. _server-plugins-probes-data-storage: Data Storage ============ .. versionadded:: 1.3.0 The Probes plugin stores the output of client probes locally on the Bcfg2 server in order to ensure that probe data and groups are available at server startup (rather than having to wait until all probes have run every time the server is restarted) and that they are also available to :ref:`bcfg2-info ` and related tools. There are two options for storing this data: ``Probes/probed.xml``, a plain XML file stored in the Bcfg2 specification; or in a database.
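For example, a minimal ``bcfg2.conf`` excerpt selecting the database-backed storage model described below might look like the following sketch; only the ``use_database`` option shown here is specific to probe storage, and the database itself must still be configured separately (see :ref:`server-database`):

.. code-block:: ini

    [probes]
    # store probe output in the server database instead of Probes/probed.xml
    use_database = true

Omitting ``use_database`` (or setting it to ``false``) keeps the default file-based storage in ``Probes/probed.xml``.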
Advantages and disadvantages of using the database: * The database is easier to query from other machines, for instance if you run ``bcfg2-info`` or ``bcfg2-test`` on a machine that is not your Bcfg2 server. * The database allows multiple Bcfg2 servers to share probe data. * The database is likely to handle probe data writes (which happen on every client run) more quickly, since it can only write the probes whose data has changed. * The database is likely to handle probe data reads (which happen only on server startup) more slowly, since it must query a database rather than the local filesystem. Once the data has been read in initially (from XML file or from the database) it is kept in memory. To use the database-backed storage model, set ``use_database`` in the ``[probes]`` section of ``bcfg2.conf`` to ``true``. You will also need to configure the :ref:`server-database`. The file-based storage model is the default, although that is likely to change in future versions of Bcfg2. Other examples ============== .. toctree:: :maxdepth: 1 current-kernel group vserver grub-serial-order manufacturer producttype serial-console-speed Other Probing plugins ===================== .. toctree:: ohai fileprobes doc/server/plugins/probes/manufacturer.txt000066400000000000000000000020121303523157100213340ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-manufacturer: manufacturer ============ Probe to output some standardized group names based on the manufacturer information. .. code-block:: sh #!/bin/sh # PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH manufacturer=manuf-no-demidecode os=`uname -s` if [ $os = "Linux" ] ; then manufacturer=`dmidecode -s system-manufacturer 2>&1| sed -e 's/[ ]\+$//g'` case $manufacturer in "Dell Inc.") manufacturer="manuf-dell" ;; "Sun Microsystems") manufacturer="manuf-sun" ;; "VMware, Inc.") manufacturer="manuf-vmware" ;; *) manufacturer="manuf-unknown" ;; esac fi if [ $os = "SunOS" ]; then case `uname -i` in SUNW,*) manufacturer="manuf-sun" ;; *) manufacturer="manuf-unknown" ;; esac fi echo group:$manufacturer doc/server/plugins/probes/ohai.txt000066400000000000000000000020771303523157100175730ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-ohai: Ohai ==== .. _Ohai: http://wiki.opscode.com/display/chef/Ohai .. _Ohai-Install: http://wiki.opscode.com/display/chef/Ohai+Installation+and+Use The `Ohai`_ plugin is used to detect information about the client operating system. The data is reported back to the server using JSON. Client prerequisites -------------------- On the client, you need to install `Ohai`_. See `Ohai-Install`_ for more information. Server prerequisites -------------------- If you have python 2.6 or later installed, you can continue on to :ref:`ohai-setup`. Otherwise, you will need to install the python-simplejson module found packaged in most distributions. .. _ohai-setup: Setup ----- To enable the Ohai plugin, you need to first create an ``Ohai`` directory in your Bcfg2 repository (e.g. ``/var/lib/bcfg2/Ohai``). You then need to add **Ohai** to the plugins line in ``bcfg2.conf``. Once this is done, restart the server and start a client run. You will have the JSON output from the client in the ``Ohai`` directory you created previously. doc/server/plugins/probes/producttype.txt000066400000000000000000000040521303523157100212300ustar00rootroot00000000000000.. -*- mode: rst -*- .. 
_server-plugins-probes-producttype: producttype =========== A probe to set up dynamic groups based on the producttype and possibly some internal components of the system. Defined products are product-name. Defined component information is has_some_component. In the example below, we can infer that we have Emulex Lightpulse gear and set the group has_hardware_emulex_lightpulse. .. code-block:: sh !/bin/sh # # PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH # let's figure out what product type this is os=`uname -s` productname="product-no-dmidecode" if [ $os = "Linux" ] ; then productname=`dmidecode -s system-product-name 2>&1` case $productname in "PowerEdge M600") productname="product-bladem600" ;; "Sun Fire X4100 M2") productname="product-x4100m2" ;; "Sun Fire X4440") productname="product-x4440" ;; "VMware Virtual Platform") productname="product-vmware-vm" ;; *) productname="product-unknown" ;; esac # check for emulex lightpulse fiber channel HBA check_emulex_lightpulse=`lspci -d 10df: | grep -c LightPulse` if [ $check_emulex_lightpulse -gt 0 ]; then echo group:has_hardware_emulex_lightpulse fi # check for broadcom nics check_broadcom_nic=`lspci -d 14e4: | grep -c NetXtreme` if [ $check_broadcom_nic -gt 0 ]; then echo group:has_hardware_broadcom_nic fi # check for intel pro/1000 MT nics check_intel_pro1000mt_nic=`lspci -d 8086:1010 | wc -l` if [ $check_intel_pro1000mt_nic -gt 0 ]; then echo group:has_hardware_intel_pro1000mt_nic fi fi if [ $os = "SunOS" ] ; then case `uname -i` in SUNW,*) productname=`uname -i` ;; *) productname=product-unknown ;; esac fi echo group:$productname doc/server/plugins/probes/serial-console-speed.txt000066400000000000000000000025011303523157100226600ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-serial-console-speed: serial-console-speed ==================== A probe to tell us what the serial console speed should be for a given piece of hardware. This pre-supposed some knowledge of the hardware because you define the speeds in here instead of attempting to probe bios or something in the hardware in most cases (like x86). .. code-block:: sh #!/bin/sh # # # figure out what serial speed we should tell bcfg2 to use. # since there's no way to probe, we need to set this up by external # knowledge of the system hardware type (and just make sure we # standardize on that serial speed for that hardware class) PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH # let's figure out what product type this is os=`uname -s` productname="product-no-dmidecode" if [ $os = "Linux" ] ; then productname=`dmidecode -s system-product-name 2>&1` case $productname in "PowerEdge M600") echo "115200" ;; *) echo "9600" ;; esac fi if [ $os = "SunOS" ]; then platform=`uname -i` case $platform in SUNW,*) eeprom ttya-mode | sed 's/ttya-mode=//'|awk -F, '{print $1}' ;; *) echo "9600" ;; esac fi doc/server/plugins/probes/vserver.txt000066400000000000000000000010111303523157100203320ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-probes-vserver: vserver ======= Detect if the server is a Linux-VServer host. .. 
code-block:: sh #!/bin/sh # Test the proc TEST=`cat /proc/self/status|grep s_context| cut -d":" -f2|cut -d" " -f 2` case "$TEST" in "") # Not a vserver kernel echo group:host ;; "0") # Vserver kernel but it is the HOST echo group:host ;; [0-9]*) # Vserver echo group:vserver ;; esac doc/server/plugins/statistics/000077500000000000000000000000001303523157100170045ustar00rootroot00000000000000doc/server/plugins/statistics/reporting.txt000066400000000000000000000005611303523157100215600ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-statistics-reporting: ========= Reporting ========= Reporting can be enabled by adding Reporting to the plugins line in ``/etc/bcfg2.conf``: plugins = Bundler,Cfg,...,Reporting For more information on how to use Reporting to setup reporting, see :ref:`reports-dynamic`. .. note:: This replaces the DBStats plugin. doc/server/plugins/structures/000077500000000000000000000000001303523157100170355ustar00rootroot00000000000000doc/server/plugins/structures/altsrc.txt000066400000000000000000000073161303523157100210750ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-altsrc: ====== altsrc ====== .. versionadded:: 0.9.5 Altsrc is a generic, Bcfg2 server-side mechanism for performing configuration entry name remapping for the purpose of data binding. Altsrc can be used as a parameter for any entry type, and can be used in any structure. Use Cases ========= * Equivalent configuration entries on different architectures with different names * Mapping entries with the same name to different bind results in a configuration (two packages with the same name but different types) * A single configuration entry across multiple specifications (multi-plugin, or multi-repo) Examples ======== * Consider the case of /etc/hosts on linux and /etc/inet/hosts on solaris. These files contain the same data in the same format, and should typically be synchronized, however, exist in different locations. Classically, one would need to create one entry for each in Cfg and perform manual synchronization. Or, you could use symlinks and pray. Altsrc is driven from the bundle side. For example: .. code-block:: xml In this case, when a solaris host gets the 'netinfo' bundle, it will get the first Path entry, which includes an altsrc parameter. This will cause the server to bind the entry as if it were a Path called ``/etc/hosts``. This configuration entry is still called ``/etc/inet/hosts``, and is installed as such. * On encap systems, frequently multiple packages of the same name, but of different types will exist. For example, there might be an openssl encap package, and an openssl rpm package. This can be dealt with using a bundle like: .. code-block:: xml This bundle will bind data for the packages "openssl-encap" and "openssl-rpm", but will be delivered to the client with both packages named "openssl" with different types. * Consider the case where there exist complicated, but completely independent specifications for the same configuration entry but different groups of clients. The following bundle will allow the use of two different templates /etc/firewall-rules-external and /etc/firewall-rules-internal for different clients based on their group membership. .. code-block:: xml ... * Consider the case where a variety of files can be constructed by a single :ref:`Genshi ` or :ref:`Cheetah ` template. 
It would be possible to copy this template into the proper location for each file, but that requires proper synchronization upon modification and knowing up front what the files will all be called. Instead, the following bundle allows the use of a single template for all proper config file instances. .. code-block:: xml doc/server/plugins/structures/bundler/000077500000000000000000000000001303523157100204705ustar00rootroot00000000000000doc/server/plugins/structures/bundler/bcfg2.txt000066400000000000000000000064241303523157100222220ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-bcfg2-server: Bcfg2 Server ============ These two bundles split out the entries that do require a restart of ``bcfg2-server`` from those that don't. These bundles also demonstrate use of bound entries to avoid splitting entries between Bundler and Rules. ``Bundler/bcfg2-server.xml``: .. code-block:: xml ``Bundler/bcfg2-server-base.xml``: .. code-block:: xml doc/server/plugins/structures/bundler/index.txt000066400000000000000000000233611303523157100223450ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-structures-bundler: ======= Bundler ======= Bundler is used to describe groups of inter-dependent configuration entries, such as the combination of packages, configuration files, and service activations that comprise typical Unix daemons. Bundles are used to add groups of configuration entries to the inventory of client configurations, as opposed to describing particular versions of those entries. For example, a bundle could say that the configuration file ``/etc/passwd`` should be included in a configuration, but will not describe the particular version of ``/etc/passwd`` that a given client will receive. Group and Client tags can be used inside of bundles to differentiate which entries particular clients will recieve; this is useful for the case where entries are named differently across systems; for example, one Linux distro may have a package called ``openssh`` while another uses the name ``ssh``. See :ref:`xml-group-client-tags` for details and a longer example. A brief example: .. code-block:: xml Note that we do not specify *how* a given entry should be managed, only that it should be. The concrete specification of each entry will be provided by a different plugin such as :ref:`server-plugins-generators-cfg`, :ref:`server-plugins-generators-rules`, or :ref:`server-plugins-generators-packages`. Alternatively, you can use fully-bound entries in Bundler, which has various uses. For instance: .. code-block:: xml In this example, both Service tags and one Package tag are fully bound -- i.e., all information required by the client to manage those entries is provided in the bundle itself. .. _server-plugins-structures-bundler-magic: Bundle "Magic" ============== Bundles are collections of *related* entries. That point is very, very important, because a bundle performs certain "magic" actions when one or more entries in it are modified: * :xml:type:`Service ` entries whose ``restart`` attribute is ``true`` (the default) will be restarted. * :xml:type:`Action ` entries whose ``when`` attribute is ``modified`` will be run. Because of these two magic actions, it's extremely important to structure your bundles around Service and Action entries, rather than around some loose idea of which entries are related. For instance, in order to manage a Bcfg2 server, a number of packages, paths, services, etc. must be managed. 
But not all of these entries would require ``bcfg2-server`` to be restarted, so to limit restarts it's wise to split these entries into two bundles. See :ref:`server-plugins-structures-bundler-bcfg2-server` for an example of this. .. _server-plugins-structures-bundler-index-disabling-magic: Disabling Magic --------------- Disabling magic bundler actions can be done in one of two ways: * On a per-entry basis. Set ``restart="false"`` on a Service to prevent it from being restarted when the bundle is modified. Set ``when="always"`` on an Action to cause it to run every time, regardless of whether or not the bundle was modified. * On a per-bundle basis. Set ``independent="true"`` on the top-level ``Bundle`` tag to signify that the bundle is a collection of independent (i.e., unrelated) entries, and to prevent any magic actions from being performed. (This is similar to the ``Base`` plugin in older versions of Bcfg2.) This was added in Bcfg2 1.4. Service entries in independent bundles are never restarted, and Action entries in independent bundles are only executed if ``when="always"``. (I.e., an Action entry in an independent bundle with ``when="modified"`` is useless.) .. _server-plugins-structures-bundler-index-genshi-templates: Genshi templates ================ Genshi XML templates allow you to use the `Genshi `_ templating system to dynamically generate a bundle. Genshi templates can be specified one of two ways: 1. Add an XML-style genshi template to the Bundler directory with a ``.genshi`` and the associated namespace attribute. *This is deprecated as of Bcfg2 1.4.* 2. Add the Genshi namespace to your existing XML bundle. See :ref:`xml-genshi-templating` for details. Troubleshooting --------------- To render a bundle for a given client, you can run:: bcfg2-info buildbundle This will render the template; it will not fully bind all of the entries in the bundle. See :ref:`bcfg2-info ` for more details. .. _server-plugins-structures-bundler-index-dependencies: Dependencies ============ Dependencies on other bundles can be specified by adding a RequiredBundle tag that adds another bundle by name, e.g.: .. code-block:: xml ... The dependent bundle is added to the list of bundles sent to the client, *not* to the parent bundle itself. If you want to propagate the modification flag from the required bundle, you can add ``inherit_modification="true"`` to the RequiredBundle tag. An example: ``nfs-client.xml``: .. code-block:: xml ``automount.xml``: .. code-block:: xml If a new ``nfs-utils`` package was installed, the ``nfslock``, ``rpcbind``, and ``nfs`` services would be restarted, but *not* the ``autofs`` service. If you would add ``inherit_modification="true"`` to the RequiredBundle tag, you would ensure the propagation of the modification flag and the ``autofs`` service would be restarted, too. But if a new ``/etc/auto.misc`` file was sent out, *only* the ``autofs`` service would be restarted, but the ``nfslock``, ``rpcbind``, and ``nfs`` services would not be restarted (independent of the ``inherit_modification`` flag). Altsrc ====== .. toctree:: :maxdepth: 1 ../altsrc Examples ======== In some cases, configuration files need to include the client's hostname in their name. The following template produces such a config file entry. .. code-block:: xml Depending on the circumstance, these configuration files can either be handled by individual entries in :ref:`server-plugins-generators-cfg`, or can be mapped to a single entry by using the :ref:`server-plugins-structures-altsrc` feature. 
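As a purely illustrative sketch (the path, file name, and altsrc target are examples, not part of the stock repository), a Genshi bundle that embeds the client's hostname in an entry name could look like:

.. code-block:: xml

    <Bundle xmlns:py="http://genshi.edgewall.org/">
      <!-- hypothetical Path entry whose name includes the client's hostname;
           the optional altsrc maps every such file back to a single Cfg entry -->
      <Path name="/etc/myapp/${metadata.hostname}.conf"
            altsrc="/etc/myapp/template.conf"/>
    </Bundle>

A similar approach, shown next, derives file names from probe output instead.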
In this example, configuration file names are built using probed results from the client. getmac is a probe that gathers client MAC addresses and returns them in a newline delimited string. .. code-block:: xml .. note:: * The use of the altsrc directive causes all ifcfg files to be handled by the same plugin and entry. * The blocks have only been available in genshi since 0.4 (http://genshi.edgewall.org/ticket/84) If you want a file to be only on a per-client basis, you can use an if declaration. .. code-block:: xml or alternately .. code-block:: xml or yet another way .. code-block:: xml The final form is preferred if there is no code inside the block that would fail on other clients. While these examples are simple, the test in the if block can in fact be any python statement. .. _server-plugins-structures-bundler-index-examples: Other examples ============== Some simple examples of Bundles can be found in the `Bcfg2 example repository`_. .. _Bcfg2 example repository: https://github.com/solj/bcfg2-repo In addition to the example repository, the following is a list of some more complex example Bundles. .. toctree:: :maxdepth: 1 bcfg2 kernel moab nagios ntp snmpd torque yp doc/server/plugins/structures/bundler/kernel.txt000066400000000000000000000062221303523157100225130ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _server-plugins-structures-bundler-kernel: kernel ====== This is a rather complex Bundle for the Linux kernel from a system with a history of complexity. There are two kernel versions present on the systems at all times (the current and the previous), so the package names all contain versioning information. This includes kernel-specific modules for various specialties - ``gm`` for Myrinet boards, ``gpfs`` and ``pvfs`` for storage clients, and ``nvidia`` modules for machines with Nvidia cards. Note that only the ``ia32`` machines have Nvidia cards in them, and thus those entries only exist in that section. It is easy to see that there is duplication of effort between the two architectures - both have the same ``linux`` package entry names, for example. This Bundle could be arranged in many different ways, some of which might be better than this one. Feel free to hack as needed. .. code-block:: xml doc/server/plugins/structures/bundler/moab.txt000066400000000000000000000006071303523157100221520ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-moab: moab ==== This is a fairly simple Bundle for the Moab workload manager. .. code-block:: xml doc/server/plugins/structures/bundler/nagios.txt000066400000000000000000000023501303523157100225110ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-nagios: nagios ====== A Bundle for the Nagios service. This Bundle installs all of our local Nagios plugins, takes into account that the SNMP package changed names between SLES 8 and SLES 9, and works on both the Nagios server and the clients. .. code-block:: xml .. note:: You may also want to have a look at the :ref:`NagiosGen ` plugin. doc/server/plugins/structures/bundler/ntp.txt000066400000000000000000000010431303523157100220300ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-ntp: ntp === Despite its lack of groups, this Bundle controls both ``ntp`` servers and clients. It does this through the use of host-specific entries in the ``Cfg`` repository. It is left as an exercise for the reader to do this better through use of groups. .. 
code-block:: xml doc/server/plugins/structures/bundler/snmpd.txt000066400000000000000000000005201303523157100223470ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-snmpd: snmpd ===== A simple bundle for a SNMP daemon with a package, a service and a configuration file. .. code-block:: xml doc/server/plugins/structures/bundler/torque.txt000066400000000000000000000052441303523157100225550ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-torque: torque ====== = torque.xml = A longer Bundle that includes many group-specific entries. .. code-block:: xml doc/server/plugins/structures/bundler/yp.txt000066400000000000000000000014401303523157100216600ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-bundler-yp: yp == = yp.xml = Note that this Bundle includes **Group** sections. Toplevel elements go to anybody that includes this Bundle, but clients that belong to the **yp-client** and **yp-server** groups get their own specialized treatment too. .. code-block:: xml doc/server/plugins/structures/defaults.txt000066400000000000000000000023271303523157100214110ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-defaults: ========== Defaults ========== The Defaults plugin can be used to populate default attributes for entries. Defaults is *not* a Generator plugin, so it does not actually bind an entry; Defaults are applied after an entry has been bound, and only populate attributes that are not yet set. Like :ref:`server-plugins-generators-rules`, Defaults supports regular expressions in the name attribute. For instance, to make all Service entries use the ``systemd`` tool on Fedora 15 and the ``chkconfig`` tool on Fedora 14, you could do:: If you were to specify a ``type`` attribute for a Service entry in Rules (or a ``type`` attribute for a BoundService entry in Bundler), that would take precendence over the default. Like :ref:`server-plugins-generators-rules`, Defaults can also replace ``%{name}`` in attributes with the real name of the entry. To enable this, add the following setting to ``bcfg2.conf``:: [defaults] replace_name = yes doc/server/plugins/structures/deps.txt000066400000000000000000000033451303523157100205360ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-structures-deps: ==== Deps ==== The Deps Plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc). Note that only configuration entries, like Package, Path, etc can be used. Groupings (like Bundle) are not supported. Here are some examples: .. note:: These particular examples are not extremely useful when using the Packages plugin as Packages will handle the dependency resolution for you. However, there are certainly other use cases for the Deps plugin. Deps/bcfg2.xml ============== .. code-block:: xml This basically causes any configuration specification that includes Package bcfg2 to include python-lxml and isprelink, in a second base clause. Deps/bcfg2-server.xml ===================== .. code-block:: xml This states that the bcfg2-server package (it's a separate package on some distros) depends on a long list of other packages. doc/server/plugins/version/000077500000000000000000000000001303523157100162775ustar00rootroot00000000000000doc/server/plugins/version/bzr.txt000066400000000000000000000016021303523157100176340ustar00rootroot00000000000000.. -*- mode: rst -*- .. 
_server-plugins-version-bzr: === Bzr === Why use the Bazaar plugin ========================= The Bazaar plugin is useful if you would like to track changes to your bcfg2 repository using a `Bazaar `_ backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Future plans are to commit changes to the repo which are made by the server. How to enable the Bazaar plugin =============================== Simply add "Bzr" to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,...,Bzr Usage notes =========== Unlike other VCS plugins for Bcfg2, the Bazaar plugin checks whether there are uncommitted changes to the repository. If there are, this plugin appends a "+" after the version number. Essentially, this means you're using that version, "plus" some changes. doc/server/plugins/version/cvs.txt000066400000000000000000000011401303523157100176270ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-cvs: === CVS === Why use the CVS plugin ========================= The CVS plugin is useful if you would like to track changes to your Bcfg2 repository using a `CVS `_ backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Future plans are to commit changes to the repo which are made by the server. How to enable the CVS plugin =============================== Simply add "Cvs" to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,...,Cvs doc/server/plugins/version/darcs.txt000066400000000000000000000014531303523157100201370ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-darcs: ===== Darcs ===== This page describes the new Darcs plugin which is experimental. Why use the Darcs plugin ======================== The Darcs plugin is useful if you would like to track changes to your Bcfg2 repository using a `Darcs `_ backend. Currently, it enables you to get revision information out of your repository for reporting purposes. Once the plugin is enabled, every time a client checks in, it will include the current repository revision in the reports/statistics. How to enable the Darcs plugin ============================== You will need to install Darcs on the Bcfg2 server first. Once installed, simply add Darcs to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,...,Darcs doc/server/plugins/version/fossil.txt000066400000000000000000000011751303523157100203430ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-fossil: ====== Fossil ====== Why use the Fossil plugin ========================= The Fossil plugin is useful if you would like to track changes to your bcfg2 repository using a `Fossil SCM `_ backend. Currently, It enables you to get revision information out of your repository for reporting purposes. Future plans are to commit changes to the repo which are made by the server. How to enable the Fossil plugin =============================== Simply add "Fossil" to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,...,Fossil doc/server/plugins/version/git.txt000066400000000000000000000034531303523157100176300ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-git: === Git === The Git plugin is useful if you would like to track changes to your bcfg2 repository using a `Git `_ backend. It enables you to get revision information out of your repository for reporting purposes. 
Once the plugin is enabled, every time a client checks in, it will include the current repository revision in the reports/statistics. Additionally, if the ``GitPython`` library is installed, the Git plugin exposes an additional XML-RPC method call, ``Git.Update``. With no arguments, ``Git.Update`` updates the working copy to the latest version in the remote tracking branch. If the current working copy doesn't have a remote tracking branch, then nothing is done. ``Git.Update`` can also be given a single argument, the name of a git tree-ish (branch, tag, ref, commit, etc.) to check out. When this is done, the new working is updated as well. For example:: bcfg2-admin xcmd Git.Update master This checks out the ``master`` branch and updates it to the latest data from the remote ``master`` (if applicable). If you then run:: bcfg2-admin xcmd Git.Update This updates to the latest remote data without changing branches. Then:: bcfg2-admin xcmd Git.Update dd0bb776c This checks out the specified commit. Subsequently:: bcfg2-admin xcmd Git.Update This does nothing, because the working copy is now in "detached HEAD" state, and there can be no remote tracking branch to update from. To put it another way, once you tell ``Git.Update`` which tree-ish to checkout, it stays on that tree-ish until you tell it otherwise. Enabling the Git plugin ======================= To enable the Git plugin, simply add it to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,Metadata,...,Git doc/server/plugins/version/hg.txt000066400000000000000000000011771303523157100174440ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-hg: ============== Mercurial (Hg) ============== Why use the Mercurial plugin ============================ The Hg plugin is useful if you would like to track changes to your Bcfg2 repository using `Hg `_ backend. Currently, it enables you to get revision information out of your repository for reporting purposes. How to enable the Mercurial plugin ================================== You will need to install Mercurial on the Bcfg2 server first. Simply add Hg to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,...,Hg doc/server/plugins/version/svn.txt000066400000000000000000000033611303523157100176510ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-plugins-version-svn: === Svn === The Svn plugin is useful if you would like to track changes to your Bcfg2 repository using a `Subversion `_ backend. As with the other Version plugins, the Svn plugin enables you to get revision information out of your repository for reporting purposes. Once the plugin is enabled, every time a client checks in, it will include the current repository revision in the reports/statistics. Additionally, if the ``pysvn`` library is installed, the Svn plugin exposes two XML-RPC method calls: * ``Svn.Update`` updates the working copy to the latest version in the repository. * ``Svn.Commit`` commits any changes to the working copy back to the repository. In order for this to work, the user Bcfg2 runs as must be able to commit to the repository non-interactively. Enabling the Svn plugin ======================= Simply add Svn to your plugins line in ``/etc/bcfg2.conf``:: [server] plugins = Bundler,Cfg,Metadata,..,Svn Resolving conflicts ======================= By default, the Svn plugin does not attempt to resolve conflicts when trying to commit or update the repository. 
This can be changed by adding to ``/etc/bcfg2.conf``, e.g.:: [svn] conflict_resolution = theirs-conflict The possible values of ``conflict_resolution`` are: * ``base`` * ``postpone`` (default) * ``mine-conflict`` * ``theirs-conflict`` * ``mine-full`` * ``theirs-full`` The other possible SVN conflict resolvers (``edit``, ``launch``, ``working``) require manual intervention and so are not possible. Descriptions of each action can be found in the `Version Control with Subversion `_ book. doc/server/selinux.txt000066400000000000000000000217641303523157100153730ustar00rootroot00000000000000.. -*- mode: rst -*- .. _server-selinux: ======= SELinux ======= This document describes two related but somewhat disparate concepts: First, how to run Bcfg2 under SELinux; and secondly, how to use Bcfg2 to manage SELinux. .. _server-selinux-policy: Running Bcfg2 under SELinux =========================== .. versionadded:: 1.3.0 Bcfg2 now ships with an SELinux policy that can be used to run both the client and server in enforcing mode. (Most of the helper tools, like ``bcfg2-info`` and ``bcfg2-admin``, will still need to be run unconfined.) It defines the following booleans: +-------------------------------------+-----------------------------------------+----------------------------------------------------------+---------+ | Boolean Name | Description | Plugins Affected | Default | +=====================================+=========================================+==========================================================+=========+ | bcfg2_server_exec_scripts | Allow the Bcfg2 server to execute | :ref:`server-plugins-misc-trigger` and | off | | | scripts in ``unconfined_t``. This | :ref:`server-plugins-connectors-puppetenc`, | | | | ability is limited to scripts in the | and Cfg | | | | ``bcfg2_server_script_exec_t`` context. | :ref:`server-plugins-generators-cfg-validation` | | | | If this boolean is off, then external | | | | | server-side scripts will be run in | | | | | ``bcfg2_server_t``, which is a fairly | | | | | limited context. 
| | | +-------------------------------------+-----------------------------------------+----------------------------------------------------------+---------+ | bcfg2_server_can_network_connect_db | Allow the Bcfg2 server to connect to | :ref:`server-plugins-statistics-reporting`, the | off | | | databases (e.g., MySQL and PostgreSQL) | :ref:`server-plugins-grouping-metadata-clients-database` | | | | | feature of Metadata, and the database | | | | | :ref:`server-plugins-probes-data-storage` | | | | | feature of Probes | | +-------------------------------------+-----------------------------------------+----------------------------------------------------------+---------+ It also defines the following SELinux types: +----------------------------+-------------------------------------------------+ | Type Name | Description | +============================+=================================================+ | bcfg2_t | The context the Bcfg2 client runs in | +----------------------------+-------------------------------------------------+ | bcfg2_exec_t | The context of the Bcfg2 client script itself | +----------------------------+-------------------------------------------------+ | bcfg2_server_t | The context the Bcfg2 server runs in | +----------------------------+-------------------------------------------------+ | bcfg2_server_exec_t | The context of the Bcfg2 server script itself | +----------------------------+-------------------------------------------------+ | bcfg2_initrc_exec_t | The context of the Bcfg2 client init script | +----------------------------+-------------------------------------------------+ | bcfg2_server_initrc_exec_t | The context of the Bcfg2 server init script | +----------------------------+-------------------------------------------------+ | bcfg2_var_lib_t | The context of most Bcfg2 specification data, | | | with the exception of the executable scripts in | | | ``bcfg2_server_script_exec_t`` | +----------------------------+-------------------------------------------------+ | bcfg2_server_script_t | The context server-side scripts run in. This | | | type is unconfined if the | | | ``bcfg2_server_exec_scripts`` is on. | +----------------------------+-------------------------------------------------+ | bcfg2_server_script_exec_t | The context of the server-side scripts in the | | | Bcfg2 specification | +----------------------------+-------------------------------------------------+ | bcfg2_yum_helper_exec_t | The context of the bcfg2-yum-helper script | +----------------------------+-------------------------------------------------+ | bcfg2_var_run_t | The context of the server pidfile | +----------------------------+-------------------------------------------------+ | bcfg2_lock_t | The context of the client lock file | +----------------------------+-------------------------------------------------+ | bcfg2_conf_t | The context of bcfg2.conf | +----------------------------+-------------------------------------------------+ | bcfg2_tmp_t | The context of temp files created by the Bcfg2 | | | server | +----------------------------+-------------------------------------------------+ If you do run your server in enforcing mode, it is highly recommend that you run ``restorecon -R /var/lib/bcfg2`` every time you update the content in that directory, particularly if you are using plugins that execute arbitrary scripts. .. _server-selinux-entries: Managing SELinux Entries ======================== .. 
versionadded:: 1.3.0 Bcfg2 has the ability to handle the majority of SELinux entries with the ``SELinux`` entry type, which handles modules (with the :ref:`server-plugins-generators-semodules` plugin), file contexts, users and user mappings, permissive domains, nodes, and interfaces. In addition, ``info.xml`` files and most types of the ``Path`` tag can accept an ``secontext`` attribute to set the context of that entry. The full semantics of each configuration entry is documented with the :ref:`server-plugins-generators-rules` plugin. .. note:: The ``secontext`` attribute takes a *full* context, e.g., "``system_u:object_r:etc_t:s0``"; the ``selinuxtype`` attribute always takes *only* an SELinux type, e.g., "``etc_t``". ``secontext`` (but not ``selinuxtype``) can also accept the special value "``__default__``", which will restore the context on the Path entry in question to the default supplied by the SELinux policy. In its current version, the SELinux support in Bcfg2 is not sufficient to manage MCS/MLS policies. Extra Entries ------------- As it can be very tedious to create a baseline of all existing SELinux entries, you can use ``selinux_baseline.py`` located in the ``tools/`` directory to do that for you. The actual definition of an "extra" entry actually depends on the version of SELinux available; the SELinux APIs have been extremely fluid, so many features available in newer versions are not available in older versions. Newer SELinux versions (e.g., in recent versions of Fedora) can be queried for only entries that have been locally modified; on these versions of SELinux, only locally modified entries will be considered extra. On older SELinux versions (e.g., on RHEL 5), however, that functionality is missing, so *all* SELinux entries will be considered extra, making ``selinux_baseline.py`` quite necessary. ``selinux_baseline.py`` writes a bundle to stdout that contains ``BoundSELinux`` entries for the appropriate SELinux entities. .. _server-selinux-duplicate-entries: Duplicate Entries ----------------- It may be necessary to use `BoundSEFcontext` tags if a single fcontext needs two different SELinux types depending on whether it's a symlink or a plain file. For instance: .. code-block:: xml doc/server/xml-common.txt000066400000000000000000000405341303523157100157660ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim: ft=rst .. _xml-features: ===================== Common XML Features ===================== Most of the XML files in Bcfg2 have a common set of features that are supported. These are described in some detail below, and a precise rundown of which features are supported by which files is provided. .. _xml-group-client-tags: Group and Client tags ===================== These allow the portions of an XML document inside a Client or Group tag to only apply to the given client group. That is, they can be thought of as conditionals, where the following are roughly equivalent: .. code-block:: xml And:: If client is a member of group1 then Manage the abstract path "/etc/foo.conf" Nested Group and Client tags are conjunctive (logical ``AND``). For instance, the following are roughly equivalent: .. code-block:: xml And:: If client is a member of group1 and has hostname "foo.example.com" then Manage the abstract package "bar" If client is a member of group1 then Manage the abstract package "baz" There is no convenient ``else``; you must specify all conditions explicitly. To do this, Group and Client tags may be negated, as in: .. 
code-block:: xml This is roughly equivalent to:: If client is a member of group1 then Manage the abstract service "foo" If client is not a member of group 1 then Manage the abstract service "bar" Or, more compactly: If client is a member of group1 then Manage the abstract service "foo" Else Manage the abstract service "bar" As an example, consider the following :ref:`bundle `: .. code-block:: xml In this bundle, most of the entries are common to all systems. Clients in group ``deb`` get one extra package and service, while clients in group ``rpm`` get two extra packages and an extra service. In addition, clients in group ``fedora`` *and* group ``rpm`` get one extra package entries, unless they are not in the ``fedora14`` group, in which case, they get an extra package. The client ``trust.example.com`` gets one extra file that is not distributed to any other clients. +------------------------+-----------------------------------+ | Group/Hostname | Entry | +========================+===================================+ | all | ``/etc/ssh/*`` | +------------------------+-----------------------------------+ | ``rpm`` | Package ``openssh`` | +------------------------+-----------------------------------+ | ``rpm`` | Package ``openssh-askpass`` | +------------------------+-----------------------------------+ | ``rpm`` | Service ``sshd`` | +------------------------+-----------------------------------+ | ``rpm`` AND ``fedora`` | Package ``openssh-server`` | +------------------------+-----------------------------------+ | ``rpm`` AND ``fedora`` | Package ``openssh-clients`` | | AND NOT ``fedora14`` | | +------------------------+-----------------------------------+ | ``deb`` | Package ``ssh`` | +------------------------+-----------------------------------+ | ``deb`` | Service ``ssh`` | +------------------------+-----------------------------------+ | ``trust.example.com`` | ``/etc/ssh/shosts.equiv`` | +------------------------+-----------------------------------+ .. _xml-genshi-templating: Genshi templating ================= Genshi XML templates allow you to use the `Genshi `_ templating system to dynamically generate XML file content for a given client. Genshi templating can be enabled on a file by adding the Genshi namespace to the top-level tag, e.g.: .. code-block:: xml Several variables are pre-defined inside Genshi XML templates: +-------------+--------------------------------------------------------+ | Name | Description | +=============+========================================================+ | metadata | :ref:`Client metadata | | | ` | +-------------+--------------------------------------------------------+ | repo | The path to the Bcfg2 repository on the filesystem | +-------------+--------------------------------------------------------+ .. note:: ```` and ```` tags can be used inside templates as of Bcfg2 1.2, but they do not behave the same as using a Genshi conditional, e.g.:: The conditional is evaluated when the template is rendered, so code inside the conditional is not executed if the conditional fails. A ```` tag is evaluated *after* the template is rendered, so code inside the tag is always executed. This is an important distinction: if you have code that will fail on some groups, you *must* use a Genshi conditional, not a ```` tag. The same caveats apply to ```` tags. .. _xml-genshi-reference: Genshi XML Template Reference ----------------------------- The Genshi XML templating language is described in depth at `Genshi `_. The XML schema reference follows. 
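Before the schema reference itself, here is a brief, hedged sketch of a
Genshi-enabled bundle template (the bundle name, package, path, and
group used here are hypothetical, not taken from a real repository):

.. code-block:: xml

    <Bundle name="example" xmlns:py="http://genshi.edgewall.org/">
        <Package name="foo"/>
        <Path py:if="'group1' in metadata.groups" name="/etc/foo.conf"/>
    </Bundle>

The Genshi namespace on the top-level tag is what enables templating,
and the ``py:if`` directive uses the pre-defined ``metadata`` variable
described above; per the note above, the ``Path`` entry is only emitted
when the conditional evaluates to true at render time.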
Genshi Tags
~~~~~~~~~~~

.. xml:group:: genshiElements
   :namespace: py

Genshi Attributes
~~~~~~~~~~~~~~~~~

.. xml:attributegroup:: genshiAttrs
   :namespace: py

.. _xml-encryption:

Encryption
==========

You can encrypt data in XML files to protect that data from other
people who need access to the repository.  The data is decrypted
transparently on-the-fly by the server.

.. note::

    This feature is *not* intended to secure the files against a
    malicious attacker who has gained access to your Bcfg2 server, as
    the encryption passphrases are held in plaintext in ``bcfg2.conf``.
    This is only intended to make it easier to use a single Bcfg2
    repository with multiple admins who should not necessarily have
    access to each other's sensitive data.

XML files are encrypted on a per-element basis; that is, rather than
encrypting the whole file, only the character content of individual
elements is encrypted.  This makes it easier to track changes to the
file in a VCS, and also lets unprivileged users work with the other
data in the file.  Only character content of an element can be
encrypted; attribute content and XML elements themselves cannot be
encrypted.

By default, decryption is *strict*; that is, if any element cannot be
decrypted, parsing of the file is aborted.  See
:ref:`server-encryption-lax-strict` for information on changing this on
a global or per-file basis.

To encrypt or decrypt a file, use :ref:`bcfg2-crypt`.  See
:ref:`server-encryption` for more details on encryption in Bcfg2 in
general.

XInclude
========

.. versionadded:: 0.9.0

`XInclude `_ is a W3C specification for the inclusion of external XML
documents into XML source files, allowing complex definitions to be
split into smaller, more manageable pieces.  For instance, in the
:ref:`server-plugins-grouping-metadata` ``groups.xml`` file, you might
do:

.. code-block:: xml

To enable XInclude on a file, you need only add the XInclude namespace
to the top-level tag.  You can also *optionally* include a file that
may or may not exist with the ``fallback`` tag:

.. code-block:: xml

In this case, if ``their-groups.xml`` does not exist, no error will be
raised and everything will work fine.  (You can also use ``fallback``
to include a different file, or explicit content, in the case that the
parent include does not exist.)

XInclude can only include complete, well-formed XML files.  In some
cases, it may not be entirely obvious or intuitive how to structure
such an included file to conform to the schema, although in general the
included files should be structured exactly like the parent file.

Wildcard XInclude
-----------------

.. versionadded:: 1.3.1

Bcfg2 supports an extension to XInclude that allows you to use shell
globbing in the hrefs.  (Stock XInclude doesn't support this, since the
href is supposed to be a URL.)  For instance:

.. code-block:: xml

This would include all ``*.xml`` files in the ``groups`` subdirectory.
Note that if a glob finds no files, that is treated the same as if a
single included file does not exist.  You should use the ``fallback``
tag, described above, if a glob may potentially find no files.
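A minimal sketch of such a wildcard include (assuming, for illustration,
a ``groups/`` subdirectory alongside ``groups.xml``):

.. code-block:: xml

    <Groups xmlns:xi="http://www.w3.org/2001/XInclude">
        <xi:include href="groups/*.xml"/>
    </Groups>

If the glob might match nothing, add a ``fallback`` child to the
include, as described above, so that an empty match is not treated as a
missing file error.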
Feature Matrix ============== +---------------------------------------------------+--------------+--------+------------+------------+ | File | Group/Client | Genshi | Encryption | XInclude | +===================================================+==============+========+============+============+ | :ref:`ACL ip.xml ` | No | No | No | Yes | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`ACL metadata.xml | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Bundler | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`info.xml ` | Yes [#f1]_ | Yes | Yes | Yes | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`privkey.xml and pubkey.xml | Yes | Yes | Yes | Yes [#f2]_ | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`authorizedkeys.xml | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`sslcert.xml and sslkey.xml | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Decisions | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Defaults | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`FileProbes | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`GroupPatterns | No | No | No | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Metadata clients.xml | No | No | No | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Metadata groups.xml | Yes [#f3]_ | No | No | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`NagiosGen | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Packages | Yes | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Pkgmgr | Yes | No | No | No | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Properties | Yes [#f4]_ | Yes | Yes | Yes | | ` | | | | | +---------------------------------------------------+--------------+--------+------------+------------+ | :ref:`Rules ` | Yes | Yes | Yes | Yes | +---------------------------------------------------+--------------+--------+------------+------------+ .. rubric:: Footnotes .. [#f1] ``info.xml`` also supports conditional Path tags; see :ref:`server-info` for more. .. [#f2] XInclude is supported, but the schema has not been modified to allow including files that are structured exactly like the parent. 
You may need to read the schema to understand how to use XInclude properly. .. [#f3] The semantics of Group tags in ``groups.xml`` is slightly different; see :ref:`server-plugins-grouping-metadata-groups-xml` for details. .. [#f4] Group and Client tags in XML Properties are not automatic by default; they can be resolved by use of either the ``Match()`` or ``XMLMatch()`` methods, or by use of the :ref:`server-plugins-connectors-properties-automatch` feature. See :ref:`server-plugins-connectors-properties-xml` for details. doc/unsorted/000077500000000000000000000000001303523157100134665ustar00rootroot00000000000000doc/unsorted/emacs_snippet.txt000066400000000000000000000035221303523157100170630ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-emacs_snippet: ====================== Emacs + YASnippet mode ====================== This page describes using emacs with YASnippet mode with a set of snippets that allow quick composition of bundles and base files. More snippets are under development. #. Download YASnippet from http://code.google.com/p/yasnippet/ #. Install it into your emacs load path (typically ~/.emacs.d/site-lisp) #. Add YASnippet initialization to your .emacs (remember to re-byte-compile it if needed) .. code-block:: cl (require 'yasnippet-bundle) ;;; Bcfg2 snippet (yas/define-snippets 'sgml-mode '( (" $0 " nil) (" $0 " nil) (" $0" nil) (" $0" nil) (" $0" nil) (" $0" nil) (" $0" nil) (" $0" nil) (" $0" nil) ) ) #. One quick M-x eval-current-buffer, and this code is enabled Each of these snippets activates on the opening element, ie , and the snippet will be expanded. The template will be inserted into the text with a set of input prompts, which default to overwrite mode and can be tabbed through. The code above only works for bundles and base, but will be expanded to support other xml files as well. doc/unsorted/howtos.txt000066400000000000000000000013421303523157100155520ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-howtos: ====== HOWTOs ====== Here are several howtos that describe different aspects of Bcfg2 deployment * :ref:`appendix-guides-authentication` - a description of the Bcfg2 authentication infrastructure * AnnotatedExamples - a description of basic Bcfg2 specification operations * BuildingDebianPackages - How to build debian packages * :ref:`appendix-guides-gentoo` - Issues specific to running Bcfg2 on Gentoo * :ref:`server-plugins-probes` - How to use Probes to gather information from a client machine. * :ref:`client-tools-actions` - How to use Actions * :ref:`server-plugins-probes-dynamic-groups` - Using dynamic groups * :ref:`client-modes-paranoid` - How to run an update in paranoid mode doc/unsorted/index.txt000066400000000000000000000010751303523157100153410ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-index: Unsorted Docs ============= These docs have yet to be sorted properly. The content for them can be found at the TitleIndex_ page on Trac. Most should be converted to sphinx. Some may not need any conversion (e.g. The Download page). Once converted and put in the proper place, you can remove the item from the list below. .. _TitleIndex: https://trac.mcs.anl.gov/projects/bcfg2/wiki/TitleIndex * `PrecompiledPackages` * `SchemaEvolution` * `SecurityDevPlan` * `ServerSideOverview` .. 
toctree:: :maxdepth: 2 :glob: * doc/unsorted/specification_overview.png000066400000000000000000000170151303523157100207460ustar00rootroot00000000000000PNG  IHDRJ8PLTE̙f::WWuu3J` w(/7>FMUl"Dfƪ3"J1`@wO^n}Ҍ"Df׈33GG\\pp)Rz"31J@`Ow^n}"Df3M"f+3<DMUh|:Wu3Pm33MMff3Mf"+3<DMUh:|Wu3"J1`@wO^n}Ҍ"Df̈ת33MMff:Wu3(P7mETbq3Mf3Uw--DD[[qq3&M3f@MYfs:WuȒׯ33MMff:Wu3M&f3@MYfs:WubKGDf |d pHYsodIDATx[R:[)z<a杹,`>>8k%pDw#BTUq&?M6vǛM=t?N_&1tѱܫ`_& &YƿlOk1L(e^'EB`n_;cl0sF l>szn|` AX! 3v(CQcӁ `Xt($`(C%F"}mlz8a Fa[sΕnYw3>q5}c8DUzś%/! щUen$WhX <0سĻjyDH';/fd 72;hOwk,NDc nT;`Ƥ.5H.lAwc_j  !أP( AѰ?''ueT ؜ػ_{nl0C0! 0! `0l0! `0! `CVؔ vh[00000000000000000000lsXNE0GL, c1%0[`ctĢ0C{F4o^?Oz#zzϟz^|j,q>0N4)ؗ񓧴C-+M_|<2N0;|(ZYV&񞞑^tQpJuO씑_Hyi]U]\:%ej~l . #Q߳ng%yCUΫcSN͉2.Wy;.uYJ2Ko|[:79@q>P<])>7I٧ҷڀRetok]V5j}ПM~Ik1 {g&ǔ95A٫O|%;Z-ͬwYJ gҨu(攛\2[>Sэji;vfnl p V/-{nfўq{o/[(7K+QV]|.uYJ2%+>2?*kL}'S}̄ozQh8])}}UjSQ  ¿ZWU׺N%Yo-52u͹ddk`yBygr/WI!vaYEC0! 0dSJٱ|]8%zԓ<#C;60on <<`""j>?VH#|cV p8HX=6L 7^hcrxCtbz{g+2}C4`>"m 7N ^9E)w@LFލotF/@ Q`FB7&|fƪFglB=9n I."љvQ-=\ I7bPbE `C! `C`C0`C0ĦT#DDo ncwR `VMc0pĄ1`0 N8^ 5(eG3;]~~Ui髋`^Egˎ\@|K.)?k"r[e~/<ηϛi_WEkWM{Ч '\V-(nK*H_ 7k0T][mš2:#8tU.U~G}~;xw5et<97.y'p]ym +v\xEwʨk'Y)KV}ogrWNm%?O>GhuU/LlS(JV.ecp}^dZn̲s62nlZ>e˖FFǗj9΄r/kTˤnE1u/8k" 2oU@ܪd$%}y0|:؋na9kK֭"[Xދ0! `C! `C`Cbp70 kpG`6TL@_h] "E-<# P.i,<3#UfX+0_ii8Rr+̭$V5W%ܓhuvYWYm59 ˚1/`0ڳeQ%q L2x[GS&!-֛sI'J/r|K7>3$$h`$r00n`$BvHGWlu$w_>АJQ@ 6'lUJ I#EJS*F`ўU)6*+hNsasR`IAs TX%(S#lUJ: ţ&'/3ʵ(K7zeiLIuߞr5X4 Ԑ+]) ָ0u<}5<v롼69)TG\L|벬 Z>;0d p?| # p0iNjP7F1>/ino"w2,RkjS ɼAaЦ `jϴ`JvpIc0W&1ǚ+7J4ָ|k6w4y̋n .-7E4"0swܗ,{'oBwh^ 'F3݊&!'ttğ;((pپC*|vntn:|&w/UIeI+=86 S ]  BE;ctla:,_1hCQ+m8ʎ Hڊd vI$oٱlR+0!㏣}DU* a=Iz@h 0 ъyBM-3iuBgF ZcHe?)G?:Ѯ̙~Z 9Nq|b,O~*b"T^T| EE*2+l'v)'5<$,YepcRx3`02`ٕiuf葾mw܋_DXٵs{*N%M cSI MQSI. 7TR0.Jjd^~TRa 1:;ed FTR_qwvƪ ΀X>`NC[fM; 64l=Hظ07iC)q;UT՗sNJ.E>|9Ng^frXQR;inUUSyw[q4k>}1fr2PN=5%Zx՜_ӬY;˫눨b?@&%Sn"'߶>pDowsvȱ3LSﺩ풛?|:sKc$+/Ol4<5הΕ4%2/7:hhLo-z?9^V·Mh|0%G<pI^5utJK:8@To?2d<t/V {`OUvxl04̸QN8 up4%z%zv2_@hAXi,yICJQi i0%3@8p>MӺ(;,I@p뾌x} F'N%줦T4)̀526QA^}ID)I4CpHf?TmïS/zryj3.3f~Mr Ґa"}Hß7 `N`C0x~9_'A~! ,HDCYQ$X%DY{ w Adn/|,}" Xz`VX> `hD #lAक(H}$)6 MX ;)iу>1.7(b p(%'g`lpYeW'&YpFGˤ;n`HhKq 0`0`0v60F  `9"(fb0(Y@hsF(C̀OW7W?Ua#u`0<2F x dd>p v1#Y0! `0! `C! `C!6 + 0!){IENDB`doc/unsorted/vim_snippet.txt000066400000000000000000000037071303523157100165730ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-vim_snippet: =================== Vim Snippet Support =================== This page describes using vim with snipMate and a set of snippets that allow quick composition of bundles and base files. #. Download snipMate from http://www.vim.org/scripts/script.php?script_id=2540 #. Install it using the install instructions (unzip snipMate.zip -d ~/.vim or equivalent, e.g. $HOME\vimfiles on Windows) #. Add the following to ``~/.vim/snippets/xml.snippets`` .. code-block:: none # Bundle snippet ${2} # Base snippet ${1} # Group snippet ${2} # Path snippet # Service snippet # Package snippet # Action snippet # Directory snippet # SymLink snippet # Permissions snippet #. Save and start editing away! Each of these snippets activates on the opening element, ie . After this string is entered, but before entering a space, press , and the snippet will be expanded. The template will be inserted into the text with a set of input prompts, which default to overwrite mode and can be tabbed through. The code above only works for bundles and base, but will be expanded to support other xml files as well. 
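As a rough illustration only (the exact attributes depend on the
snippet bodies above, and the names here are hypothetical), expanding
the Bundle, Package, and Service snippets and tabbing through the
prompts might leave you with a skeleton along these lines:

.. code-block:: xml

    <Bundle name="ssh">
        <Package name="openssh-server"/>
        <Service name="sshd"/>
    </Bundle>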
doc/unsorted/viz.png000066400000000000000000000157521303523157100150160ustar00rootroot00000000000000PNG  IHDR &WPLTEsssOOO444iii?(id_<ߌPx%%L 3 f++22(tRNS@fAIDATx흉 gTd֒*u j@X  !`$,Y >\R D.@ʘ^;eCy  xJ,h pBz,J  6' ,Ir* $ yiG‚3 C>ZfNF M,A`DK,x :7<Ă7Bj|0 % k,3҂a˿ d#,Xx XЯ"P]k0.% %n04ծ(kTaBJDqSj0,.}jZ)v -o&xW&5KKIXH],`ʷ@U{AY`O:4Dm$Yh,Zc͈/UY ⃂4@v?-]Ś 4L!77/ ](? Y@g*S&Y? F,XmP!}I̫lG`!yܧ !x $;9h ) m0hD̿4de2f5 ky܀ĵ@CxW!am>4А"4$(G I*Һ2@CА"74$k I):t@CB&!А4$Xb[% @C|[)4UZ, 1 @C<K)4Q, 1* @JakаfhR0Yx*U\,Xr=H,FTŒu 1ރÿALcW<>- ߨ>] HH\~H:ҟ4>h(MLsܧ9]>-h*=d AM.>Yp09Z e8,wFk$ V 巖,J,ggǰlf(7\rdBQNGiB7p-%V$T&DEߓ{ij .LS3\ł[~OnRճ vʄ$POk˜ רjHb3.Kmf* )*K lJ$PC|{/Y@JoY]Mm%kgݨVKp!̀w@[^k`aͥ Tm)K,xaԺj.uˢ0f=ɈqAh ,w}6i;*X ,+,Ti,,˙U;XGG% M 2^,*mGl*vd U4jox{3_w,hv- m#2|j';-&vҾdE XeH]a ޔ;Rn,}p@B\Hw s !qS !O q ,_(Hռ,SK se3_ܒ|M@TI `+]<)Ah)y_=Y 4 |VV r'֔'<֏0-+ue6,^>`aϦXi\>`AϤXiT<>`aIEOXX,Sht4 z]? ޝ`D `ahx;0C4$1"ī`|rG:0$=Rոl.;YֻB-YGA>|*ؒe!yY"ْ4@8xUYּQMB4<[\[cLH4xO-QPH64:k:] CCAdBCħd`rl9 9+-k3dTRPH ֡KʖS YH֧Lʖ+ I֭Mʖ; )`M]baZFtJlM}`!VXXM=|xSN|x@! a$c"XeT, 9Q{Bc^t>u8grOwd e?iY෈j?Ÿw \:r=1*Nd5 6!Ŏ.ejSBƖdϤ='YU$x 4KY$pjd = g ,,gYܲ@&iYPj߳}Va\Yh?-ܳǏlX]P>-$ K,t{>Yx}Z guXیk6DZ0VluoHX}Zfpz'0(0oPj|D2IOOȀtm}8$nn<˲*(E͇YP-#c@hi#w˂s 0O|>R>X?"}ħ֕IGiRR\`!5ţXHOhRT4`!Um6 Bژ`!imJ6XH^,BXC[,4 (4 BN KB,dᴳ ,d'$f& û}A0>z,x+$" b6ޅתGn?xJHȓ͵j-XPcPk9+-O$A!ga!Ch/6/XJ2k8d΂>ᣃ,'àAz̬Y1 59qĂ6 J2f!>[3$p<N`ٲ|НC|NJT(BC,f@kƘ0&Ady)!|AAd `>O5 Srd /fb YB``q!?T E})7F0dǂGYyޗ`#VNOˋ[8G=)'ן n}7%<,MY>?/؛  N 1zt8 faV,1˦ UCꊵkF'$2> /q2{rdA</eNߞ?#y'?}Y` DEh F MeȺH=GX<$J__|,\N W8 rxF7}&6(E<.j'K Y^ȩ>8/]^Yhx -c܈@RxL@Q`d` E\TZ,DT=KqA¿^- G3~nҖe,L /タA EBVX`$+#xAą/Xx~ٕfB0Y -@!,l8 ?tj 5 QH .' 0lB0XpDᦙ֥# RC<k ֻ`/EYd7%X@ .OkTר(~G;&OP?4x M7>nsw4 apVcBNc&/hv{E1e;PK <vG(䑕1]DKqž ^B$7<.HӖ{ES9}Vw{[߷Y\hw •rp"LYoRfanیʸnǴ`q Y0]Ti֏xoV,.< sEWsyyɥ3G!e7mC D>nG,; pݟD<\ӐY `XXH#D;T5eQ&`1b{I]f;`؎ǃBhLSȝOIv  'O~Ŏ,\䁜gWHHO);7BYKm׌_x=j3>WbLj +^,\0C/|M^kJJ6#KPh Y(C$8踱Kq-ѼrĖu.,:3"f>h"^0Dwj d?e2O|-oڄa7Չl~iCS 9[–XHYֱB,lZ:B,lea.* wDY'LQ q Li= HZB"Ra!ڗP 4+>(-7h6$_K@a,Dбϟ@aH,$@Bt) C|)MH?ełàm%R! mޮ \u7vLXX@cu^tY`@$jM `0_GWÝIKW!̼VYp8!sc@T[ݕ @B,xϷkƀwB|b `j Q@JL,ZKnU(ueҭArx&%2VU2Ph+*g^Xx; GƖXXtdc]{MBT/EkyĢ ˎvB/ ̢1*I]ѷl~Hp] ]0!%*C_:+&UNߧ$YI#^Y@ `/ YUbE %%DU5A<5`MFԞX㔎4Ha`CYg&iY@;]` ¤qgaIUMSx2:J~ƀ!O , E\Hw s !qS !O q -__Me "ŷ6)`&j02\d @XԖ󾆒]mE_\eDmeGlDm%S@BHN⳵ IENDB`doc/unsorted/windows.txt000066400000000000000000000076761303523157100157410ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-windows: ================================= Notes on possible Windows support ================================= * Windows Management Instrumentation (WMI) should be used wherever possible; there is an excellent [http://tgolden.sc.sabren.com/python/wmi.html WMI Python Module] available, which also comes with a [http://tgolden.sc.sabren.com/python/wmi_cookbook.html WMI Cookbook]. * Before Windows 2003 SP1, on 64-bit machines there are [http://msdn2.microsoft.com/en-us/library/aa393067.aspx no API or WMI calls] to get to many 32-bit windows functions (such as the 32-bit registry) from 64-bit programs, and vice versa. There also is no (official) x86_64 native python distributions for Windows pre-Python 2.5. So the choice would be: #. Only support Windows in Python 2.5+ (which wouldn't be that bad because part of the build process would probably be to create stand-alone bcfg2 executables using [http://www.py2exe.org/ py2exe]). For 64-bit support there would have to be some kind of convoluted py2exe build process that built some things with 32-bit python and some things with 64-bit python. #. 
Wrap external command-line programs such as winreg, which is part of [http://dmst.aueb.gr/dds/sw/outwit/ outwit], and screen scrape. Each external command-line program would need to be compiled into 32 and 64 bit versions. This approach might lead to licensing annoyances and having binary blobs in source control. Services ======== With the exception of 32/64 bit issues, Windows Services support should be pretty trivial; it would differ from \*nix services in that it would be done via WMI API calls and not a 3rd party python module or wrapping a binary. Registry ======== The best way of handling the registry may be to map it into a file-based representation on the server end. The Cfg plugin could then be used to set registry values as needed. Files ===== For a first run there may be some way of utilizing [http://cygwin.com/ cygwin] to make use of the existing \*nix POSIX module for manipulating files. There would probably need to be some changes to deal with the fact that open files can't be manipulated/moved/deleted at all in Windows (other than to do some registry magic that makes the changes on the next reboot). Packages ======== Listing and removal of packages should be pretty easy via WMI. For installation in most cases the admin would need to figure out the correct silent install flags (there is a [http://www.appdeploy.com/ web site] that catalogs a lot of this information), and include that in the bcfg2 server-side XML along with a URL (like with the RPM plugin); the bcfg2 client itself would need to take care of download, perhaps via the [http://linux.duke.edu/projects/urlgrabber/ urlgrabber python module]. Another option would be to utilize one of the existing FLOSS tools for dealing with Windows packages, such as [http://wpkg.org/ WPKG]. Prior FLOSS Art =============== * [http://www.autoitscript.com/autoit3/ AutoIt] - For dealing with packages that don't have a silent install option * [http://www.opensysadmin.com/trac/ticket/4 French Stuff] * [http://ocsinventory.sourceforge.net/ Open Computers and Software Inventory - Next Generation] * [http://www.glpi-project.org/spip.php?lang=en GLPI - Gestionnaire libre de parc informatique] * Javascript thing a colleague of Desai's at ANL wrote - Desai was going to see if this can be released * [http://sial.org/howto/cfengine/windows/ Managing Windows with CFEngine and Perl] * [http://www.dmst.aueb.gr/dds/sw/outwit/ Outwit] - Small unixy utilities for Windows stuff like the registry and clipboard * [http://www.cfengine.org/docs/cfengine-NT/ Porting cfengine to Windows NT] * [http://isg.ee.ethz.ch/tools/realmen/ Real Men Don't Click] - Tobi Oetiker's stuff * [http://isg.ee.ethz.ch/tools/realmen/res/index.en.html More Prior FLOSS Art] * [http://unattended.sourceforge.net/ Unattended] - Bare Metal Installs, Package Management * [http://wpkg.org/ WPKG] - Package Management doc/unsorted/writing_specification.txt000066400000000000000000000156471303523157100206270ustar00rootroot00000000000000.. -*- mode: rst -*- .. _unsorted-writing_specification: =========================== Writing Bcfg2 Specification =========================== Bcfg2 specifications are logically divided in to three areas: * Metadata * Abstract * Literal The metadata portion of the configuration assigns a client to its profile group and to its non-profile groups. The profile group is assigned in ``Metadata/clients.xml`` and the non profile group assignments are in ``Metadata/groups.xml``. 
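For example, a minimal sketch of these two files (the hostname,
profile, and group names are hypothetical):

.. code-block:: xml

    <!-- Metadata/clients.xml -->
    <Clients>
        <Client profile="webserver" name="www1.example.com"/>
    </Clients>

.. code-block:: xml

    <!-- Metadata/groups.xml -->
    <Groups>
        <Group profile="true" name="webserver">
            <Group name="apache"/>
        </Group>
    </Groups>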
The group memberships contained in the metadata are then used to
construct an abstract configuration for the client.  An abstract
configuration for a client identifies the configuration entities
(packages, configuration files, services, etc.) that a client requires,
but it does not identify them explicitly.  For instance, an abstract
configuration may identify that a client needs the Bcfg2 package, but
it does not explicitly identify that an RPM package version 0.9.2
should be loaded from http://rpm.repo.server/bcfg2-0.9.2-0.1.rpm.  The
abstract configuration is defined in the XML configuration files for
the Bundler plugin.

A combination of a client's metadata (group memberships) and abstract
configuration is then used to generate the client's literal
configuration.  For instance, the abstract Package entry above may
generate a literal configuration entry that names the exact package
version and source URL.  A client's literal configuration is generated
by a number of plugins that handle the different configuration
entities.

.. image:: specification_overview.png

Dynamic Groups
==============

Dynamic groups are likewise complex, and are covered on their own
[wiki:DynamicGroups page].

Abstract Configuration (Structures)
===================================

A client's abstract configuration is the inventory of configuration
entities that should be installed on a client.  The Bundler plugin
usually provides the abstract configuration.  Bundler builds
descriptions of interrelated configuration entities.  These are
typically used for the representation of services, or other complex
groups of entities.

Configuration Entity Types
--------------------------

Entities in the abstract configuration (and correspondingly in the
literal configuration) can have one of several types.  In the abstract
configuration, each of these entities only has a tag and the name
attribute set.  The types of configuration entities that may be
assigned to the abstract configuration can be seen at
:ref:`server-configurationentries`.

Writing Bundles
---------------

Bundles consist of a set of configuration entities.  These entities are
grouped together due to a configuration-time interdependency.  Basic
services tend to be the simplest example of these.  They normally
consist of

* some software package(s)
* some configuration files
* an indication that some service should be activated

If any of these pieces are installed or updated, all should be
rechecked and any associated services should be restarted.

All files in the ``Bundler/`` subdirectory of the repository are
processed.  Each bundle must be defined in its own file::

    # ls Bundler
    Glide3.xml LPRng.xml Tivoli-backup.xml Tivoli.xml a2ps.xml
    abiword.xml account.xml adsm-client.xml amihappy.xml
    apache-basic.xml apache.xml apache2-basic.xml apt-proxy.xml at.xml
    atftp-server.xml atftp.xml ....

When packages in a bundle are verified by the client toolset, the Paths
included in the same bundle are taken into consideration.  That is, a
package will not fail verification from a Bcfg2 perspective if the
package verification only failed because of configuration files that
are defined in the same bundle.

Consider an annotated bundle along the lines of the sketch shown below.
In this bundle, most of the entries are common to all systems.  Clients
in group "deb" get one extra package and service, while clients in
group "rpm" get two extra packages and an extra service.
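Because the exact markup depends on your repository, the following is
only a rough sketch of such a bundle (the package, service, and path
names are illustrative):

.. code-block:: xml

    <Bundle name="ssh">
        <Path name="/etc/ssh/ssh_config"/>
        <Path name="/etc/ssh/sshd_config"/>
        <Group name="rpm">
            <Package name="openssh"/>
            <Package name="openssh-askpass"/>
            <Service name="sshd"/>
            <Group name="fedora">
                <Package name="openssh-server"/>
                <Group name="fc4" negate="true">
                    <Package name="openssh-clients"/>
                </Group>
            </Group>
        </Group>
        <Group name="deb">
            <Package name="ssh"/>
            <Service name="ssh"/>
        </Group>
    </Bundle>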
In addition, clients in group "fedora" and group "rpm" get one extra package entries, unless they are not in the fc4 group, in which case, they get an extra package. Notice that this file doesn't describe which versions of these entries that clients should get, only that they should get them. (Admittedly, this example is slightly contrived, but demonstrates how group entries can be used in bundles) +----------------+-------------------------------+ | Group | Entry | +================+===============================+ | all | /etc/ssh/* | +----------------+-------------------------------+ | rpm | Package openssh | +----------------+-------------------------------+ | rpm | Package openssh-askpass | +----------------+-------------------------------+ | rpm | Service sshd | +----------------+-------------------------------+ | rpm and fedora | Package openssh-server | +----------------+-------------------------------+ | rpm and fedora | Package openssh-clients | | and not fc4 | | +----------------+-------------------------------+ | deb | Package ssh | +----------------+-------------------------------+ | deb | Service ssh | +----------------+-------------------------------+ Bundle Tag ^^^^^^^^^^ .. xml:type:: BundleType :nochildren: As mentioned above the Configuration Entity Tags may only have the name attribute in Bundle definitions. Group and Client Tags ^^^^^^^^^^^^^^^^^^^^^ .. xml:type:: BundlerGroupType :nochildren: An abstract group may contain any of the Configuration Entity types and other groups. Literal Configuration (Generators) ================================== A Generator is a Bcfg2 piece of code that is run to generate the literal configuration for a host using a combination of the hosts metadata and abstract configuration. A Generator can take care of a particular configuration element. Any time this element is requested by the client, the server dynamically generates it either by crunching data and creating new information or by reading a file off of disk and passes it down to the client for installation. examples/000077500000000000000000000000001303523157100126745ustar00rootroot00000000000000examples/Bundler/000077500000000000000000000000001303523157100142675ustar00rootroot00000000000000examples/Bundler/dirvish.xml000066400000000000000000000011171303523157100164610ustar00rootroot00000000000000 client: nfs-host tree: /export/homes/${user.text} exclude: *~ .nfs* examples/Cfg/000077500000000000000000000000001303523157100133735ustar00rootroot00000000000000examples/Cfg/etc/000077500000000000000000000000001303523157100141465ustar00rootroot00000000000000examples/Cfg/etc/cron.d/000077500000000000000000000000001303523157100153315ustar00rootroot00000000000000examples/Cfg/etc/cron.d/dirvish/000077500000000000000000000000001303523157100170015ustar00rootroot00000000000000examples/Cfg/etc/cron.d/dirvish/dirvish000066400000000000000000000002721303523157100203750ustar00rootroot00000000000000# /etc/cron.d/dirvish: crontab fragment for dirvish SHELL=/bin/sh PATH=/sbin:/bin:/usr/sbin:/usr/bin MAILTO=root # run every night 4 22 * * * root /etc/dirvish/dirvish-cronjob examples/Cfg/etc/dirvish/000077500000000000000000000000001303523157100156165ustar00rootroot00000000000000examples/Cfg/etc/dirvish/dirvish-cronjob/000077500000000000000000000000001303523157100207205ustar00rootroot00000000000000examples/Cfg/etc/dirvish/dirvish-cronjob/dirvish-cronjob000066400000000000000000000020021303523157100237370ustar00rootroot00000000000000#! 
/bin/sh # # daily cron job for the dirvish package # # NOTE: This is the sample cron job included in Debian. You may need to # change the executable paths if running on a different OS. if [ ! -x /usr/sbin/dirvish-expire ]; then exit 0; fi if [ ! -s /etc/dirvish/master.conf ]; then exit 0; fi mount_check() { mntout=`tempfile -p mount` mount $1 >$mntout 2>&1 if [ ! -d $1/lost+found ]; then # only works for "real" filesystems :-) # (Yes, I know about reiserfs.) echo "'mount $1' failed?! Stopping." echo "mount output:" cat $mntout rm -f $mntout exit 2 fi if stat $1 | grep 'Inode: 2[^0-9]' >/dev/null; then # ditto rm -f $mntout return 0 # ok fi echo "$1 isn't inode 2 ?! Mount must have failed; stopping." echo '' stat $1 echo "mount output:" cat $mntout rm -f $mntout umount $1 exit 2 } ## Example of how to mount and umount a backup partition... # mount_check /backup /usr/sbin/dirvish-expire --quiet && /usr/sbin/dirvish-runall --quiet rc=$? # umount /backup || rc=$? exit $rc examples/Cfg/etc/dirvish/master.conf/000077500000000000000000000000001303523157100200355ustar00rootroot00000000000000examples/Cfg/etc/dirvish/master.conf/master.conf.genshi000066400000000000000000000005561303523157100234610ustar00rootroot00000000000000bank: /backup image-default: %Y-%m-%d log: bzip2 index: bzip2 xdev: 1 exclude: lost+found/ *~ .nfs* Runall: {% for user in metadata.Properties['dirvish.xml'].data.find('users') %}\ homes/${user.tag} {% end %}\ expire-default: +2 weeks expire-rule: # MIN HR DOM MON DOW STRFTIME_FMT * * * * 1 +6 weeks * * 1-7 * 1 +6 months * * 1-7 1,4,7,10 1 never examples/Properties/000077500000000000000000000000001303523157100150305ustar00rootroot00000000000000examples/Properties/dirvish.xml000066400000000000000000000003031303523157100172160ustar00rootroot00000000000000 user1homedir user2homedir user3homedir examples/README000066400000000000000000000004651303523157100135610ustar00rootroot00000000000000This directory contains example files for various Bcfg2 generators and plugins. The documentation for the examples is available in the Bcfg2 wiki or the manual. Those files may not work in your environment. Others can be obsolete. The purpose is to give you a entry point for your own configuration. examples/TemplateHelper/000077500000000000000000000000001303523157100156075ustar00rootroot00000000000000examples/TemplateHelper/include.py000066400000000000000000000072131303523157100176070ustar00rootroot00000000000000""" IncludeHelper makes it easier to include group- and host-specific files in a template. Synopsis: {% python import os custom = IncludeHelper(metadata, path).files(os.path.basename(name)) %}\ {% for file in custom %}\ ########## Start ${describe_specificity(file)} ########## {% include ${file} %} ########## End ${describe_specificity(file)} ########## {% end %}\ This would let you include files with the same base name; e.g. in a template for ''foo.conf'', the include files would be called ''foo.conf.G_.genshi_include''. If a template needs to include different files in different places, you can do that like so: inc = IncludeHelper(metadata, path) custom_bar = inc.files("bar") custom_baz = inc.files("baz") This would result in two different sets of custom files being used, one drawn from ''bar.conf.G_.genshi_include'' and the other from ''baz.conf.G_.genshi_include''. """ import os import re __default__ = ["IncludeHelper", "get_specificity", "describe_specificity"] class IncludeHelper(object): def __init__(self, metadata, path): """ Constructor. 
The template path can be found in the ''path'' variable that is set for all Genshi templates. """ self.metadata = metadata self.path = path def get_basedir(self): return os.path.dirname(self.path) def files(self, fname, groups=None): """ Return a list of files to include for this host. Files are found in the template directory based on the following patterns: * ''.H_.genshi_include'': Host-specific files * ''.G_.genshi_include'': Group-specific files * ''.genshi_include'': Non-specific includes Note that there is no numeric priority on the group-specific files; all matching files are returned by ``IncludeHelper.files()``. If you wish to only include files for a subset of groups, pass the ``groups`` keyword argument. Host-specific files are always included in the return value. """ files = [] hostfile = os.path.join(self.get_basedir(), "%s.H_%s.genshi_include" % (fname, self.metadata.hostname)) if os.path.isfile(hostfile): files.append(hostfile) allfile = os.path.join(self.get_basedir(), "%s.genshi_include" % fname) if os.path.isfile(allfile): files.append(allfile) if groups is None: groups = sorted(self.metadata.groups) for group in groups: filename = os.path.join(self.get_basedir(), "%s.G_%s.genshi_include" % (fname, group)) if os.path.isfile(filename): files.append(filename) return files SPECIFICITY_RE = re.compile(r'(G|H)_(.*)\.genshi_include') def get_specificity(fname): """ Get a tuple of (, ) describing the specificity of the given file. Specificity types are "host", "group", or "all". The parameter will be either a hostname, a group name, or None (for "all"). """ match = SPECIFICITY_RE.search(fname) if match: if match.group(1) == "G": stype = "group" else: stype = "host" return (stype, match.group(2)) return ("all", None) def describe_specificity(fname): """ Get a string describing the specificity of the given file """ (stype, param) = get_specificity(fname) if stype != "all": return "%s-specific configs for %s" % (stype, param) else: return "Generic configs for all clients" examples/bcfg2-lint.conf000066400000000000000000000012571303523157100154770ustar00rootroot00000000000000[lint] plugins=InfoXML,Comments,RequiredAttrs,Validate,MergeFiles [errors] no-infoxml=error paranoid-false=error properties-schema-not-found=silent inconsistent-bundle-name=error keywords-not-found=error comments-not-found=error [InfoXML] required_attrs = owner,group,perms,paranoid [Comments] global_keywords = Id genshibundler_comments = Properties,Probes,Description properties_comments = Template,Format genshi_comments = Maintainer,Properties,Probes,Description cheetah_comments = Maintainer,Properties,Probes,Description cfg_comments = cfg_keywords = probe_comments = Maintainer,Purpose,Groups,Other Output [Validate] schema=/usr/share/bcfg2/schemas [MergeFiles] threshold=85 examples/bcfg2.conf000066400000000000000000000002051303523157100145230ustar00rootroot00000000000000[communication] password = foobat # certificate = /etc/bcfg2.key # key = /etc/bcfg2.key [components] bcfg2 = https://localhost:6789 examples/brpt.sqlite000066400000000000000000006400001303523157100150660ustar00rootroot00000000000000SQLite format 3@ uu ?9.6*4%+"%"     oo  5sAAjoeyhagedorn@mcs.anl.govsha1$a1043$f0904cb26ef42eb5259cf3dcb80dc6c01506ed8c2006-06-21 16:42:01.9958142006-06-19 15:10:18.850029 joey/1.10+delete_logentry o3'%%tableauth_messageauth_messageCREATE TABLE "auth_message" ( "id" integer NOT NULL PRIMARY KEY, "user_id" integer NOT NULL, "message" text NOT NULL ) !!ctableauth_groupauth_groupCREATE TABLE "auth_group" ( "id" integer 
-2006-01-05 06:36:04dirtyunknown$Revision$  G  !+5?IS]gq| !,7BMXblv$/:EP[fq|   6 v    D 4 5 K     H z   8 f   M        i   K    9 y    H $ % &5 'f 4 5 N     K }   ; i     ; T r       P  _d1b.a._1`A12006-06-20 14:24:49.330066ccn117.mcs.anl.gov0_A/2006-06-20 14:24:48.638701ccn66.mcs.anl.gov1^A12006-06-20 14:24:47.964251ccn113.mcs.anl.gov1]A12006-06-20 14:24:47.289748ccn114.mcs.anl.gov0\A/2006-06-20 14:24:46.615854ccn95.mcs.anl.gov0[A/2006-06-20 14:24:45.690672cct6m.mcs.anl.gov0ZA/2006-06-20 14:24:44.991374ccn27.mcs.anl.gov0YA/2006-06-20 14:24:44.041849ccn20.mcs.anl.gov1XA12006-06-20 14:24:43.342237ccn125.mcs.anl.gov1WA12006-06-20 14:24:42.561288ccn124.mcs.anl.gov1VA12006-06-20 14:24:41.860188ccn190.mcs.anl.gov1UA12006-06-20 14:24:41.185721ccn152.mcs.anl.gov0TA/2006-06-20 14:24:40.477400ccn29.mcs.anl.gov1SA12006-06-20 14:24:39.769310ccn142.mcs.anl.gov0RA/2006-06-20 14:24:38.853569ccn31.mcs.anl.gov1QA12006-06-20 14:24:38.170434ccn181.mcs.anl.gov1PA12006-06-20 14:24:37.480264ccn144.mcs.anl.gov1OA12006-06-20 14:24:36.814153ccn148.mcs.anl.gov O$/:EP[fpz$.8BLV`jt$/:EP[fq|   :     g     %  l        L  M  U  W Y [ ] _ a  N  P  Q    < |    J $ % &8 'i 4 5 P     M    > k     = V t       R     =     h     '  n ^m< xGS"^/]A/2006-06-20 12:33:03.689945ccn72.mcs.anl.gov/\A/2006-06-20 12:33:02.224338ccn71.mcs.anl.gov/[A/2006-06-20 12:33:01.074344ccn70.mcs.anl.gov/ZA/2006-06-20 12:32:59.959618ccn67.mcs.anl.gov/YA/2006-06-20 12:32:58.835695ccn65.mcs.anl.gov.XA-2006-06-20 12:32:18.960734ccn9.mcs.anl.gov/WA/2006-06-20 12:32:17.204893ccn32.mcs.anl.gov/VA/2006-06-20 12:32:15.545700ccn30.mcs.anl.gov/UA/2006-06-20 12:32:13.971375ccn29.mcs.anl.gov/TA/2006-06-20 12:32:11.238685ccn31.mcs.anl.gov/SA/2006-06-20 12:32:07.339068ccn28.mcs.anl.gov/RA/2006-06-20 12:32:07.010523ccn26.mcs.anl.gov/QA/2006-06-20 12:32:04.985893ccn25.mcs.anl.gov/PA/2006-06-20 12:32:02.615604ccn27.mcs.anl.gov/OA/2006-06-20 12:32:00.996594ccn24.mcs.anl.gov/NA/2006-06-20 12:31:59.090256ccn23.mcs.anl.gov/MA/2006-06-20 12:31:55.266913ccn22.mcs.anl.gov/LA/2006-06-20 12:31:51.885215ccn21.mcs.anl.gov/KA/2006-06-20 12:31:50.423183ccn20.mcs.anl.gov Y (08@HPX`hpx (08@HPX`hpx (1:CLU^gpy      #&),/46:;>@BDFHJLNQ R!U"V#Y$Z%]&^'_(c)f*j+l,o-s.u/x0|1~23456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXY N"+4=FOXajs| '09BKT]fpz$.8BLV`jt~[\]^_ ` abcdefg!h%i(j*k,l0m3n7o:p<q?rCsEtHuLvOwQxUyWzZ{^|a}c~eh k m q u w z ~                                   exph`XPH@80( xph`XPH@80( xph`XPH@80( e)d(c(b(a'`'_'^&]%\%[%Z$Y#X#W#V"U!T!S!R QPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&% $ # " !                  ^xph`XPH@80' |sjaXOF=4+"wne\SJA8/& CHBHAG@G?G>G=F<F;F:E9E8E7D6D5C4C3C2B1B0B/A.A-A,@+@*@)?(?'?&>%>$>#="=!= <<<;;;:::9998887776 6 6 5 5 54443332221~1}1|0{0z0y/x/w/v.u.t.s-r-q-p,o,n,m+l+k+j*i*h*g)f) HCGDH=; =! _Thu Jan 5 06:30:27 2006cleanunknown$Revision$##=: =! ]Thu Jan 5 06:31:02 2006cleanunknown$Revision$##=9 =! \Thu Jan 5 06:40:10 2006cleanunknown$Revision$##=8 =! [Thu Jan 5 06:37:10 2006cleanunknown$Revision$##=7 =! ZThu Jan 5 06:42:30 2006cleanunknown$Revision$##=6 =! YThu Jan 5 06:31:04 2006cleanunknown$Revision$##=5 =! XThu Jan 5 06:39:17 2006dirtyunknown$Revision$  D4 =/ XThu Dec 1 06:30:59 2005cleanunknown$Revision: 1.40 $=3 =! WThu Jan 5 06:30:00 2006cleanunknown$Revision$##=2 =! VThu Jan 5 06:33:13 2006cleanunknown$Revision$##=1 =! UThu Jan 5 06:40:54 2006cleanunknown$Revision$##=0 =! TThu Jan 5 06:40:47 2006cleanunknown$Revision$##=/ =! SThu Jan 5 06:37:29 2006cleanunknown$Revision$##=. =! QThu Jan 5 06:39:29 2006cleanunknown$Revision$##=- =! 
PThu Jan 5 06:42:06 2006cleanunknown$Revision$## (K U_i(?N 3/ -2005-12-01 06:32:25cleanunknown$Revision: 1.40 $8M 3! ,2006-01-05 06:39:17dirtyunknown$Revision$  ?L 3/ ,2005-12-01 06:30:59cleanunknown$Revision: 1.40 $8K 3! +2006-01-05 06:30:39dirtyunknown$Revision$  ?J 3/ +2005-12-01 06:30:46cleanunknown$Revision: 1.40 $8I 3! *2006-01-05 06:29:59dirtyunknown$Revision$  ?H 3/ *2005-12-01 06:30:42cleanunknown$Revision: 1.40 $8G 3! )2006-01-05 06:32:11dirtyunknown$Revision$  ?F 3/ )2005-12-01 06:30:29cleanunknown$Revision: 1.40 $8E 3! (2006-01-05 06:37:17dirtyunknown$Revision$  ?D 3/ (2005-12-01 06:30:11cleanunknown$Revision: 1.40 $8C 3! '2006-01-05 06:32:59dirtyunknown$Revision$  ?B 3/ '2005-12-01 06:29:54cleanunknown$Revision: 1.40 $8A 3! &2006-01-05 06:39:03dirtyunknown$Revision$  ?@ 3/ &2005-12-01 06:29:51cleanunknown$Revision: 1.40 $8? 3! %2006-01-05 06:30:47dirtyunknown$Revision$  @L\ l0|@9 3! 2006-01-05 06:42:32cleanunknown$Revision$##9 3! 2006-01-05 06:42:30cleanunknown$Revision$##9 3! 2006-01-05 06:42:29cleanunknown$Revision$##9 3! 2006-01-05 06:42:23cleanunknown$Revision$##9 3! 2006-01-05 06:42:22cleanunknown$Revision$##9 3! 2006-01-05 06:42:18cleanunknown$Revision$##9 3! 2006-01-05 06:42:15cleanunknown$Revision$##9 3! 2006-01-05 06:42:13cleanunknown$Revision$##9 3! 2006-01-05 06:42:10cleanunknown$Revision$##9 3! 2006-01-05 06:42:07cleanunknown$Revision$9 3! 2006-01-05 06:42:06cleanunknown$Revision$##9 3! 2006-01-05 06:42:05cleanunknown$Revision$##9 3! 2006-01-05 06:42:02cleanunknown$Revision$##9 3! 2006-01-05 06:41:57cleanunknown$Revision$##9 3! 2006-01-05 06:41:52cleanunknown$Revision$##9 3! 2006-01-05 06:40:57cleanunknown$Revision$## ctZ>oL1jE'c!~' libmailutils0packageUnknown} libgsasl7packageUnknown$|- edg-crl-upgradedserviceUnknown{ grisserviceUnknownz lam4packageUnknown y% ext3rminatorpackageUnknownx libxml2packageUnknown"w) python2.3-lxmlpackageUnknownv# python-lxmlpackageUnknownu! libxslt1.1packageUnknown%t/ mpich2-system-mpdpackageUnknown s% mysql-clientpackageUnknownr gawkpackageUnknownq gnuplotpackageUnknownp hostpackageUnknown o% libgd2-noxpmpackageUnknownn# gnuplot-noxpackageUnknownm# gnuplot-x11packageUnknownl cpp-4.0packageUnknown k% gcc-4.0-basepackageUnknownj g77-3.3packageUnknown)i7 kernel-image-2.6.11.8packageUnknownh nttcppackageUnknowng g77packageUnknown%f/ nfs-kernel-serverpackageUnknowne# libg2c0-devpackageUnknownd! mpd-systemserviceUnknownc! rsh-clientpackageUnknown ;g4j7k8n;19A12006-06-20 14:22:30.011499ccn135.mcs.anl.gov08A/2006-06-20 14:22:29.111946ccn62.mcs.anl.gov17A12006-06-20 14:22:28.000396ccn129.mcs.anl.gov06A/2006-06-20 14:22:25.882948ccn93.mcs.anl.gov15A12006-06-20 14:22:23.913051ccn212.mcs.anl.gov14A12006-06-20 14:22:21.627362ccn206.mcs.anl.gov13A12006-06-20 14:22:17.593653ccn210.mcs.anl.gov12A12006-06-20 14:22:15.978545ccn229.mcs.anl.gov11A12006-06-20 14:22:14.398637ccn226.mcs.anl.gov10A12006-06-20 14:22:11.850485ccn204.mcs.anl.gov1/A12006-06-20 14:22:09.439502ccn214.mcs.anl.gov1.A12006-06-20 14:22:05.747430ccn208.mcs.anl.gov1-A12006-06-20 14:22:01.974297ccn200.mcs.anl.gov/,A-2006-06-20 14:21:57.932505ccn9.mcs.anl.gov1+A12006-06-20 14:21:55.678676ccn222.mcs.anl.gov1*A12006-06-20 14:21:52.784532ccn213.mcs.anl.gov1)A12006-06-20 14:21:49.356481ccn202.mcs.anl.gov1(A12006-06-20 14:21:47.406959ccn207.mcs.anl.gov1'A12006-06-20 14:21:45.464275ccn215.mcs.anl.gov (K U_i(?> 3/ %2005-12-01 06:29:48cleanunknown$Revision: 1.40 $8= 3! $2006-01-05 06:32:04dirtyunknown$Revision$  ?< 3/ $2005-12-01 06:27:42cleanunknown$Revision: 1.40 $8; 3! 
#2006-01-05 06:30:27dirtyunknown$Revision$ ?: 3/ #2005-12-01 06:27:08cleanunknown$Revision: 1.40 $89 3! "2006-01-05 06:29:46dirtyunknown$Revision$  ?8 3/ "2005-12-01 06:26:31cleanunknown$Revision: 1.40 $87 3! !2006-01-05 06:32:55dirtyunknown$Revision$  ?6 3/ !2005-12-01 06:26:28cleanunknown$Revision: 1.40 $85 3!  2006-01-05 06:39:24dirtyunknown$Revision$  ?4 3/  2005-12-01 06:26:14cleanunknown$Revision: 1.40 $83 3! 2006-01-05 06:33:47dirtyunknown$Revision$  ?2 3/ 2005-12-01 06:25:29cleanunknown$Revision: 1.40 $81 3! 2006-01-05 06:40:00dirtyunknown$Revision$  ?0 3/ 2005-11-28 06:35:50cleanunknown$Revision: 1.40 $8/ 3! 2006-01-05 06:30:28dirtyunknown$Revision$  Wm< xGQW0pA12006-06-20 12:33:36.086341ccn107.mcs.anl.gov0oA12006-06-20 12:33:34.745893ccn109.mcs.anl.gov0nA12006-06-20 12:33:32.883783ccn103.mcs.anl.gov0mA12006-06-20 12:33:30.430217ccn102.mcs.anl.gov0lA12006-06-20 12:33:28.412773ccn101.mcs.anl.gov0kA12006-06-20 12:33:26.370681ccn100.mcs.anl.gov/jA/2006-06-20 12:33:19.355191ccn98.mcs.anl.gov/iA/2006-06-20 12:33:17.432930ccn96.mcs.anl.gov/hA/2006-06-20 12:33:16.090882ccn95.mcs.anl.gov/gA/2006-06-20 12:33:14.509221ccn94.mcs.anl.gov/fA/2006-06-20 12:33:13.076608ccn92.mcs.anl.gov/eA/2006-06-20 12:33:12.785423ccn90.mcs.anl.gov/dA/2006-06-20 12:33:12.559829ccn89.mcs.anl.gov/cA/2006-06-20 12:33:11.177307ccn86.mcs.anl.gov/bA/2006-06-20 12:33:09.856415ccn83.mcs.anl.gov/aA/2006-06-20 12:33:08.486026ccn82.mcs.anl.gov/`A/2006-06-20 12:33:06.620459ccn80.mcs.anl.gov/_A/2006-06-20 12:33:05.364796ccn78.mcs.anl.gov/^A/2006-06-20 12:33:05.042036ccn74.mcs.anl.gov HCGDH=J =! pThu Dec 22 06:27:20 2005cleanunknown$Revision$##=I =! oThu Jan 5 06:34:03 2006cleanunknown$Revision$##=H =! nThu Jan 5 06:37:04 2006cleanunknown$Revision$##=G =! mMon Dec 26 06:26:08 2005cleanunknown$Revision$##=F =! lThu Jan 5 06:34:17 2006cleanunknown$Revision$##=E =! kThu Jan 5 06:42:47 2006cleanunknown$Revision$##DD =/ jTue Oct 25 16:10:19 2005cleanunknown$Revision: 1.37 $  =C =! iThu Jan 5 06:36:07 2006cleanunknown$Revision$##=B =! hThu Jan 5 06:42:10 2006cleanunknown$Revision$##=A =! gThu Jan 5 06:34:21 2006cleanunknown$Revision$##=@ =! fThu Jan 5 06:40:28 2006cleanunknown$Revision$##=? =! cThu Jan 5 06:37:16 2006cleanunknown$Revision$##=> =! bThu Jan 5 06:33:56 2006cleanunknown$Revision$##== =! aThu Jan 5 06:32:06 2006cleanunknown$Revision$##=< =! `Thu Jan 5 06:33:30 2006cleanunknown$Revision$## \ypg^ULC:1( }tkbYPG>5,#xof]TKB90' fffeeedddcccbbbaaa ` ` ` _ __^^^]]]\\\~[}[|[{ZzZyZxYwYvYuXtXsXrWqWpWoVnVmVlUkUjUiThTgTfSeSdScRbRaR`Q_Q^Q]P\P[PZOYOXOWNVNUNTMSMRMQLPLOLNKMKLKKJJJIJHJGIFIEIDH I&&0:DNXblv  *4>HR\fpz$.8BLV`jt~                        ! 
# & * - / 1 2 7 9 < @ B F G J N P T V Z \ ` c g j l o r w x |                   Fj8p> vDyF0A12006-06-20 12:34:13.886701ccn121.mcs.anl.gov0A12006-06-20 12:34:12.121759ccn141.mcs.anl.gov0A12006-06-20 12:34:10.797156ccn140.mcs.anl.gov0A12006-06-20 12:34:09.568469ccn139.mcs.anl.gov0A12006-06-20 12:34:07.233575ccn138.mcs.anl.gov0~A12006-06-20 12:34:05.390771ccn135.mcs.anl.gov0}A12006-06-20 12:34:03.434325ccn134.mcs.anl.gov0|A12006-06-20 12:34:01.344660ccn136.mcs.anl.gov0{A12006-06-20 12:33:59.218329ccn137.mcs.anl.gov0zA12006-06-20 12:33:57.163972ccn111.mcs.anl.gov0yA12006-06-20 12:33:54.611890ccn112.mcs.anl.gov0xA12006-06-20 12:33:51.984731ccn113.mcs.anl.gov0wA12006-06-20 12:33:48.634579ccn114.mcs.anl.gov0vA12006-06-20 12:33:45.953418ccn133.mcs.anl.gov0uA12006-06-20 12:33:44.247895ccn132.mcs.anl.gov0tA12006-06-20 12:33:42.057868ccn131.mcs.anl.gov0sA12006-06-20 12:33:40.691830ccn130.mcs.anl.gov0rA12006-06-20 12:33:39.308809ccn104.mcs.anl.gov0qA12006-06-20 12:33:37.560890ccn106.mcs.anl.gov OCGK O=Y =! Thu Jan 5 06:32:59 2006cleanunknown$Revision$##=X =! ~Mon Dec 19 11:40:05 2005cleanunknown$Revision$##=W =! }Tue Jan 3 06:34:16 2006cleanunknown$Revision$##=V =! |Thu Jan 5 06:33:13 2006cleanunknown$Revision$##=U =! {Thu Jan 5 06:33:16 2006cleanunknown$Revision$##=T =! zThu Jan 5 06:39:12 2006cleanunknown$Revision$##=S =! yThu Jan 5 06:36:17 2006cleanunknown$Revision$##=R =! xThu Jan 5 06:42:15 2006cleanunknown$Revision$##=Q =! wThu Jan 5 06:42:13 2006cleanunknown$Revision$##=P =! vThu Jan 5 06:30:00 2006cleanunknown$Revision$##=O =! uThu Jan 5 06:33:24 2006cleanunknown$Revision$##=N =! tThu Jan 5 06:33:36 2006cleanunknown$Revision$##=M =! sThu Jan 5 06:30:28 2006cleanunknown$Revision$##=L =! rThu Jan 5 06:33:56 2006cleanunknown$Revision$##=K =! qThu Jan 5 06:39:37 2006cleanunknown$Revision$## @@@@@>h =! Thu Jan 5 06:42:02 2006cleanunknown$Revision$##>g =! Thu Jan 5 06:30:53 2006cleanunknown$Revision$##>f =! Thu Jan 5 06:36:35 2006cleanunknown$Revision$##>e =! Thu Jan 5 06:40:35 2006cleanunknown$Revision$##>d =! Thu Jan 5 06:42:48 2006cleanunknown$Revision$##>c =! Thu Jan 5 06:33:17 2006cleanunknown$Revision$##>b =! Thu Jan 5 06:40:51 2006cleanunknown$Revision$##>a =! Thu Jan 5 06:40:09 2006cleanunknown$Revision$##>` =! Thu Jan 5 06:42:32 2006cleanunknown$Revision$##>_ =! Thu Jan 5 06:36:45 2006cleanunknown$Revision$##>^ =! Thu Jan 5 06:39:03 2006cleanunknown$Revision$##>] =! Thu Jan 5 06:39:03 2006cleanunknown$Revision$##>\ =! Thu Jan 5 06:43:02 2006cleanunknown$Revision$##>[ =! Thu Jan 5 06:36:33 2006cleanunknown$Revision$##>Z =! 
Thu Jan 5 06:40:19 2006cleanunknown$Revision$## 7g4h5i6j70A12006-06-20 12:34:43.160717ccn156.mcs.anl.gov0A12006-06-20 12:34:41.695139ccn155.mcs.anl.gov0A12006-06-20 12:34:39.812588ccn128.mcs.anl.gov0A12006-06-20 12:34:37.760161ccn127.mcs.anl.gov0A12006-06-20 12:34:35.923184ccn154.mcs.anl.gov0A12006-06-20 12:34:35.540098ccn151.mcs.anl.gov0A12006-06-20 12:34:33.800115ccn123.mcs.anl.gov0A12006-06-20 12:34:32.300130ccn124.mcs.anl.gov0A12006-06-20 12:34:30.927177ccn125.mcs.anl.gov0 A12006-06-20 12:34:29.410821ccn126.mcs.anl.gov0 A12006-06-20 12:34:27.961425ccn149.mcs.anl.gov0 A12006-06-20 12:34:26.570383ccn148.mcs.anl.gov0 A12006-06-20 12:34:24.988994ccn146.mcs.anl.gov0 A12006-06-20 12:34:23.289610ccn147.mcs.anl.gov0A12006-06-20 12:34:21.615620ccn142.mcs.anl.gov0A12006-06-20 12:34:20.082919ccn145.mcs.anl.gov0A12006-06-20 12:34:18.526295ccn115.mcs.anl.gov0A12006-06-20 12:34:16.810259ccn116.mcs.anl.gov0A12006-06-20 12:34:15.344659ccn122.mcs.anl.gov Zypg^ULC:1( }tkbYPG>5,#xoe[QG=3) yxwvutsrqponmlkjihgf~e~d~c}b|a|`|_|^{]{\{[zZzYzXyWyVyUxTxSxRwQwPwOvNvMvLuKuJuItHtGtFsEsDsCrBrAr@q?q>q=p<p;p:o9o8o7n6n5n4m3m2m1l0l/l.k-k,k+j*j)j(i'i&i%h$h#h"g!g g S '1;EOY}si_UKA7-#cks{ #+3;CKS[cks{                                             ! %(+-0359?CGKP!T#X%\'a(b)e*h+k,n-q.t/w0{1}23456789:;<=> @@@@@>w =! Thu Jan 5 06:32:59 2006cleanunknown$Revision$##>v =! Thu Jan 5 06:42:55 2006cleanunknown$Revision$##>u =! Thu Jan 5 06:30:27 2006cleanunknown$Revision$##>t =! Thu Jan 5 06:30:27 2006cleanunknown$Revision$##>s =! Thu Jan 5 06:36:52 2006cleanunknown$Revision$##>r =! Thu Jan 5 06:42:56 2006cleanunknown$Revision$##>q =! Thu Jan 5 06:33:25 2006cleanunknown$Revision$##>p =! Thu Jan 5 06:35:15 2006cleanunknown$Revision$##>o =! Thu Jan 5 06:37:22 2006cleanunknown$Revision$##>n =! Thu Jan 5 06:36:07 2006cleanunknown$Revision$##>m =! Thu Jan 5 06:33:43 2006cleanunknown$Revision$##>l =! Thu Jan 5 06:37:24 2006cleanunknown$Revision$##>k =! Thu Jan 5 06:39:24 2006cleanunknown$Revision$##>j =! Thu Jan 5 06:39:41 2006cleanunknown$Revision$##>i =! Thu Jan 5 06:41:57 2006cleanunknown$Revision$## 7g4h5i6j70)A12006-06-20 12:35:18.055534ccn173.mcs.anl.gov0(A12006-06-20 12:35:16.784274ccn174.mcs.anl.gov0'A12006-06-20 12:35:15.979908ccn175.mcs.anl.gov0&A12006-06-20 12:35:10.384768ccn169.mcs.anl.gov0%A12006-06-20 12:35:09.214767ccn171.mcs.anl.gov0$A12006-06-20 12:35:03.748638ccn166.mcs.anl.gov0#A12006-06-20 12:35:01.908120ccn168.mcs.anl.gov0"A12006-06-20 12:35:00.567203ccn198.mcs.anl.gov0!A12006-06-20 12:34:59.200374ccn197.mcs.anl.gov0 A12006-06-20 12:34:57.902398ccn196.mcs.anl.gov0A12006-06-20 12:34:56.536118ccn195.mcs.anl.gov0A12006-06-20 12:34:54.986484ccn193.mcs.anl.gov0A12006-06-20 12:34:53.638222ccn161.mcs.anl.gov0A12006-06-20 12:34:52.347086ccn163.mcs.anl.gov0A12006-06-20 12:34:50.855892ccn165.mcs.anl.gov0A12006-06-20 12:34:49.384198ccn160.mcs.anl.gov0A12006-06-20 12:34:47.549769ccn158.mcs.anl.gov0A12006-06-20 12:34:46.200585ccn159.mcs.anl.gov0A12006-06-20 12:34:44.684744ccn157.mcs.anl.gov e@y9p(e> =! Thu Jan 5 06:42:59 2006cleanunknown$Revision$##> =! Thu Jan 5 06:31:00 2006cleanunknown$Revision$##> =! Thu Jan 5 06:32:11 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:30:29 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:39:59 2006cleanunknown$Revision$##> =! Tue Jan 3 16:26:26 2006cleanunknown$Revision$##E =/ Wed Nov 23 06:34:12 2005cleanunknown$Revision: 1.40 $  >~ =! Thu Jan 5 06:29:46 2006cleanunknown$Revision$##E} =/ Wed Oct 5 10:56:06 2005cleanunknown$Revision: 1.36 $  >| =! 
Mon Dec 19 17:51:58 2005cleanunknown$Revision$##>{ =! Thu Jan 5 06:29:49 2006cleanunknown$Revision$##>z =! Thu Jan 5 06:29:48 2006cleanunknown$Revision$##>y =! Thu Jan 5 06:42:42 2006cleanunknown$Revision$##>x =! Thu Jan 5 06:29:49 2006cleanunknown$Revision$## O",6@JT^hr|  +6ALWbmx$.8BLV`jt~      "  R m    >     $ % & '9 4w 5  V   R        A W      &  T      D    S  i        n    @     $ % & '< 4x 5  X   S      L )2;DMV_hqz  *4>HR\fpz$.8BLV`jt~o9p;q>rAsDtGuKvNwPxTyVzY{\|`~d j o p s v y |                                                   " % ) + 7g4h5i6j70<A12006-06-20 12:39:01.412178ccn220.mcs.anl.gov0;A12006-06-20 12:38:45.261843ccn219.mcs.anl.gov0:A12006-06-20 12:38:43.548727ccn211.mcs.anl.gov09A12006-06-20 12:38:30.501503ccn207.mcs.anl.gov08A12006-06-20 12:38:01.240610ccn210.mcs.anl.gov07A12006-06-20 12:37:27.482917ccn208.mcs.anl.gov06A12006-06-20 12:37:24.228616ccn187.mcs.anl.gov05A12006-06-20 12:37:23.044637ccn189.mcs.anl.gov04A12006-06-20 12:37:21.888003ccn186.mcs.anl.gov03A12006-06-20 12:36:52.872566ccn204.mcs.anl.gov02A12006-06-20 12:36:20.701007ccn206.mcs.anl.gov01A12006-06-20 12:35:59.813104ccn203.mcs.anl.gov00A12006-06-20 12:35:58.663805ccn178.mcs.anl.gov0/A12006-06-20 12:35:57.505791ccn182.mcs.anl.gov0.A12006-06-20 12:35:56.356386ccn179.mcs.anl.gov0-A12006-06-20 12:35:55.241118ccn176.mcs.anl.gov0,A12006-06-20 12:35:54.025853ccn199.mcs.anl.gov0+A12006-06-20 12:35:18.566555ccn202.mcs.anl.gov0*A12006-06-20 12:35:18.320260ccn201.mcs.anl.gov YnH) |W4yX>"yY} libjpeg62PackageUnknown|# gnuplot-x11PackageUnknown{ dpkg-devPackageUnknown"z) kernel-packagePackageUnknowny! rsh-clientPackageUnknownx# debootstrapPackageUnknownw nttcpPackageUnknownv ucfPackageUnknownu! postgresqlPackageUnknownt! libperl5.8PackageUnknown%s/ postgresql-clientPackageUnknownr unzipPackageUnknownq rlsServiceUnknownp! postgresqlServiceUnknowno tftpPackageUnknown n% ext3rminatorPackageUnknown"m) libqthreads-12PackageUnknown*l9 kernel-image-2.6.11.12PackageUnknownk hdparmPackageUnknownj mailutilsPackageUnknown"i) guile-1.6-libsPackageUnknownh cpp-4.0PackageUnknowng bonnie++PackageUnknown#f+ libguile-ltdl-1PackageUnknown%e/ nfs-kernel-serverPackageUnknown d% gcc-4.0-basePackageUnknown c% libreadline5PackageUnknown!b' libmailutils0PackageUnknown @L\ l0|@9~ 3! 2006-01-05 06:40:54cleanunknown$Revision$##9} 3! 2006-01-05 06:40:51cleanunknown$Revision$##9| 3! 2006-01-05 06:40:47cleanunknown$Revision$##9{ 3! 2006-01-05 06:40:44cleanunknown$Revision$##9z 3! 2006-01-05 06:40:42cleanunknown$Revision$##9y 3! 2006-01-05 06:40:35cleanunknown$Revision$##9x 3! 2006-01-05 06:40:32cleanunknown$Revision$##9w 3! 2006-01-05 06:40:28cleanunknown$Revision$##9v 3! 2006-01-05 06:40:19cleanunknown$Revision$##9u 3! 2006-01-05 06:40:18cleanunknown$Revision$##9t 3! 2006-01-05 06:40:14cleanunknown$Revision$##9s 3! 2006-01-05 06:40:10cleanunknown$Revision$##9r 3! 2006-01-05 06:40:09cleanunknown$Revision$##9q 3! 2006-01-05 06:40:07cleanunknown$Revision$##9p 3! 2006-01-05 06:39:59cleanunknown$Revision$##9o 3! 2006-01-05 06:39:54cleanunknown$Revision$## (K U_i(?. 3/ 2005-11-28 06:35:07cleanunknown$Revision: 1.40 $8- 3! 2006-01-05 06:31:08dirtyunknown$Revision$  ?, 3/ 2005-11-28 06:35:07cleanunknown$Revision: 1.40 $8+ 3! 2006-01-05 06:32:56dirtyunknown$Revision$  ?* 3/ 2005-11-28 06:33:34cleanunknown$Revision: 1.40 $8) 3! 2006-01-05 06:39:06dirtyunknown$Revision$  ?( 3/ 2005-11-28 06:32:53cleanunknown$Revision: 1.40 $8' 3! 2006-01-05 06:30:25dirtyunknown$Revision$  ?& 3/ 2005-11-28 06:32:21cleanunknown$Revision: 1.40 $8% 3! 
2006-01-05 06:39:58dirtyunknown$Revision$  ?$ 3/ 2005-11-28 06:32:18cleanunknown$Revision: 1.40 $8# 3! 2006-01-05 06:36:45dirtyunknown$Revision$  ?" 3/ 2005-11-28 06:32:02cleanunknown$Revision: 1.40 $8! 3! 2006-01-05 06:36:45dirtyunknown$Revision$  ? 3/ 2005-11-28 06:30:48cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:40:33dirtyunknown$Revision$  V~=l+a V> =! Thu Jan 5 06:36:45 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:32:43 2005cleanunknown$Revision: 1.40 $> =! Mon Jan 9 13:57:52 2006cleanunknown$Revision$##> =! Thu Jan 5 06:29:59 2006cleanunknown$Revision$##> =! Thu Jan 5 06:42:23 2006cleanunknown$Revision$##> =! Thu Jan 5 06:29:49 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:35:18 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:30:00 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:38:16 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:32:55 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:26:28 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:36:35 2006cleanunknown$Revision$##> =! Thu Jan 5 06:39:29 2006cleanunknown$Revision$##> =! Thu Jan 5 06:39:54 2006cleanunknown$Revision$## T~tj`VLB8.$zpf\RH>4*  vlbXND:0&MLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!      ~}|{z O~sh]RG<1&{peZOD9/$wmcXMB7,!      ;B>~}|{7z6yxwvutsrqpon+mlkjizhgfedcba`_^]\[ZYXWVUT;SRQPONM>LK Ah6l9p= sA0_A/2006-06-20 14:23:00.632898ccn65.mcs.anl.gov0^A/2006-06-20 14:22:59.825212ccn72.mcs.anl.gov1]A12006-06-20 14:22:59.143054ccn185.mcs.anl.gov1\A12006-06-20 14:22:58.434342ccn199.mcs.anl.gov0[A/2006-06-20 14:22:57.734802ccn73.mcs.anl.gov1ZA12006-06-20 14:22:57.018507ccn126.mcs.anl.gov0YA/2006-06-20 14:22:56.127420cct3m.mcs.anl.gov0XA/2006-06-20 14:22:55.295040ccn85.mcs.anl.gov0WA/2006-06-20 14:22:54.612254ccn24.mcs.anl.gov1VA12006-06-20 14:22:53.929026ccn239.mcs.anl.gov1UA12006-06-20 14:22:53.237666ccn237.mcs.anl.gov1TA12006-06-20 14:22:52.443815ccn130.mcs.anl.gov0SA/2006-06-20 14:22:50.997341ccn22.mcs.anl.gov1RA12006-06-20 14:22:50.297913ccn163.mcs.anl.gov0QA/2006-06-20 14:22:49.554755ccn37.mcs.anl.gov0PA/2006-06-20 14:22:48.823835ccn78.mcs.anl.gov0OA/2006-06-20 14:22:48.108511ccn47.mcs.anl.gov1NA12006-06-20 14:22:47.408988ccn165.mcs.anl.gov1MA12006-06-20 14:22:46.725210ccn133.mcs.anl.gov @L\ l0|@9^ 3! 2006-01-05 06:39:03cleanunknown$Revision$##9] 3! 2006-01-05 06:39:03cleanunknown$Revision$##9\ 3! 2006-01-05 06:39:02cleanunknown$Revision$##9[ 3! 2006-01-05 06:39:01cleanunknown$Revision$##9Z 3! 2006-01-05 06:37:29cleanunknown$Revision$##9Y 3! 2006-01-05 06:37:24cleanunknown$Revision$##9X 3! 2006-01-05 06:37:22cleanunknown$Revision$##9W 3! 2006-01-05 06:37:16cleanunknown$Revision$##9V 3! 2006-01-05 06:37:10cleanunknown$Revision$##9U 3! 2006-01-05 06:37:07cleanunknown$Revision$##9T 3! 2006-01-05 06:37:04cleanunknown$Revision$##9S 3! 2006-01-05 06:37:01cleanunknown$Revision$##9R 3! 2006-01-05 06:36:57cleanunknown$Revision$##9Q 3! 2006-01-05 06:36:53cleanunknown$Revision$##9P 3! 2006-01-05 06:36:52cleanunknown$Revision$##9O 3! 
2006-01-05 06:36:46cleanunknown$Revision$## f2d0`,1pA12006-06-20 14:25:01.015673ccn187.mcs.anl.gov1oA12006-06-20 14:25:00.290113ccn141.mcs.anl.gov1nA12006-06-20 14:24:59.607190ccn176.mcs.anl.gov1mA12006-06-20 14:24:58.890726ccn158.mcs.anl.gov1lA12006-06-20 14:24:58.207601ccn161.mcs.anl.gov1kA12006-06-20 14:24:57.491499ccn118.mcs.anl.gov1jA12006-06-20 14:24:56.817096ccn146.mcs.anl.gov1iA12006-06-20 14:24:56.100788ccn100.mcs.anl.gov0hA/2006-06-20 14:24:55.301345ccn52.mcs.anl.gov1gA12006-06-20 14:24:54.593375ccn196.mcs.anl.gov0fA/2006-06-20 14:24:53.661025ccn21.mcs.anl.gov1eA12006-06-20 14:24:52.962480ccn153.mcs.anl.gov1dA12006-06-20 14:24:52.144966ccn115.mcs.anl.gov0cA/2006-06-20 14:24:51.420615ccn67.mcs.anl.gov0bA/2006-06-20 14:24:50.720909ccn34.mcs.anl.gov1aA12006-06-20 14:24:50.047249ccn186.mcs.anl.gov @i6k9n< s@1rA12006-06-20 14:23:16.981441ccsto4.mcs.anl.gov0qA/2006-06-20 14:23:16.225418ccn60.mcs.anl.gov0pA/2006-06-20 14:23:14.934181ccn88.mcs.anl.gov0oA/2006-06-20 14:23:13.935953ccn84.mcs.anl.gov1nA12006-06-20 14:23:12.614390ccn138.mcs.anl.gov0mA/2006-06-20 14:23:11.601207cct1m.mcs.anl.gov1lA12006-06-20 14:23:10.918232ccn193.mcs.anl.gov1kA12006-06-20 14:23:10.235016ccn233.mcs.anl.gov1jA12006-06-20 14:23:08.835847ccsto2.mcs.anl.gov0iA/2006-06-20 14:23:07.903150cct7m.mcs.anl.gov0hA/2006-06-20 14:23:07.203564ccn77.mcs.anl.gov0gA/2006-06-20 14:23:06.512857ccn82.mcs.anl.gov1fA12006-06-20 14:23:05.804437ccn230.mcs.anl.gov1eA12006-06-20 14:23:04.880616ccsto5.mcs.anl.gov1dA12006-06-20 14:23:04.205774ccn191.mcs.anl.gov1cA12006-06-20 14:23:03.506223ccn192.mcs.anl.gov1bA12006-06-20 14:23:02.815127ccn231.mcs.anl.gov0aA/2006-06-20 14:23:01.965425ccn75.mcs.anl.gov0`A/2006-06-20 14:23:01.290832ccn43.mcs.anl.gov GeJ,{^:vY<gGa libgsasl7PackageUnknown$`- edg-crl-upgradedServiceUnknown_ grisServiceUnknown%^/ nfs-kernel-serverServiceUnknown)]7 kernel-image-2.6.12.6PackageUnknown\! nfs-commonPackageUnknown[ sysstatPackageUnknownZ updatePackageUnknownY condorServiceUnknownX sysstatServiceUnknown W% mysql-clientPackageUnknownV mailxPackageUnknown U% mysql-serverPackageUnknownT mysqlServiceUnknown%S/ libdbd-mysql-perlPackageUnknown!R' libplrpc-perlPackageUnknownQ libpq3PackageUnknown&P1 libnet-daemon-perlPackageUnknown"O) libdbd-pg-perlPackageUnknownN# libdbi-perlPackageUnknown M% mysql-commonPackageUnknownL cpp-3.2PackageUnknownK gcc-3.2PackageUnknownJ filePackageUnknownI autoconfPackageUnknown H% gcc-3.2-basePackageUnknownG# automake1.4PackageUnknownF m4PackageUnknownE libtoolPackageUnknown M(3>IT_ju&0:DNXcny)4?JU`kv " B Y       '  V     G    T  k        q    B     $ % & '> 4{ 5 # [   U       # C Z       )  X     J    U  l        r    C OvkaVK@6+  }rg\QF;0%zodYND9.# t~}|{zyxBwvuts;rqponmlkj>ihgSfe7d6cba`_^]\[ZY+XWVUTSRQzPONMLKJtIHGFEDC;BA>@6?>=+<;:98 O#.9DOZep{  +6ALWbmw$/:EP[fq|     $ % & '? 4| 5 $ \   V       $ D [       *  Y      V         t    G     $ % & 'A 4~ 5 ' `   Y       & E ]       -  ]     [~=G Q[? 3/ 2005-11-28 06:29:43cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:30:43dirtyunknown$Revision$  ? 3/ 2005-11-28 06:29:41cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:33:17dirtyunknown$Revision$  ? 3/ 2005-11-28 06:29:41cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:39:04dirtyunknown$Revision$  ? 3/ 2005-11-28 06:29:41cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:37:01dirtyunknown$Revision$  ? 3/ 2005-11-28 06:26:21cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:37:15dirtyunknown$Revision$  ? 3/ 2005-11-28 06:26:04cleanunknown$Revision: 1.40 $8 3! 2006-01-05 06:31:18dirtyunknown$Revision$  ? 
3/ 2005-11-28 06:26:03cleanunknown$Revision: 1.40 $? 3/ 2005-11-26 00:42:27cleanunknown$Revision: 1.40 $? 3/  2005-11-23 06:34:12cleanunknown$Revision: 1.40 $  T~tj`VLB8.$zpf\RH>4*  vlbXND:0&          ~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?> :w/^L :E! =/ Thu Dec 1 06:30:46 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:30:27 2006dirtyunknown$Revision$ E =/ Thu Dec 1 06:27:08 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:39:24 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:26:14 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:30:47 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:29:48 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:39:03 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:29:51 2005cleanunknown$Revision: 1.40 $E =/ Sat Nov 26 00:42:27 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:37:17 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:30:11 2005cleanunknown$Revision: 1.40 $> =! Thu Jan 5 06:36:07 2006dirtyunknown$Revision$  E =/ Thu Dec 1 06:36:49 2005cleanunknown$Revision: 1.40 $ @L\ l0|@9N 3! 2006-01-05 06:36:46cleanunknown$Revision$##9M 3! 2006-01-05 06:36:45cleanunknown$Revision$##9L 3! 2006-01-05 06:36:45cleanunknown$Revision$##9K 3! 2006-01-05 06:36:45cleanunknown$Revision$##9J 3! 2006-01-05 06:36:35cleanunknown$Revision$##9I 3! 2006-01-05 06:36:35cleanunknown$Revision$##9H 3! 2006-01-05 06:36:35cleanunknown$Revision$##9G 3! 2006-01-05 06:36:35cleanunknown$Revision$##9F 3! 2006-01-05 06:36:33cleanunknown$Revision$##9E 3! 2006-01-05 06:36:17cleanunknown$Revision$##9D 3! 2006-01-05 06:36:17cleanunknown$Revision$##9C 3! 2006-01-05 06:36:17cleanunknown$Revision$##9B 3! 2006-01-05 06:36:17cleanunknown$Revision$##9A 3! 2006-01-05 06:36:08cleanunknown$Revision$##9@ 3! 2006-01-05 06:36:07cleanunknown$Revision$##9? 3! 2006-01-05 06:36:07cleanunknown$Revision$66 EiL$zW1k8iE!D' autotools-devPackageUnknownC libmagic1PackageUnknownB lam4PackageUnknownA! traceroutePackageUnknown@ libnet1PackageUnknown? arpingPackageUnknown> patchPackageUnknown= pvfs2PackageUnknown0<E kernel-image-2.6.12.5-p3-mcsPackageUnknown0;E pvfs2-driver-2.6.12.5-p3-mcsPackageUnknown!:' libpango1.0-0PackageUnknown9# libatk1.0-0PackageUnknown!8' libgtk2.0-binPackageUnknown&71 libpango1.0-commonPackageUnknown#6+ mozilla-firefoxPackageUnknown 5% libglib2.0-0PackageUnknown4 libidl0PackageUnknown3# libgtk2.0-0PackageUnknown$2- libgtk2.0-commonPackageUnknown1# xlibmesa-glPackageUnknown0! emacs21-elPackageUnknown%// libhtml-tree-perlPackageUnknown. 
gm-devPackageUnknown- libxft1PackageUnknown!,' mpich2-systemPackageUnknown,+= kernel-source-2.4.29-rc2PackageUnknown#*+ libgenders-perlPackageUnknown :j7k8n;n:1A12006-06-20 14:23:42.081758ccn109.mcs.anl.gov0A/2006-06-20 14:23:41.373380ccn79.mcs.anl.gov0A/2006-06-20 14:23:40.682588ccn83.mcs.anl.gov1A12006-06-20 14:23:39.983336ccn104.mcs.anl.gov0A/2006-06-20 14:23:39.274642ccn57.mcs.anl.gov0A/2006-06-20 14:23:27.682644ccfs2.mcs.anl.gov1A12006-06-20 14:23:26.974253ccn128.mcs.anl.gov0~A/2006-06-20 14:23:26.283383ccn49.mcs.anl.gov1}A12006-06-20 14:23:25.559050ccn131.mcs.anl.gov0|A/2006-06-20 14:23:24.859184ccn80.mcs.anl.gov1{A12006-06-20 14:23:24.159599ccn150.mcs.anl.gov1zA12006-06-20 14:23:23.460028ccn159.mcs.anl.gov1yA12006-06-20 14:23:22.576590ccn132.mcs.anl.gov1xA12006-06-20 14:23:21.471312ccn147.mcs.anl.gov1wA12006-06-20 14:23:20.736714ccn137.mcs.anl.gov1vA12006-06-20 14:23:20.028878ccn136.mcs.anl.gov0uA/2006-06-20 14:23:19.337598ccn50.mcs.anl.gov0tA/2006-06-20 14:23:18.621455ccn30.mcs.anl.gov0sA/2006-06-20 14:23:17.930031ccn35.mcs.anl.gov N_=kN$cI, nN) libxtrap6PackageUnknown(! python-devPackageUnknown'! mpich2-mpdPackageUnknown& gendersPackageUnknown% libdps1PackageUnknown$ libxp6PackageUnknown #% xlibmesa-gluPackageUnknown" libxi6PackageUnknown! rcsPackageUnknown # sss-wrapperPackageUnknown!' xbase-clientsPackageUnknown! mpich2-devPackageUnknown xlibsPackageUnknown libxmuu1PackageUnknown libxtst6PackageUnknown'3 libhtml-tagset-perlPackageUnknown libxv1PackageUnknown pdshPackageUnknown#+ sss-bcm-clientsPackageUnknown'3 libhtml-parser-perlPackageUnknown&1 python-elementtreePackageUnknown fingerPackageUnknown! libxrandr2PackageUnknown# libwww-perlPackageUnknown! libdb3-devPackageUnknown# mpich2-libsPackageUnknown# libxcursor1PackageUnknown# liburi-perlPackageUnknown  mpdServiceUnknown @iH$ Ak4z@7 S kernel-image-2.6.9-chiba-selfish-upPackageUnknown4 M kernel-image-2.6.9-chiba-selfishPackageUnknown  libx11-6PackageUnknown# + libmpich1.0-devPackageUnknown mpich-binPackageUnknown g77-3.3PackageUnknown4M gm-driver-2.6.9-chiba-selfish-upPackageUnknown'3 kernel-image-2.4.28PackageUnknown# libmpich1.0PackageUnknown! 
xlibs-dataPackageUnknown$- gm-driver-2.4.28PackageUnknown?c kernel-image-2.6.10-rc2-mm3-v0.7.32-9-mode1PackageUnknown?c kernel-image-2.6.10-rc2-mm3-v0.7.32-9-mode4PackageUnknown)7 kernel-image-2.6.11.8PackageUnknown2~I kernel-image-2.6.11.10-mcs-x86PackageUnknown'}3 pvfs2-driver-2.4.28PackageUnknown| g77PackageUnknown"{) xfree86-commonPackageUnknownz# libg2c0-devPackageUnknowny mpichPackageUnknown x% pvfs2-clientServiceUnknownw libedit2PackageUnknownv# libselinux1PackageUnknownu iperfPackageUnknown 8g4h5k8j8/OA/2006-06-20 12:40:59.367525ccn66.mcs.anl.gov2NA52006-06-20 12:40:59.050559ccfs1-fe.mcs.anl.gov0MA12006-06-20 12:40:58.636975ccn188.mcs.anl.gov0LA12006-06-20 12:40:57.358510ccn185.mcs.anl.gov0KA12006-06-20 12:40:55.842348ccn192.mcs.anl.gov0JA12006-06-20 12:40:54.589773ccn181.mcs.anl.gov0IA12006-06-20 12:40:53.287053ccn167.mcs.anl.gov0HA12006-06-20 12:40:51.942343ccn144.mcs.anl.gov/GA/2006-06-20 12:40:48.301044ccn93.mcs.anl.gov/FA/2006-06-20 12:40:46.817021ccn69.mcs.anl.gov0EA12006-06-20 12:40:30.940298ccn215.mcs.anl.gov0DA12006-06-20 12:40:30.713887ccn216.mcs.anl.gov0CA12006-06-20 12:40:14.515890ccn213.mcs.anl.gov0BA12006-06-20 12:39:57.061332ccn224.mcs.anl.gov0AA12006-06-20 12:39:51.479045ccn223.mcs.anl.gov0@A12006-06-20 12:39:45.019474ccn222.mcs.anl.gov0?A12006-06-20 12:39:44.675669ccn221.mcs.anl.gov0>A12006-06-20 12:39:23.338536ccn217.mcs.anl.gov0=A12006-06-20 12:39:06.731887ccn218.mcs.anl.gov Fk9tBxEyF1&A12006-06-20 14:21:43.565360ccn219.mcs.anl.gov1%A12006-06-20 14:21:41.935227ccn220.mcs.anl.gov1$A12006-06-20 14:21:38.302187ccn205.mcs.anl.gov1#A12006-06-20 14:21:35.137344ccn217.mcs.anl.gov1"A12006-06-20 14:21:33.471899ccn227.mcs.anl.gov1!A12006-06-20 14:21:31.431605ccn203.mcs.anl.gov1 A12006-06-20 14:21:29.457814ccn218.mcs.anl.gov1A12006-06-20 14:21:27.783812ccn223.mcs.anl.gov0A/2006-06-20 14:21:25.910039ccn15.mcs.anl.gov0A/2006-06-20 14:21:23.994672ccn12.mcs.anl.gov0A/2006-06-20 14:21:22.087455ccn13.mcs.anl.gov0A/2006-06-20 14:21:20.038872ccn16.mcs.anl.gov/A-2006-06-20 14:21:17.723680ccn2.mcs.anl.gov/A-2006-06-20 14:21:15.733187ccn3.mcs.anl.gov/A-2006-06-20 14:21:13.412210ccn8.mcs.anl.gov0A/2006-06-20 14:21:07.430271ccn10.mcs.anl.gov0A/2006-06-20 14:21:05.542010ccn14.mcs.anl.gov/A-2006-06-20 14:21:03.435149ccn5.mcs.anl.gov0A/2006-06-20 14:20:59.967814ccn11.mcs.anl.gov WwY;eJ0 t\5~W%t/ module-init-toolsPackageUnknown#s+ gm-route-clientPackageUnknownr stracePackageUnknownq gm-utilsPackageUnknownp! ntp-simplePackageUnknowno ntpdatePackageUnknownn mpishPackageUnknown%m/ mpich2-system-mpdPackageUnknownl gmPackageUnknownk! mpd-systemServiceUnknownj cpp-3.3PackageUnknowni! zlib1g-devPackageUnknownh libc6-devPackageUnknowng gccPackageUnknown%f/ python2.3-twistedPackageUnknowne makePackageUnknownd bzip2PackageUnknownc gcc-3.3PackageUnknownb cppPackageUnknowna opensslPackageUnknown` cvsPackageUnknown#_+ libncurses5-devPackageUnknown ^% bridge-utilsPackageUnknown!]' python2.3-devPackageUnknown\ libidn11PackageUnknown[ binutilsPackageUnknownZ libcurl3PackageUnknown Y% libcurl3-devPackageUnknownX libsysfs1PackageUnknown(W5 linux-kernel-headersPackageUnknown Ow6^SO>/ =! Thu Jan 5 06:40:44 2006cleanunknown$Revision$##>. =! Thu Jan 5 06:40:14 2006cleanunknown$Revision$##>- =! Thu Jan 5 06:40:42 2006cleanunknown$Revision$##>, =! Sat Dec 10 06:30:50 2005cleanunknown$Revision$>+ =! Thu Jan 5 06:36:45 2006cleanunknown$Revision$##>* =! Thu Jan 5 06:32:59 2006dirtyunknown$Revision$  E) =/ Thu Dec 1 06:29:54 2005cleanunknown$Revision: 1.40 $>( =! 
Thu Jan 5 06:29:59 2006dirtyunknown$Revision$  E' =/ Thu Dec 1 06:30:42 2005cleanunknown$Revision: 1.40 $E& =/ Mon Dec 5 06:25:21 2005dirtyunknown$Revision: 1.37 $E% =/ Mon Oct 24 06:27:13 2005cleanunknown$Revision: 1.37 $  >$ =! Thu Jan 5 06:33:47 2006dirtyunknown$Revision$  E# =/ Thu Dec 1 06:25:29 2005cleanunknown$Revision: 1.40 $>" =! Thu Jan 5 06:30:39 2006dirtyunknown$Revision$  @L\ l0|@9> 3! 2006-01-05 06:36:07cleanunknown$Revision$##9= 3! 2006-01-05 06:36:07cleanunknown$Revision$##9< 3! 2006-01-05 06:36:07cleanunknown$Revision$##9; 3! 2006-01-05 06:36:07cleanunknown$Revision$##9: 3! 2006-01-05 06:36:07cleanunknown$Revision$##99 3! 2006-01-05 06:35:15cleanunknown$Revision$##98 3! 2006-01-05 06:35:10cleanunknown$Revision$97 3! 2006-01-05 06:35:10cleanunknown$Revision$##96 3! 2006-01-05 06:35:09cleanunknown$Revision$##95 3! 2006-01-05 06:34:31cleanunknown$Revision$##94 3! 2006-01-05 06:34:27cleanunknown$Revision$##93 3! 2006-01-05 06:34:22cleanunknown$Revision$##92 3! 2006-01-05 06:34:21cleanunknown$Revision$##91 3! 2006-01-05 06:34:17cleanunknown$Revision$##90 3! 2006-01-05 06:34:06cleanunknown$Revision$##9/ 3! 2006-01-05 06:34:03cleanunknown$Revision$## S&0:DNXblv  *4>HR\fpz$.8BLV`jt~         4 5 : ; ? A D I L M Q S X Y _ d f i k n q u y { ~                                                 " T~tj`VLB8.$zpf\RH>4*  vlbXND:0&!      ~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPON RqJ!gL1_@oRV libatm1PackageUnknownU! pkg-configPackageUnknown T% libidn11-devPackageUnknownS! libssl-devPackageUnknown)R7 python2.3-twisted-binPackageUnknownQ iproutePackageUnknown%P/ python2.2-xmlbasePackageUnknownO python2.2PackageUnknown!N' python2.2-xmlPackageUnknownM bcfg2ServiceUnknown!L' stop-bootlogdServiceUnknownK rmnologinServiceUnknownJ cronServiceUnknownI rsyncServiceUnknownH makedevServiceUnknownG inetdServiceUnknownF klogdServiceUnknownE sysklogdServiceUnknownD singleServiceUnknown#C+ amihappy-clientPackageUnknownB http-tinyPackageUnknownA libkrb53PackageUnknown@ cpp-2.95PackageUnknown'?3 libmsyslog-mcs-perlPackageUnknown%>/ python2.3-libxml2PackageUnknown!=' libaudiofile0PackageUnknown< tcpdumpPackageUnknown%;/ chiba-stage-slavePackageUnknown&:1 emacs21-bin-commonPackageUnknown Fq )2;DNXblv  *{q4<ENV_hpy ; ; ; f ; 2;;;;2;q;;; ;?;V;h; w;!;";#;$;%;4;5 ;G ; ; ; ; ;C ;v ; ; ; ;3 ;b ; ; I ; ~ ; ; ; ; ; ;  ; / ; I ; <k<< 4=l== 6>m>>>>(>g>>>>8>T>f> u>!>">#>$ KOc(w;K9. 3! 2006-01-05 06:33:56cleanunknown$Revision$##9- 3! 2006-01-05 06:33:56cleanunknown$Revision$##9, 3! 2006-01-05 06:33:56cleanunknown$Revision$##9+ 3! 2006-01-05 06:33:50cleanunknown$Revision$##9* 3! 2006-01-05 06:33:44cleanunknown$Revision$8) 3! 2006-01-05 06:33:43cleanunknown$Revision$##8( 3! ~2006-01-05 06:33:40cleanunknown$Revision$##8' 3! }2006-01-05 06:33:36cleanunknown$Revision$##8& 3! |2006-01-05 06:33:30cleanunknown$Revision$##8% 3! {2006-01-05 06:33:29cleanunknown$Revision$##8$ 3! z2006-01-05 06:33:25cleanunknown$Revision$##8# 3! y2006-01-05 06:33:24cleanunknown$Revision$##8" 3! x2006-01-05 06:33:17cleanunknown$Revision$##8! 3! w2006-01-05 06:33:16cleanunknown$Revision$##8 3! v2006-01-05 06:33:13cleanunknown$Revision$##8 3! u2006-01-05 06:33:13cleanunknown$Revision$## N|qf[PE:/$ti^TI?4)zodYNC8-" ,+z*)('&%$#"! 2     &~}|{zyxwvutsrqponmlkjihgfedcba`_ TvT 6+#Can delete pingdelete_ping 5+#Can change pingchange_ping4%Can add pingadd_ping$3/'Can delete reasondelete_reason$2/'Can change reasonchange_reason V~=s2h V>= =! Thu Jan 5 06:30:43 2006dirtyunknown$Revision$  E< =/ Mon Nov 28 06:29:41 2005cleanunknown$Revision: 1.40 $>; =! 
Thu Jan 5 06:36:45 2006dirtyunknown$Revision$  E: =/ Mon Nov 28 06:32:02 2005cleanunknown$Revision: 1.40 $>9 =! Thu Jan 5 06:31:18 2006dirtyunknown$Revision$  E8 =/ Mon Nov 28 06:26:03 2005cleanunknown$Revision: 1.40 $>7 =! Thu Jan 5 06:42:51 2006cleanunknown$Revision$##>6 =! Thu Jan 5 06:42:22 2006cleanunknown$Revision$##>5 =! Thu Jan 5 06:39:58 2006dirtyunknown$Revision$  E4 =/ Mon Nov 28 06:32:18 2005cleanunknown$Revision: 1.40 $>3 =! Thu Jan 5 06:34:06 2006cleanunknown$Revision$##>2 =! Thu Jan 5 06:42:18 2006cleanunknown$Revision$##>1 =! Thu Jan 5 06:31:02 2006cleanunknown$Revision$##>0 =! Thu Jan 5 06:31:17 2006cleanunknown$Revision$## Ij7p> vD|I0bA12006-06-20 12:43:55.013025ccn108.mcs.anl.gov/aA/2006-06-20 12:43:53.821941ccn84.mcs.anl.gov/`A/2006-06-20 12:43:52.622712ccn75.mcs.anl.gov/_A/2006-06-20 12:43:51.407010ccn73.mcs.anl.gov/^A/2006-06-20 12:43:50.168385ccn88.mcs.anl.gov/]A/2006-06-20 12:43:49.924364ccn76.mcs.anl.gov/\A/2006-06-20 12:43:48.691670ccn77.mcs.anl.gov/[A/2006-06-20 12:43:47.508669ccn79.mcs.anl.gov/ZA/2006-06-20 12:43:45.680814ccn68.mcs.anl.gov/YA/2006-06-20 12:43:30.321705ccn19.mcs.anl.gov/XA/2006-06-20 12:43:24.307358ccn18.mcs.anl.gov/WA/2006-06-20 12:43:14.505349ccn14.mcs.anl.gov/VA/2006-06-20 12:42:46.622021ccn11.mcs.anl.gov/UA/2006-06-20 12:42:07.912326ccn10.mcs.anl.gov.TA-2006-06-20 12:41:35.697255ccn6.mcs.anl.gov0SA12006-06-20 12:41:34.489720ccn118.mcs.anl.gov0RA12006-06-20 12:41:33.301090ccn117.mcs.anl.gov.QA-2006-06-20 12:41:02.478199ccn8.mcs.anl.gov/PA/2006-06-20 12:41:00.527714ccn91.mcs.anl.gov Otj`UJ@5*  ~sh^SH=2'}rg]RG<2({zyxw>vuts9rqp7o6nmlk2jihgfed+cba&`_$^ ]\[ZzYXWVUTSRQPONBMLKJI;HGFEDCB>A@?>9=<;7:698765432+10/$. - I!+5@KValw'2=HS]gq{ !,7BMXcny     d % 4 5 A     = p    - [   C  x         .  H   1  `   . m   0 o    = 4 5 E     A t   1 `   G  |        1 p    > O~sh]RH=2(}rg\RG<2(|qf[QF;0%JzIHGFEtDCBA@?>=<;B:9876;543210/>.-,+9*)('7&6%$#"! +$ zt     B~;}| Ovk`UJ@5+  uj_UJ?4) }rg\QF;0%     q~}|{zyxBwvutsr;qponmlk>jihg9fedc7b6a`_^2]\[ZYXW+VUTSR&QPO$N MLK  =h5i6l9o=0LA/2006-06-20 14:22:46.017385ccn32.mcs.anl.gov0KA/2006-06-20 14:22:45.317999ccn42.mcs.anl.gov1JA12006-06-20 14:22:44.593312ccn189.mcs.anl.gov0IA/2006-06-20 14:22:43.902071ccn48.mcs.anl.gov1HA12006-06-20 14:22:43.227371ccn198.mcs.anl.gov1GA12006-06-20 14:22:42.378222ccn195.mcs.anl.gov1FA12006-06-20 14:22:41.694980ccn197.mcs.anl.gov0EA/2006-06-20 14:22:40.762276cct4m.mcs.anl.gov1DA12006-06-20 14:22:40.062501ccn171.mcs.anl.gov0CA/2006-06-20 14:22:38.929966cct2m.mcs.anl.gov1BA12006-06-20 14:22:37.805791ccsto1.mcs.anl.gov1AA12006-06-20 14:22:37.131190ccn170.mcs.anl.gov1@A12006-06-20 14:22:36.431833ccn175.mcs.anl.gov1?A12006-06-20 14:22:35.757249ccn183.mcs.anl.gov1>A12006-06-20 14:22:35.099317ccn134.mcs.anl.gov1=A12006-06-20 14:22:34.355085ccn102.mcs.anl.gov1<A12006-06-20 14:22:33.221130ccn107.mcs.anl.gov1;A12006-06-20 14:22:31.443286ccn168.mcs.anl.gov0:A/2006-06-20 14:22:30.744177ccn81.mcs.anl.gov ^rE mE$mJ {^ autofsserviceUnknown# stagemasterserviceUnknown tftpd-hpaserviceUnknown % dhcp3-serverserviceUnknown apache2serviceUnknown# netbootmondserviceUnknown'3 systemimager-serverserviceUnknown % bcfg2-serverserviceUnknown famserviceUnknown mdadmserviceUnknown dpkg-devpackageUnknown") kernel-packagepackageUnknown exim4serviceUnknown ! postgresqlpackageUnknown ! libperl5.8packageUnknown% / postgresql-clientpackageUnknown  unzippackageUnknown  rlsserviceUnknown! 
postgresqlserviceUnknown % mysql-serverpackageUnknown mysqlserviceUnknown updatepackageUnknown") libqthreads-12packageUnknown*9 kernel-image-2.6.11.12packageUnknown mailutilspackageUnknown") guile-1.6-libspackageUnknown#+ libguile-ltdl-1packageUnknown % libreadline5packageUnknown O~si^SH>4) {qg\RG<1&wlaVK@5* hgfe2dcba`_^]\[ZYXWVUT&SRQPONMLKJIHGFEDCBAq@?>=<;:987654B3210/.-,S+9*)('7&%$#"!  0f2e2e2c00A/2006-06-20 14:23:56.630255ccn56.mcs.anl.gov1A12006-06-20 14:23:55.939155ccn105.mcs.anl.gov0A/2006-06-20 14:23:55.148239ccn51.mcs.anl.gov1A12006-06-20 14:23:54.173569ccsto3.mcs.anl.gov1A12006-06-20 14:23:53.490797ccn155.mcs.anl.gov0A/2006-06-20 14:23:52.366404ccn96.mcs.anl.gov0A/2006-06-20 14:23:51.641862ccn58.mcs.anl.gov0A/2006-06-20 14:23:50.950837ccn53.mcs.anl.gov0A/2006-06-20 14:23:50.226205ccn97.mcs.anl.gov1A12006-06-20 14:23:49.493604ccn157.mcs.anl.gov0A/2006-06-20 14:23:48.568751cct5m.mcs.anl.gov0 A/2006-06-20 14:23:47.861305ccn36.mcs.anl.gov1 A12006-06-20 14:23:47.161398ccn177.mcs.anl.gov0 A/2006-06-20 14:23:46.428567ccn54.mcs.anl.gov0 A/2006-06-20 14:23:45.746308ccn59.mcs.anl.gov1 A12006-06-20 14:23:45.021187ccn108.mcs.anl.gov0A/2006-06-20 14:23:44.296637ccn94.mcs.anl.gov1A12006-06-20 14:23:43.588736ccn101.mcs.anl.gov0A/2006-06-20 14:23:42.889238ccn91.mcs.anl.gov Ovk`UK@5* }rg\RG<1'{peZOD:/$76q543210/B.-,+*;)('&%$>#"! 76+z     q~}|{zyBxwvutsrqSp9onml7kji H)4?JU`kv'2=HS^it$/:EP[fq|        ;   4  O  m  )n *p *r 4 5 4 5 k   -  r 4 5 5 R    x    % ]     W    I v   ?  { ( a  !    P ) b   Z    L z   F  * c  G  U#+4<EMV^gox )3=GQ[eoy &/8AJS\enw S E e _ DtDEuEFvFGwGHxHH OIyIJzJK{KL|LL DMNOPQRSS&*S'Z S S& S S S , S q S S  S Y S TUVV WW X X X gY Y Z Z [ [ \ \ ] ]  ^ ^ _ _ ` ` a a b b c c d d &e f f - fT fg g /h h J Pti_TI>3(}rg\RG<1&{pf[PF;1&;>62+~}|&{zyxwvu;ts>rq6po+nmlkjihgfeBdcba`_;^]\[ZY>XWVUT7S6RQPO2NMLKJIHGF+EDCB&A@?z>=<;:98 Vw/e$a VEK =/ Mon Nov 28 06:32:21 2005cleanunknown$Revision: 1.40 $>J =! Thu Jan 5 06:34:22 2006cleanunknown$Revision$##>I =! Thu Jan 5 06:32:59 2006cleanunknown$Revision$##>H =! Thu Jan 5 06:31:12 2006cleanunknown$Revision$##>G =! Thu Jan 5 06:30:57 2006cleanunknown$Revision$##>F =! Thu Jan 5 06:32:59 2006cleanunknown$Revision$##>E =! Thu Jan 5 06:32:06 2006cleanunknown$Revision$##>D =! Thu Jan 5 06:33:56 2006cleanunknown$Revision$##>C =! Thu Jan 5 06:36:17 2006cleanunknown$Revision$##>B =! Thu Jan 5 06:33:17 2006dirtyunknown$Revision$  EA =/ Mon Nov 28 06:29:41 2005cleanunknown$Revision: 1.40 $E@ =/ Mon Oct 24 10:43:57 2005cleanunknown$Revision: 1.37 $  >? =! Thu Jan 5 06:36:45 2006dirtyunknown$Revision$  E> =/ Mon Nov 28 06:30:48 2005cleanunknown$Revision: 1.40 $ G $,4<DLT\dlt} (1:CLU^gpy'*.1278=AEIMO!S#W%['`(d)g*i+m,p-r.v/y0z123456789:;<=>?@ABCEFGHIJKLMNOPQRSTUVWXYZ[\]^ PxndZPF<1&~sh^SH=2'|rh]RH=2'WV$UTSRQPONMLKJI;HGF>E6DCB2A@+?>&=<;:9876543210/;.-,>+*6)(+'&%$#"!   2&      D*5@KValw'2=HS^it$/:EP[fq| n    ) a    P      _   O  B     > q   . ]    a       N  0  #  f E r H u K y M | c     n    2  M   !   
"   #   $   8  j T~tj`VLB8.$zpf\RH>4*  vlbXND:0&utsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#" =k9m:o< p=0uA12006-06-20 12:44:43.340871ccn170.mcs.anl.gov0tA12006-06-20 12:44:42.134899ccn190.mcs.anl.gov0sA12006-06-20 12:44:29.224621ccn209.mcs.anl.gov0rA12006-06-20 12:44:28.066897ccn153.mcs.anl.gov0qA12006-06-20 12:44:26.859883ccn152.mcs.anl.gov0pA12006-06-20 12:44:25.686110ccn150.mcs.anl.gov0oA12006-06-20 12:44:23.511466ccn129.mcs.anl.gov0nA12006-06-20 12:44:22.312268ccn143.mcs.anl.gov0mA12006-06-20 12:44:21.063332ccn120.mcs.anl.gov/lA/2006-06-20 12:44:19.880535ccn97.mcs.anl.gov0kA12006-06-20 12:44:18.709069ccn119.mcs.anl.gov0jA12006-06-20 12:44:18.484179ccn249.mcs.anl.gov0iA12006-06-20 12:44:18.240126ccn110.mcs.anl.gov0hA12006-06-20 12:44:16.998962ccn105.mcs.anl.gov0gA12006-06-20 12:44:15.766558ccn180.mcs.anl.gov/fA/2006-06-20 12:44:14.542266ccn99.mcs.anl.gov/eA/2006-06-20 12:44:13.326890ccn87.mcs.anl.gov/dA/2006-06-20 12:44:11.753565ccn85.mcs.anl.gov.cA-2006-06-20 12:43:56.328616ccn3.mcs.anl.gov Pti^SH=2'|rh]RG<1&|rh^TJ@6,! '&%$#"!+ &z     ~}|9{zy7xwv2utsrqpo&n$mlkjihgfedcba`_9^]\7[ZYX *~=z9v5r*EZ =/ Tue Oct 25 02:35:17 2005cleanunknown$Revision: 1.37 $  >Y =! Thu Jan 5 06:42:38 2006cleanunknown$Revision$##>X =! Thu Jan 5 06:40:57 2006cleanunknown$Revision$##>W =! Thu Jan 5 06:33:29 2006cleanunknown$Revision$##>V =! Wed Dec 14 06:25:51 2005cleanunknown$Revision$>U =! Thu Jan 5 06:39:19 2006cleanunknown$Revision$##>T =! Thu Jan 5 06:39:01 2006cleanunknown$Revision$##>S =! Thu Jan 5 06:36:07 2006cleanunknown$Revision$##>R =! Thu Jan 5 06:37:07 2006cleanunknown$Revision$##>Q =! Thu Jan 5 06:36:08 2006cleanunknown$Revision$##>P =! Thu Jan 5 06:36:53 2006cleanunknown$Revision$##>O =! Thu Jan 5 06:36:45 2006cleanunknown$Revision$##>N =! Thu Jan 5 06:36:17 2006cleanunknown$Revision$##>M =! Thu Jan 5 06:30:37 2006cleanunknown$Revision$##>L =! Thu Jan 5 06:30:25 2006dirtyunknown$Revision$  OvlaVK@5* {pf[PE:/$xmcXMB7,! vutsqr qponmlkjihgfeBdcbaS`9_^]7\[ZYXWVUTSRQP ONM$LK J IHGF EDCBAq@ ?>=<;:98765;43210/. ->,+* )6(2 ]~6s2o']Eh =/ Thu Dec 1 06:38:32 2005cleanunknown$Revision: 1.40 $>g =! Thu Jan 5 06:39:43 2006cleanunknown$Revision$##>f =! Thu Jan 5 06:36:04 2006dirtyunknown$Revision$  Ee =/ Thu Dec 1 06:32:25 2005cleanunknown$Revision: 1.40 $>d =! Thu Jan 5 06:31:20 2006cleanunknown$Revision$##>c =! Tue Jan 3 16:26:26 2006cleanunknown$Revision$##>b =! Thu Jan 5 06:40:07 2006cleanunknown$Revision$##>a =! Thu Jan 5 06:35:09 2006cleanunknown$Revision$##>` =! Thu Jan 5 06:36:57 2006cleanunknown$Revision$##>_ =! Thu Jan 5 06:37:01 2006cleanunknown$Revision$##>^ =! Thu Jan 5 06:32:04 2006dirtyunknown$Revision$  E] =/ Thu Dec 1 06:27:42 2005cleanunknown$Revision: 1.40 $>\ =! Tue Jan 3 16:26:27 2006cleanunknown$Revision$##>[ =! 
examples/report-configuration.xml000066400000000000000000000025501303523157100176000ustar00rootroot00000000000000 gentoo/000077500000000000000000000000001303523157100123515ustar00rootroot00000000000000gentoo/bcfg2-1.3.0.ebuild000066400000000000000000000035701303523157100151640ustar00rootroot00000000000000# Copyright 1999-2013 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ EAPI=4 PYTHON_DEPEND="*:2.6" SUPPORT_PYTHON_ABIS="1" # ssl module required. RESTRICT_PYTHON_ABIS="2.5" inherit distutils eutils DESCRIPTION="configuration management tool" HOMEPAGE="http://bcfg2.org" SRC_URI="ftp://ftp.mcs.anl.gov/pub/bcfg/${P}.tar.gz" LICENSE="BSD-2" SLOT="0" KEYWORDS="~amd64 ~x86 ~amd64-linux ~x86-linux ~x64-solaris" IUSE="doc cheetah genshi server" DEPEND="dev-python/setuptools doc? ( dev-python/sphinx )" RDEPEND="app-portage/gentoolkit cheetah? ( dev-python/cheetah ) genshi? ( dev-python/genshi ) server? ( virtual/fam dev-python/lxml dev-python/python-daemon || ( dev-python/pyinotify dev-libs/libgamin[python] ) )" PYTHON_MODNAME="Bcfg2" distutils_src_install_post_hook() { if ! use server; then rm -f "$(distutils_get_intermediate_installation_image)${EPREFIX}/usr/sbin/bcfg2-"* fi } src_compile() { distutils_src_compile if use doc; then einfo "Building Bcfg2 documentation" PYTHONPATH="build-$(PYTHON -f --ABI)" \ sphinx-build doc doc_output || die fi } src_install() { distutils_src_install --record=PY_SERVER_LIBS --install-scripts "${EPREFIX}/usr/sbin" if ! use server; then # Remove files only necessary for a server installation rm -rf "${ED}usr/share/bcfg2" || die rm -rf "${ED}usr/share/man/man8" || die else newinitd "${FILESDIR}/${PN}-server-1.2.0.rc" bcfg2-server fi insinto /etc doins examples/bcfg2.conf if use doc; then # install the sphinx documentation pushd doc_output > /dev/null insinto /usr/share/doc/${PF}/html doins -r [a-z]* _images _static || die "Failed to install documentation" popd > /dev/null fi } pkg_postinst () { distutils_pkg_postinst if use server; then einfo "If this is a new installation, you probably need to run:" einfo " bcfg2-admin init" fi } gentoo/files/000077500000000000000000000000001303523157100134535ustar00rootroot00000000000000gentoo/files/bcfg2-server.rc000066400000000000000000000011431303523157100162700ustar00rootroot00000000000000#!/sbin/runscript # # bcfg2-server - bcfg configuration daemon # depend () { need net } start () { ebegin "Starting bcfg2-server" start-stop-daemon --start --quiet \ --pidfile /var/run/bcfg2-server/bcfg2-server.pid \ --startas /usr/sbin/bcfg2-server -- -D /var/run/bcfg2-server/bcfg2-server.pid eend $? "Failed to start bcfg2-server" } stop () { ebegin "Stopping bcfg2-server" start-stop-daemon --stop --quiet \ --pidfile /var/run/bcfg2-server/bcfg2-server.pid \ --signal INT eend $? "Failed to stop bcfg2-server" } man/000077500000000000000000000000001303523157100116315ustar00rootroot00000000000000man/bcfg2-admin.8000066400000000000000000000125611303523157100140000ustar00rootroot00000000000000.TH "BCFG2-ADMIN" "8" "April 06, 2014" "1.3" "Bcfg2" .SH NAME bcfg2-admin \- Perform repository administration tasks .
.nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-admin\fP [\-C \fIconfigfile\fP] \fImode\fP [\fImode args\fP] [\fImode options\fP] .SH DESCRIPTION .sp \fBbcfg2\-admin\fP is used to perform Bcfg2 repository administration. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-E \ encoding Specify the encoding of config files. .TP .BI \-Q \ path Specify the path to the server repository. .TP .BI \-S \ server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "\fI\%https://server:port\fP" .TP .B \-d Enable debugging output. .TP .B \-h Print usage information. .TP .BI \-o \ logfile Writes a log to the specified path. .TP .BI \-\-ssl\-key\fB= key Specify the path to the SSL key. .TP .B \-v Enable verbose output. .TP .BI \-x \ password Use \(aqpassword\(aq for client communication. .UNINDENT .SH MODES .INDENT 0.0 .TP .B backup Create an archive of the entire Bcfg2 repository. .TP .B client \fIaction\fP \fIclient\fP [attribute=value] Add, edit, or remove client entries in metadata (See CLIENT OPTIONS below). .TP .B compare \fIold\fP \fInew\fP Compare two client configurations. Can be used to verify consistent behavior between releases. Determine differences between files or directories (See COMPARE OPTIONS below). .TP .B dbshell Call the Django \(aqdbshell\(aq command on the configured database. .TP .B init Initialize a new repository (interactive). .TP .B initreports Initialize the Reporting database. .TP .B minestruct \fIclient\fP [\-f xml\-file] [\-g groups] Build structure entries based on client statistics extra entries (See MINESTRUCT OPTIONS below). .TP .B perf Query server for performance data. .TP .B pull \fIclient\fP \fIentry\-type\fP \fIentry\-name\fP Install configuration information into repo based on client bad entries (See PULL OPTIONS below). .TP .B purgereports Purge historic and expired data from the Reporting database. .TP .B reportssqlall Call the Django \(aqshell\(aq command on the Reporting database. .TP .B reportsstats Print Reporting database statistics. .TP .B scrubreports Scrub the Reporting database for duplicate reasons and orphaned entries. .TP .B shell Call the Django \(aqshell\(aq command on the configured database. .TP .B syncdb Sync the Django ORM with the configured database. .TP .B tidy Remove unused files from repository. .TP .B updatereports Apply database schema updates to the Reporting database. .TP .B validatedb Call the Django \(aqvalidate\(aq command on the configured database. .TP .B viz [\-H] [\-b] [\-k] [\-o png\-file] Create a graphviz diagram of client, group and bundle information (See VIZ OPTIONS below). .TP .B xcmd Provides an XML\-RPC Command Interface to the bcfg2\-server. .UNINDENT .SS CLIENT OPTIONS .INDENT 0.0 .TP .B mode One of the following.
.INDENT 7.0 .TP .B \fIadd\fP Add a client .TP .B \fIdel\fP Delete a client .TP .B \fIlist\fP List all client entries .UNINDENT .TP .B client Specify the client\(aqs name. .TP .B attribute=value Set attribute values when adding a new client. Allowed attributes are \(aqprofile\(aq, \(aquuid\(aq, \(aqpassword\(aq, \(aqlocation\(aq, \(aqsecure\(aq, and \(aqaddress\(aq. .UNINDENT .SS COMPARE OPTIONS .INDENT 0.0 .TP .B \-d \fIN\fP, \-\-diff\-lines \fIN\fP Show only N lines of a diff .UNINDENT .INDENT 0.0 .TP .B \-c, \-\-color Show colors even if not run from a TTY .TP .B \-q, \-\-quiet Only show that entries differ, not how they differ .UNINDENT .INDENT 0.0 .TP .B old Specify the location of the old configuration(s). .TP .B new Specify the location of the new configuration(s). .UNINDENT .SS MINESTRUCT OPTIONS .INDENT 0.0 .TP .B client Client whose metadata is to be searched for extra entries. .TP .B \-g \fIgroups\fP Hierarchy of groups in which to place the extra entries. .TP .B \-f \fIoutputfile\fP Specify the xml file in which to write the extra entries. .UNINDENT .SS PULL OPTIONS .INDENT 0.0 .TP .B client Specify the name of the client to search for. .TP .B entry type Specify the type of the entry to pull. .TP .B entry name Specify the name of the entry to pull. .UNINDENT .SS VIZ OPTIONS .INDENT 0.0 .TP .B \-H, \-\-includehosts Include hosts in diagram. .TP .B \-b, \-\-includebundles Include bundles in diagram. .UNINDENT .INDENT 0.0 .TP .B \-o \fIoutfile\fP, \-\-outfile \fIoutfile\fP Write to outfile instead of stdout. .UNINDENT .INDENT 0.0 .TP .B \-k, \-\-includekey Add a shape/color key. .UNINDENT .INDENT 0.0 .TP .B \-c \fIhostname\fP, \-\-only\-client \fIhostname\fP Only show groups and bundles for the named client. .UNINDENT .SH SEE ALSO .sp \fIbcfg2\-info(8)\fP, \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-build-reports.8000066400000000000000000000027161303523157100155040ustar00rootroot00000000000000.TH "BCFG2-BUILD-REPORTS" "8" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-build-reports \- Generate state reports for Bcfg2 clients . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-build\-reports\fP [\fI\-A\fP] [\fI\-c\fP] [\fI\-s\fP] .SH DESCRIPTION .sp \fBbcfg2\-build\-reports\fP is used to build all client state reports. See the Bcfg2 manual for report setup information. .SH OPTIONS .INDENT 0.0 .TP .B \-A Displays all data. .TP .BI \-c \ configfile Specify an alternate report configuration path. The default is \fBrepo/etc/reports\-configuration.xml\fP. .TP .B \-h Print usage information. .TP .BI \-s \ statsfile Use an alternative path for the statistics file. The default is \fBrepo/etc/statistics.xml\fP. .UNINDENT .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. .
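.SH EXAMPLE
.sp
The invocation below is an illustrative sketch only, combining the documented \-A, \-c, and \-s options; the \fB/var/lib/bcfg2\fP repository path is an assumption made for the example and is not a default stated on this page.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
bcfg2\-build\-reports \-A \-c /var/lib/bcfg2/etc/reports\-configuration.xml \-s /var/lib/bcfg2/etc/statistics.xml
.ft P
.fi
.UNINDENT
.UNINDENT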
man/bcfg2-crypt.8000066400000000000000000000115401303523157100140450ustar00rootroot00000000000000.TH "BCFG2-CRYPT" "8" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-crypt \- Bcfg2 encryption and decryption utility . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-crypt\fP [\-C \fIconfigfile\fP] [\-\-decrypt|\-\-encrypt] [\-\-cfg|\-\-properties] [\-\-stdout] [\-\-remove] [\-\-xpath \fIxpath\fP] [\-p \fIpassphrase\-or\-name\fP] [\-v] [\-I] \fIfilename\fP [\fIfilename\fP...] .SH DESCRIPTION .sp \fBbcfg2\-crypt\fP performs encryption and decryption of Cfg and Properties files. It\(aqs often sufficient to run \fBbcfg2\-crypt\fP with only the name of the file you wish to encrypt or decrypt; it can usually figure out what to do. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .B \-\-decrypt, \-\-encrypt Select encryption or decryption mode for the given file(s). This is usually unnecessary, as \fBbcfg2\-crypt\fP can often determine which is necessary based on the contents of each file. .TP .B \-\-cfg An XML file should be encrypted in its entirety rather than element\-by\-element. This is only necessary if the file is an XML file whose name ends with \fI.xml\fP and whose top\-level tag is \fI<Properties>\fP. See [MODES] below for details. .TP .B \-\-properties Process a file as an XML Properties file, and encrypt the text of each element separately. This is necessary if, for example, you\(aqve used a different top\-level tag than \fIProperties\fP in your Properties files. See [MODES] below for details. .TP .B \-\-stdout Print the resulting file to stdout instead of writing it to a file. .TP .B \-\-remove Remove the plaintext file after it has been encrypted. Only meaningful for Cfg files. .TP .BI \-\-xpath \ xpath Encrypt the character content of all elements that match the specified XPath expression. The default is \fI*[@encrypted]\fP or \fI*\fP; see [MODES] below for more details. Only meaningful for Properties files. .TP .BI \-p \ passphrase Specify the name of a passphrase specified in the \fI[encryption]\fP section of \fIbcfg2.conf\fP. See [SELECTING PASSPHRASE] below for more details. .TP .B \-v Be verbose. .TP .B \-I When encrypting a Properties file, interactively select the elements whose data should be encrypted. .TP .B \-h Print usage information. .UNINDENT .SH MODES .sp \fBbcfg2\-crypt\fP can encrypt Cfg files or Properties files; they are handled very differently. .INDENT 0.0 .TP .B Cfg When \fBbcfg2\-crypt\fP is used on a Cfg file, the entire file is encrypted. This is the default behavior on files that are not XML, or that are XML but whose top\-level tag is not \fI<Properties>\fP. This can be enforced by use of the \fI\-\-cfg\fP option. .TP .B Properties When \fBbcfg2\-crypt\fP is used on a Properties file, it encrypts the character content of elements matching the XPath expression given by \fI\-\-xpath\fP.
By default the expression is \fI*[@encrypted]\fP, which matches all elements with an \fIencrypted\fP attribute. If you are encrypting a file and that expression doesn\(aqt match any elements, then the default is \fI*\fP, which matches everything. When \fBbcfg2\-crypt\fP encrypts the character content of an element, it also adds the \fIencrypted\fP attribute, set to the name of the passphrase used to encrypt that element. When it decrypts an element it does not remove \fIencrypted\fP, though; this lets you easily and efficiently run \fBbcfg2\-crypt\fP against a single Properties file to encrypt and decrypt it without needing to specify a long list of options. See the online Bcfg2 docs on Properties files for more information on how this works. .UNINDENT .SH SELECTING PASSPHRASE .sp The passphrase used to encrypt or decrypt a file is discovered in the following order. .INDENT 0.0 .IP 1. 3 The passphrase given on the command line using \fI\-p\fP is used. .IP 2. 3 If exactly one passphrase is specified in \fIbcfg2.conf\fP, it will be used. .IP 3. 3 If operating in Properties mode, \fBbcfg2\-crypt\fP will attempt to read the name of the passphrase from the encrypted elements. .IP 4. 3 If decrypting, all passphrases will be tried sequentially. .IP 5. 3 If no passphrase has been determined at this point, an error is produced and the file being encrypted or decrypted is skipped. .UNINDENT .SH SEE ALSO .sp \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-info.8000066400000000000000000000064441303523157100136460ustar00rootroot00000000000000.TH "BCFG2-INFO" "8" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-info \- Creates a local version of the Bcfg2 server core for state observation . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-info\fP [\-C \fIconfigfile\fP] [\-E \fIencoding\fP] [\-Q \fIrepository path\fP] [\-h] [\-p] [\-x \fIpassword\fP] [\fImode\fP] [\fImode args\fP] [\fImode options\fP] .SH DESCRIPTION .sp \fBbcfg2\-info\fP instantiates an instance of the Bcfg2 core for data examination and debugging purposes. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-E \ encoding Specify the encoding of config files. .TP .BI \-Q \ path Specify the path to the server repository. .TP .B \-d Enable debugging output. .TP .B \-h Print usage information. .TP .BI \-p \ profile Specify a profile. .TP .BI \-x \ password Use \(aqpassword\(aq for client communication. .UNINDENT .SH MODES .INDENT 0.0 .TP .B build \fIhostname\fP \fIfilename\fP Build config for hostname, writing to filename. .TP .B buildall \fIdirectory\fP Build configs for all clients in directory. .TP .B buildallfile \fIdirectory\fP \fIfilename\fP [\fIhostnames\fP] Build config file for all clients in directory. .TP .B buildbundle \fIfilename\fP \fIhostname\fP Build bundle for hostname (not written to disk).
If filename is a bundle template, it is rendered. .TP .B builddir \fIhostname\fP \fIdirname\fP Build config for hostname, writing separate files to dirname. .TP .B buildfile [\-\-altsrc=*altsrc*] \fIfilename\fP \fIhostname\fP Build config file for hostname (not written to disk). .TP .B bundles Print out group/bundle information. .TP .B clients Print out client/profile information. .TP .B config Print out the configuration of the Bcfg2 server. .TP .B debug Shell out to native python interpreter. .TP .B event_debug Display filesystem events as they are processed. .TP .B groups List groups. .TP .B help Print the list of available commands. .TP .B mappings [\fIentry type\fP] [\fIentry name\fP] Print generator mappings for optional type and name. .TP .B packageresolve \fIhostname\fP \fIpackage\fP [\fIpackage\fP...] Resolve the specified set of packages. .TP .B packagesources \fIhostname\fP Show package sources. .TP .B profile \fIcommand\fP \fIargs\fP Profile a single bcfg2\-info command. .TP .B quit Exit bcfg2\-info command line. .TP .B showentries \fIhostname\fP \fItype\fP Show abstract configuration entries for a given host. .TP .B showclient \fIclient1\fP \fIclient2\fP Show metadata for given hosts. .TP .B update Process pending file events. .TP .B version Print version of this tool. .UNINDENT .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-lint.8000066400000000000000000000112461303523157100136550ustar00rootroot00000000000000.TH "BCFG2-LINT" "8" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-lint \- Check Bcfg2 specification for validity, common mistakes, and style . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-lint\fP [\fIoptions\fP] [\fIplugin\fP [\fIplugin\fP...]] .SH DESCRIPTION .sp \fBbcfg2\-lint\fP checks the Bcfg2 specification for schema validity, common mistakes, and other criteria. It can be quite helpful in finding typos or malformed data. .sp \fBbcfg2\-lint\fP exits with a return value of 2 if errors were found, and 3 if warnings (but no errors) were found. Any other non\-0 exit value denotes some failure in the script itself. .sp \fBbcfg2\-lint\fP is a rewrite of the older bcfg2\-repo\-validate tool. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-Q \ path Specify the path to the server repository. .TP .B \-v Be verbose. .TP .B \-\-lint\-config Specify path to bcfg2\-lint.conf (default \fB/etc/bcfg2\-lint.conf\fP). .TP .B \-\-stdin Rather than operating on all files in the Bcfg2 specification, only validate a list of files supplied on stdin. This mode is particularly useful in pre\-commit hooks. .sp This makes a few assumptions: .sp Metadata files will only be checked if a valid chain of XIncludes can be followed all the way from clients.xml or groups.xml. 
Since there are multiple formats of metadata stored in Metadata/ (i.e., clients and groups), there is no way to determine which sort of data a file contains unless there is a valid chain of XIncludes. It may be useful to always specify all metadata files should be checked, even if not all of them have changed. .sp Property files will only be validated if both the property file itself and its matching schema are included on stdin. .UNINDENT .SH PLUGINS .sp In addition to the plugins listed below, Bcfg2 server plugins may have their own \fIbcfg2\-lint\fP functionality, which is enabled automatically when the server plugin is enabled. See \fIbcfg2\-lint.conf(5)\fP for more information on lint plugin configuration. .INDENT 0.0 .TP .B Comments Check the specification for VCS keywords and any comments that are required. By default, this only checks that the \fI$Id$\fP keyword is included and expanded in all files. You may specify VCS keywords to check and comments to be required in the config file. (For instance, you might require that every file have a "Maintainer" comment.) .sp In XML files, only comments are checked for the keywords and comments required. .TP .B Genshi Ensure that all Genshi templates are valid and compile properly. .TP .B GroupNames Ensure that all groups called by name in Metadata, Rules, Bundler, GroupPatterns, and Cfg are valid. .TP .B InfoXML Check that certain attributes are specified in \fIinfo.xml\fP files. By default, requires that \fIowner\fP, \fIgroup\fP, and \fImode\fP are specified. Can also require that an \fIinfo.xml\fP exists for all Cfg files, and that paranoid mode be enabled for all files. .TP .B MergeFiles Suggest that similar probes and config files be merged into single probes or TGenshi templates. .TP .B RequiredAttrs Check that all entries have the appropriate required attributes, and that the attributes are in a valid format. This goes above and beyond the validation offered by an XML schema. .TP .B Validate Validate the Bcfg2 specification against the XML schemas. .sp Property files are freeform XML, but if a \fI.xsd\fP file with a matching filename is provided, then schema validation will be performed on property files individually as well. For instance, if you have a property file named \fIntp.xml\fP then by placing a schema for that file in \fIntp.xsd\fP schema validation will be performed on \fIntp.xml\fP. .UNINDENT .SH BUGS .sp \fBbcfg2\-lint\fP may not handle some deprecated plugins as well as it handles newer ones. For instance, there may be some places where it expects all of your configuration files to be handled by Cfg rather than by a mix of Cfg and TGenshi or TCheetah. .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-server(8)\fP, \fIbcfg2\-lint.conf(5)\fP .\" Generated by docutils manpage writer. . man/bcfg2-lint.conf.5000066400000000000000000000113251303523157100145740ustar00rootroot00000000000000.TH "BCFG2-LINT.CONF" "5" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-lint.conf \- Configuration parameters for bcfg2-lint . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . 
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH DESCRIPTION .sp bcfg2\-lint.conf includes configuration parameters for bcfg2\-lint. .SH FILE FORMAT .sp The file is INI\-style and consists of sections and options. A section begins with the name of the sections in square brackets and continues until the next section begins. .sp Options are specified in the form "name=value". .sp The file is line\-based each newline\-terminated line represents either a comment, a section name or an option. .sp Any line beginning with a hash (#) is ignored, as are lines containing only whitespace. .sp The file consists of one \fI[lint]\fP section, up to one \fI[errors]\fP section, and then any number of plugin\-specific sections, documented below. (Note that this makes it quite feasible to combine your \fIbcfg2\-lint.conf\fP into your \fIbcfg2.conf(5)\fP file, if you so desire). .SH GLOBAL OPTIONS .sp These options apply to \fIbcfg2\-lint\fP generally, and must be in the \fI[lint]\fP section. .INDENT 0.0 .TP .B plugins A comma\-delimited list of plugins to run. By default, all plugins are run. This can be overridden by listing plugins on the command line. See \fIbcfg2\-lint(8)\fP for a list of the available plugins. .UNINDENT .SH ERROR HANDLING .sp Error handling is configured in the \fI[errors]\fP section. Each option should be the name of an error and one of \fIerror\fP, \fIwarning\fP, or \fIsilent\fP, which tells \fBbcfg2\-lint\fP how to handle the warning. Error names and their defaults can be displayed by running \fBbcfg2\-lint\fP with the \fI\-\-list\-errors\fP option. .SH PLUGIN OPTIONS .sp These options apply only to a single plugin. Each option should be in a section named for its plugin; for instance, options for the InfoXML plugin would be in a section called \fI[InfoXML]\fP. .sp If a plugin is not listed below, then it has no configuration. .sp In many cases, the behavior of a plugin can be configured by modifying how errors from it are handled. See ERROR HANDLING, above. .SS Comments .sp The \fIComments\fP plugin configuration specifies which VCS keywords and comments are required for which file types. The valid types of file are \fIglobal\fP (all file types), \fIbundler\fP (non\-templated bundle files), \fIgenshibundler\fP (templated bundle files), \fIproperties\fP (property files), \fIcfg\fP (non\-templated Cfg files), \fIgenshi\fP or \fIcheetah\fP (templated Cfg files), \fIinfoxml\fP (info.xml files), and \fIprobe\fP (probe files). .sp The specific types (i.e., types other than "global") all supplement global; they do not override it. The exception is if you specify an empty option, e.g.: .INDENT 0.0 .INDENT 3.5 cfg_keywords = .UNINDENT .UNINDENT .sp By default, the \fI$Id$\fP keyword is checked for and nothing else. .sp Multiple keywords or comments should be comma\-delimited. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .IP \(bu 2 \fI_keywords\fP .UNINDENT .UNINDENT .UNINDENT .sp Ensure that files of the specified type have the given VCS keyword. Do \fInot\fP include the dollar signs. I.e.: .INDENT 0.0 .INDENT 3.5 infoxml_keywords = Revision .UNINDENT .UNINDENT .sp \fInot\fP .INDENT 0.0 .INDENT 3.5 infoxml_keywords = $Revision$ .INDENT 0.0 .IP \(bu 2 \fI_comments\fP .UNINDENT .UNINDENT .UNINDENT .sp Ensure that files of the specified type have a comment containing the given string. 
In XML files, only comments are checked. In plain text files, all lines are checked since comment characters may vary. .SS InfoXML .INDENT 0.0 .TP .B required_attrs A comma\-delimited list of attributes to require on \fI\fP tags. Default is "owner,group,mode". .UNINDENT .SS MergeFiles .INDENT 0.0 .TP .B threshold The threshold at which MergeFiles will suggest merging config files and probes. Default is 75% similar. .UNINDENT .SS Validate .INDENT 0.0 .TP .B schema The full path to the XML Schema files. Default is \fB/usr/share/bcfg2/schema\fP. This can be overridden with the \fI\-\-schema\fP command\-line option. .UNINDENT .SH SEE ALSO .sp \fIbcfg2\-lint(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-report-collector.8000066400000000000000000000033071303523157100162050ustar00rootroot00000000000000.TH "BCFG2-REPORT-COLLECTOR" "8" "July 27, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-report-collector \- Reports collection daemon . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-report\-collector\fP [\fIoptions\fP] .SH DESCRIPTION .sp \fBbcfg2\-report\-collector\fP runs a daemon to collect logs from the LocalFilesystem \fIBcfg2 Reports\fP transport object and add them to the Reporting storage backend. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-D \ pidfile Daemonize, placing the program pid in \fIpidfile\fP. .TP .BI \-E \ encoding Specify the encoding of config files. .TP .BI \-Q \ path Specify the path to the server repository. .TP .BI \-W \ configfile Specify the path to the web interface configuration file. .TP .B \-d Enable debugging output. .TP .B \-h Print usage information. .TP .BI \-o \ path Set path of file log .TP .B \-v Run in verbose mode. .TP .B \-\-version Print the version and exit .UNINDENT .SH SEE ALSO .sp \fIbcfg2\-server(8)\fP, \fIbcfg2\-reports(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-reports.8000066400000000000000000000073631303523157100144120ustar00rootroot00000000000000.TH "BCFG2-REPORTS" "8" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-reports \- Query reporting system for client status . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . 
.SH SYNOPSIS .sp \fBbcfg2\-reports\fP [\-a] [\-b \fINAME\fP] [\-c] [\-d] [\-e \fINAME\fP] [\-h] [\-m \fINAME\fP] [\-s \fINAME\fP] [\-x \fINAME\fP] [\-\-badentry=\fIKIND,NAME\fP] [\-\-extraentry=\fIKIND,NAME\fP] [\-\-fields=\fIARG1,ARG2,...\fP] [\-\-modifiedentry=\fIKIND,NAME\fP] [\-\-sort=\fIARG1,ARG2,...\fP] [\-\-stale] [\-v] .SH DESCRIPTION .sp \fBbcfg2\-reports\fP allows you to retrieve data from the database about clients, and the states of their current interactions. It also allows you to change the expired/unexpired states. The utility runs as a standalone application. It does, however, use the models from \fBsrc/lib/Bcfg2/Reporting/models.py\fP. .SH OPTIONS .INDENT 0.0 .TP .B \-h Print usage information. .UNINDENT .SH MODES .sp The following are various modes available for \fBbcfg2\-reports\fP. .SS Single\-Host Modes .INDENT 0.0 .TP .BI \-b, \-\-bad \ hostname Shows bad entries from the current interaction of \fIhostname\fP. .TP .BI \-e, \-\-extra \ hostname Shows extra entries from the current interaction of \fIhostname\fP. .TP .BI \-m, \-\-modified \ hostname Shows modified entries from the current interaction of \fIhostname\fP. .TP .BI \-s, \-\-show \ hostname Shows bad, modified, and extra entries from the current interaction of \fIhostname\fP. .TP .BI \-t, \-\-total \ hostname Shows total number of managed and good entries from the current interaction of \fIhostname\fP. .TP .BI \-x, \-\-expire \ hostname Toggles expired/unexpired state of \fIhostname\fP. .TP .B \-a, \-\-all Show all hosts, including expired hosts. .UNINDENT .SS Host Selection Modes .INDENT 0.0 .TP .B \-a, \-\-all Show all hosts, including expired hosts. .TP .B \-c, \-\-clean Show only clean hosts. .TP .B \-d, \-\-dirty Show only dirty hosts. .TP .B \-\-stale Show hosts that haven\(aqt run in the last 24 hours. .UNINDENT .SS Entry Modes .sp The following mode flags require either a comma\-delimited list of any number of \fI<kind>:<name>\fP arguments describing entries, or the \fI\-\-file\fP option. .INDENT 0.0 .TP .BI \-\-badentry\fB= entrylist Shows only hosts whose current interaction has bad entries matching the given entry or entries. .TP .BI \-\-extraentry\fB= entrylist Shows only hosts whose current interaction has extra entries matching the given entry or entries. .TP .BI \-\-entrystatus\fB= entry Shows the status of the single entry (given by \fI<kind>:<name>\fP) on all hosts. .TP .BI \-\-modifiedentry\fB= entrylist Shows only hosts whose current interaction has modified entries matching the given entry or entries. .UNINDENT .SS Entry Options .sp The following options can be used with the above Entry Modes. .INDENT 0.0 .TP .BI \-\-fields\fB= fields Only display the listed fields. Takes a comma\-delimited list of field names. .TP .BI \-\-file\fB= file Read \fI<kind>:<name>\fP pairs from the specified file instead of the command line. .UNINDENT .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2-server.8000066400000000000000000000037531303523157100142100ustar00rootroot00000000000000.TH "BCFG2-SERVER" "8" "July 27, 2013" "1.3" "Bcfg2" .SH NAME bcfg2-server \- Server for client configuration specifications . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] .
nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\-server\fP [\-d] [\-v] [\-C \fIconfigfile\fP] [\-D \fIpidfile\fP] [\-E \fIencoding\fP] [\-Q \fIrepo path\fP] [\-S \fIserver url\fP] [\-o \fIlogfile\fP] [\-x \fIpassword\fP] [\-\-ssl\-key=\fIssl key\fP] [\-\-no\-fam\-blocking] .SH DESCRIPTION .sp \fBbcfg2\-server\fP is the daemon component of Bcfg2 which serves configurations to clients based on the data in its repository. .SH OPTIONS .INDENT 0.0 .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-D \ pidfile Daemonize, placing the program pid in \fIpidfile\fP. .TP .BI \-E \ encoding Specify the encoding of config files. .TP .BI \-Q \ path Specify the path to the server repository. .TP .BI \-S \ server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "\fI\%https://server:port\fP" .TP .B \-d Enable debugging output. .TP .B \-v Run in verbose mode. .TP .B \-h Print usage information. .TP .BI \-\-ssl\-key\fB= key Specify the path to the SSL key. .TP .BI \-\-no\-fam\-blocking Synonym for fam_blocking = False in bcfg2.conf .UNINDENT .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-lint(8)\fP, \fIbcfg2.conf(5)\fP .\" Generated by docutils manpage writer. . man/bcfg2.1000066400000000000000000000126631303523157100127060ustar00rootroot00000000000000.TH "BCFG2" "1" "March 18, 2013" "1.3" "Bcfg2" .SH NAME bcfg2 \- Bcfg2 client tool . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" Man page generated from reStructuredText. . .SH SYNOPSIS .sp \fBbcfg2\fP [\fIoptions\fP] .SH DESCRIPTION .sp \fBbcfg2\fP runs the Bcfg2 configuration process on the current host. This process consists of the following steps. .INDENT 0.0 .IP \(bu 2 Fetch and execute probes .IP \(bu 2 Upload probe results .IP \(bu 2 Fetch the client configuration .IP \(bu 2 Check the current client state .IP \(bu 2 Attempt to install the desired configuration .IP \(bu 2 Upload statistics about the Bcfg2 execution and client state .UNINDENT .SH OPTIONS .INDENT 0.0 .TP .B \-B Configure everything except the given bundle(s). .TP .BI \-C \ configfile Specify alternate bcfg2.conf location. .TP .BI \-D \ drivers Specify a comma\-delimited set of Bcfg2 tool drivers. \fINOTE: only drivers listed will be loaded. (e.g., if you do not include POSIX, you will be unable to verify/install Path entries).\fP .TP .BI \-E \ encoding Specify the encoding of config files. .TP .B \-I Run bcfg2 in interactive mode. The user will be prompted before each change. .TP .B \-O Omit lock check. .TP .B \-P Run bcfg2 in paranoid mode. 
Diffs will be logged for configuration files marked as paranoid by the Bcfg2 server. .TP .B \-Q Run bcfg2 in "bundle quick" mode, where only entries in a bundle are verified or installed. This runs much faster than \-q, but doesn\(aqt provide statistics to the server at all. In order for this option to work, the \-b option must also be provided. This option is incompatible with \-r. .TP .BI \-R \ retrycount Specify the number of times that the client will attempt to retry network communication. .TP .BI \-S \ server Manually specify the server location (as opposed to using the value in bcfg2.conf). This should be in the format "\fI\%https://server:port\fP" .TP .B \-Z Do not configure independent entries. .TP .BI \-b \ bundles Run only the specified colon\-delimited set of bundles. .TP .BI \-c \ cachefile Cache a copy of the configuration in cachefile. .TP .BI \-\-ca\-cert\fB= cacert Specify the path to the SSL CA certificate. .TP .B \-d Enable debugging output. .TP .B \-e When in verbose mode, display extra entry information. .TP .BI \-f \ path Configure from a file rather than querying the server. .TP .B \-h Print usage information. .TP .B \-k Run in bulletproof mode. This currently only affects behavior in the debian toolset; it calls apt\-get update and clean and dpkg \-\-configure \-\-pending. .TP .BI \-l \ decisionmode Run the client in the specified decision list mode ("whitelist" or "blacklist"), or "none", which can be used in order to override the decision list mode specified in bcfg2.conf. This approach is needed when particular changes are deemed "high risk". It gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available. Because collaborative configuration is one of the remaining hard issues in configuration management, these issues typically crop up in environments with several administrators and much configuration variety. (This setting will be ignored if the \-f option is also specified). .TP .B \-n Run bcfg2 in dry\-run mode. No changes will be made to the system. .TP .BI \-o \ logfile Writes a log to the specified path. .TP .BI \-p \ profile Assert a profile for the current client. .TP .B \-q Run bcfg2 in quick mode. Package checksum verification won\(aqt be performed. This mode relaxes the constraints of correctness, and thus should only be used in safe conditions. .TP .BI \-r \ mode Cause bcfg2 to remove extra configuration elements it detects. Mode is one of "all", "Services", "Packages", or "Users". "all" removes all extra entries. "Services", "Packages", and "Users" remove only the extra configuration elements of the respective type. ("Services" actually just disables extra services, since they can\(aqt be removed, and "Users" removes extra POSIXUser and POSIXGroup entries.) .TP .BI \-s \ servicemode Set bcfg2 interaction level for services. Default behavior is to modify all services affected by reconfiguration. "build" mode attempts to stop all services started. "disabled" suppresses all attempts to modify services. .TP .BI \-\-ssl\-cert\fB= cert Specify the path to the SSL certificate. .TP .BI \-\-ssl\-cns\fB= CNs Colon\-delimited list of acceptable SSL server Common Names. .TP .BI \-\-ssl\-key\fB= key Specify the path to the SSL key. .TP .BI \-u \ user Attempt to authenticate as \(aquser\(aq. .TP .BI \-t \ timeout Set the timeout (in seconds) for client communication. Default is 90 seconds. .TP .B \-v Run bcfg2 in verbose mode. .TP .BI \-x \ password Use \(aqpassword\(aq for client communication.
.TP .B \-z Only configure independent entries, ignore bundles. .UNINDENT .SH SEE ALSO .sp \fIbcfg2\-server(8)\fP, \fIbcfg2\-info(8)\fP .\" Generated by docutils manpage writer. . man/bcfg2.conf.5000066400000000000000000000523201303523157100136300ustar00rootroot00000000000000.\" Man page generated from reStructuredText. . .TH "BCFG2.CONF" "5" "November 04, 2014" "1.4" "Bcfg2" .SH NAME bcfg2.conf \- Configuration parameters for Bcfg2 . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH DESCRIPTION .sp bcfg2.conf includes configuration parameters for the Bcfg2 server and client. .SH FILE FORMAT .sp The file is INI\-style and consists of sections and options. A section begins with the name of the sections in square brackets and continues until the next section begins. .sp Options are specified in the form "name=value". .sp The file is line\-based each newline\-terminated line represents either a comment, a section name or an option. .sp Any line beginning with a hash (#) is ignored, as are lines containing only whitespace. .SH SERVER OPTIONS .sp These options are only necessary on the Bcfg2 server. They are specified in the \fB[server]\fP section of the configuration file. .INDENT 0.0 .TP .B repository Specifies the path to the Bcfg2 repository containing all of the configuration specifications. The repository should be created using the \fIbcfg2\-admin init\fP command. .TP .B filemonitor The file monitor used to watch for changes in the repository. The default is the best available monitor. The following values are valid: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C inotify gamin pseudo .ft P .fi .UNINDENT .UNINDENT .TP .B fam_blocking Whether the server should block at startup until the file monitor backend has processed all events. This can cause a slower startup, but ensure that all files are recognized before the first client is handled. Defaults to True. .TP .B ignore_files A comma\-separated list of globs that should be ignored by the file monitor. Default values are: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C *~ *# #* *.swp *.swpx *.swx SCCS \&.svn 4913 \&.gitignore .ft P .fi .UNINDENT .UNINDENT .TP .B listen_all This setting tells the server to listen on all available interfaces. The default is to only listen on those interfaces specified by the bcfg2 setting in the components section of \fBbcfg2.conf\fP\&. .TP .B plugins A comma\-delimited list of enabled server plugins. Currently available plugins are: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C ACL Bundler Bzr Cfg Cvs Darcs Decisions Defaults Deps FileProbes Fossil Git GroupLogic GroupPatterns Guppy Hg Ldap Metadata NagiosGen Ohai Packages Pkgmgr POSIXCompat Probes Properties PuppetENC Reporting Rules SEModules ServiceCompat SSHbase Svn TemplateHelper Trigger .ft P .fi .UNINDENT .UNINDENT .sp Descriptions of each plugin can be found in their respective sections below. .TP .B prefix Specifies a prefix if the Bcfg2 installation isn\(aqt placed in the default location (e.g. 
\fB/usr/local\fP). .TP .B backend Specifies which server core backend to use. Current available options are: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C cherrypy builtin best .ft P .fi .UNINDENT .UNINDENT .sp The default is \fIbest\fP, which is currently an alias for \fIbuiltin\fP\&. More details on the backends can be found in the official documentation. .TP .B user The username or UID to run the daemon as. Default is \fI0\fP\&. .TP .B group The group name or GID to run the daemon as. Default is \fI0\fP\&. .TP .B vcs_root Specifies the path to the root of the VCS working copy that holds your Bcfg2 specification, if it is different from \fIrepository\fP\&. E.g., if the VCS repository does not hold the bcfg2 data at the top level, you may need to set this option. .TP .B umask The umask to set for the server. Default is \fI0077\fP\&. .UNINDENT .SH SERVER PLUGINS .sp This section has a listing of all the plugins currently provided with Bcfg2. .SS ACL Plugin .sp The ACL plugin controls which hosts can make which XML\-RPC calls. .SS Bundler Plugin .sp The Bundler plugin is used to describe groups of inter\-dependent configuration entries, such as the combination of packages, configuration files, and service activations that comprise typical Unix daemons. Bundles are used to add groups of configuration entries to the inventory of client configurations, as opposed to describing particular versions of those entries. .SS Bzr Plugin .sp The Bzr plugin allows you to track changes to your Bcfg2 repository using a GNU Bazaar version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Cfg Plugin .sp The Cfg plugin provides a repository to describe configuration file contents for clients. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines. .SS Cvs Plugin .sp The Cvs plugin allows you to track changes to your Bcfg2 repository using a Concurrent version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Darcs Plugin .sp The Darcs plugin allows you to track changes to your Bcfg2 repository using a Darcs version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Decisions Plugin .sp The Decisions plugin has support for a centralized set of per\-entry installation decisions. This approach is needed when particular changes are deemed "\fIhigh risk\fP"; this gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available. .SS Defaults Plugin .sp The Defaults plugin can be used to populate default attributes for entries. Defaults is \fInot\fP a Generator plugin, so it does not actually bind an entry; Defaults are applied after an entry has been bound, and only populate attributes that are not yet set. .SS Deps Plugin .sp The Deps plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc.)" .SS FileProbes Plugin .sp The FileProbes plugin allows you to probe a client for a file, which is then added to the Cfg specification. If the file changes on the client, FileProbes can either update it in the specification or allow Cfg to replace it. .SS Fossil Plugin .sp The Fossil plugin allows you to track changes to your Bcfg2 repository using a Fossil SCM version control backend. 
Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Git Plugin .sp The Git plugin allows you to track changes to your Bcfg2 repository using a Git version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS GroupLogic Plugin .sp The GroupLogic plugin lets you flexibly assign group membership with a Genshi template. .SS GroupPatterns Plugin .sp The GroupPatterns plugin is a connector that can assign clients group membership based on patterns in client hostnames. .SS Guppy Plugin .sp The Guppy plugin is used to trace memory leaks within the bcfg2\-server process using Guppy. .SS Hg Plugin .sp The Hg plugin allows you to track changes to your Bcfg2 repository using a Mercurial version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Ldap Plugin .sp The Ldap plugin makes it possible to fetch data from an LDAP directory, process it and attach it to your metadata. .SS Metadata Plugin .sp The Metadata plugin is the primary method of specifying Bcfg2 server metadata. .SS NagiosGen Plugin .sp The NagiosGen plugin dynamically generates Nagios configuration files based on Bcfg2 data. .SS Ohai Plugin .sp The Ohai plugin is used to detect information about the client operating system. The data is reported back to the server using JSON. .SS Packages Plugin .sp The Packages plugin is an alternative to Pkgmgr for specifying package entries for clients. Where Pkgmgr explicitly specifies package entry information, Packages delegates control of package version information to the underlying package manager, installing the latest version available through those channels. .SS Pkgmgr Plugin .sp The Pkgmgr plugin resolves the Abstract Configuration Entity "Package" to a package specification that the client can use to detect, verify and install the specified package. .SS POSIXCompat Plugin .sp The POSIXCompat plugin provides a compatibility layer for 1.3 POSIX Entries so that they are compatible with older clients. .SS Probes Plugin .sp The Probes plugin gives you the ability to gather information from a client machine before you generate its configuration. This information can be used with the various templating systems to generate configuration based on the results. .SS Properties Plugin .sp The Properties plugin is a connector plugin that adds information from properties files into client metadata instances. .SS PuppetENC Plugin .sp The PuppetENC plugin is a connector plugin that adds support for Puppet External Node Classifiers. .SS Reporting Plugin .sp The Reporting plugin enables the collection of data for use with Bcfg2\(aqs dynamic reporting system. .SS Rules Plugin .sp The Rules plugin provides literal configuration entries that resolve the abstract configuration entries normally found in Bundler. The literal entries in Rules are suitable for consumption by the appropriate client drivers. .SS SEModules Plugin .sp The SEModules plugin provides a way to distribute SELinux modules via Bcfg2. .SS ServiceCompat Plugin .sp The ServiceCompat plugin converts service entries for older clients. .SS SSHbase Plugin .sp The SSHbase generator plugin manages ssh host keys (both v1 and v2) for hosts. It also manages the ssh_known_hosts file. It can integrate host keys from other management domains and similarly export its keys.
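.sp
As a point of reference only, the sketch below shows how the server options and the plugin list described above might be combined in \fBbcfg2.conf\fP; the repository path and the particular plugin selection are illustrative assumptions, not defaults or recommendations taken from this page.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
[server]
repository = /var/lib/bcfg2
plugins = Bundler,Cfg,Metadata,Packages,Probes,Rules,SSHbase
.ft P
.fi
.UNINDENT
.UNINDENT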
.SS Svn Plugin .sp The Svn plugin allows you to track changes to your Bcfg2 repository using a Subversion backend. Currently, it enables you to get revision information out of your repository for reporting purposes. .SS Trigger Plugin .sp The Trigger plugin provides a method for calling external scripts when clients are configured. .SH CACHING OPTIONS .sp These options are specified in the \fB[caching]\fP section. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B client_metadata The following four caching modes are available for client metadata: .INDENT 7.0 .IP \(bu 2 off: No caching of client metadata objects is performed. This is the default. .IP \(bu 2 initial: Only initial metadata objects are cached. Initial metadata objects are created only from the data in the Metadata plugin, before additional groups from other plugins are merged in. .IP \(bu 2 cautious: Final metadata objects are cached, but each client’s cache is cleared at the start of each client run, immediately after probe data is received. Cache is also cleared as in aggressive mode. \fIon\fP is a synonym for cautious. .IP \(bu 2 aggressive: Final metadata objects are cached. Each plugin is responsible for clearing cache when appropriate. .UNINDENT .UNINDENT .UNINDENT .UNINDENT .SH CLIENT OPTIONS .sp These options only affect client functionality. They can be specified in the \fB[client]\fP section. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B decision Specify the server decision list mode (whitelist or blacklist). (This setting will be ignored if the client is called with the \-f option). .TP .B drivers Specify tool driver set to use. This option can be used to explicitly specify the client tool drivers you want to use when the client is run. .TP .B paranoid Run the client in paranoid mode. .TP .B profile Assert the given profile for the host. .UNINDENT .UNINDENT .UNINDENT .SH COMMUNICATION OPTIONS .sp Specified in the \fB[communication]\fP section. These options define settings used for client\-server communication. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B ca The path to a file containing the CA certificate. This file is required on the server, and optional on clients. However, if the cacert is not present on clients, the server cannot be verified. .TP .B certificate The path to a file containing a PEM formatted certificate for the SSL key, signed by the CA certificate. This setting is required on the server in all cases, and required on clients if using client certificates. .TP .B key Specifies the path to a file containing the SSL Key. This is required on the server in all cases, and required on clients if using client certificates. .TP .B password Required on both the server and clients. On the server, sets the password clients need to use to communicate. On a client, sets the password to use to connect to the server. .TP .B protocol Communication protocol to use. Defaults to xmlrpc/tlsv1. .TP .B retries A client\-only option. Number of times to retry network communication. Default is 3 retries. .TP .B retry_delay A client\-only option. Number of seconds to wait in between retrying network communication. Default is 1 second. .TP .B serverCommonNames A client\-only option. A colon\-separated list of Common Names the client will accept in the SSL certificate presented by the server. .TP .B timeout A client\-only option. The network communication timeout. .TP .B user A client\-only option. The UUID of the client. .UNINDENT .UNINDENT .UNINDENT .SH COMPONENT OPTIONS .sp Specified in the \fB[components]\fP section.
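.sp
For illustration only, a typical \fB[components]\fP section might look like the following sketch; the server URL and lockfile path are placeholder values, not defaults shipped with Bcfg2:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
[components]
bcfg2 = https://bcfg2.example.com:6789
encoding = UTF\-8
lockfile = /var/lock/bcfg2.run
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
The individual options are described below.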
.INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B bcfg2 URL of the server. On the server this specifies which interface and port the server listens on. On the client, this specifies where the client will attempt to contact the server. .sp e.g., \fIbcfg2 = https://10.3.1.6:6789\fP .TP .B encoding Text encoding of configuration files. Defaults to UTF\-8. .TP .B lockfile The path to the client lock file, which is used to ensure that only one Bcfg2 client runs at a time on a single client. .UNINDENT .UNINDENT .UNINDENT .SH LOGGING OPTIONS .sp Specified in the \fB[logging]\fP section. These options control the server logging functionality. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B debug Whether or not to enable debug\-level log output. Default is false. .TP .B path Server log file path. .TP .B syslog Whether or not to send logging data to syslog. Default is true. .TP .B verbose Whether or not to enable verbose log output. Default is false. .UNINDENT .UNINDENT .UNINDENT .SH MDATA OPTIONS .sp Specified in the \fB[mdata]\fP section. These options affect the default metadata settings for Paths with type=\(aqfile\(aq. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B owner Global owner for Paths (defaults to root) .TP .B group Global group for Paths (defaults to root) .TP .B mode Global permissions for Paths (defaults to 644) .TP .B secontext Global SELinux context for Path entries (defaults to \fI__default__\fP, which restores the expected context) .TP .B paranoid Global paranoid settings for Paths (defaults to false) .TP .B sensitive Global sensitive settings for Paths (defaults to false) .TP .B important Global important settings for Paths (defaults to false) .UNINDENT .UNINDENT .UNINDENT .SH PACKAGES OPTIONS .sp The following options are specified in the \fB[packages]\fP section. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B backends Comma\-separated list of backends for dependency resolution. Default is "Yum,Apt,Pac,Pkgng". .TP .B resolver Enable dependency resolution. Default is 1 (true). .TP .B metadata Enable metadata processing. Default is 1 (true). If metadata is disabled, it’s implied that resolver is also disabled. .TP .B yum_config The path at which to generate Yum configs. No default. .TP .B apt_config The path at which to generate APT configs. No default. .TP .B gpg_keypath The path on the client to which RPM GPG keys will be copied before they are imported. Default is \fB/etc/pki/rpm\-gpg\fP\&. .TP .B version Set the version attribute used when binding Packages. Default is auto. .UNINDENT .UNINDENT .UNINDENT .sp The following options are specified in the \fB[packages:yum]\fP section. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B use_yum_libraries By default, Bcfg2 uses an internal implementation of Yum’s dependency resolution and other routines so that the Bcfg2 server can be run on a host that does not support Yum itself. If you run the Bcfg2 server on a machine that does have Yum libraries, however, you can enable use of those native libraries in Bcfg2 by setting this to 1. .TP .B helper Path to bcfg2\-yum\-helper. By default, Bcfg2 looks first in $PATH and then in \fB/usr/sbin/bcfg2\-yum\-helper\fP for the helper. .UNINDENT .UNINDENT .UNINDENT .sp The following options are specified in the \fB[packages:pulp]\fP section. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B username The username of a Pulp user that will be used to register new clients and bind them to repositories. .TP .B password The password of a Pulp user that will be used to register new clients and bind them to repositories.
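.sp
As an illustrative sketch only, the package\-related sections of \fBbcfg2.conf\fP might be combined as follows; the backend choice, the yum_config path, and the decision to enable the native Yum libraries are example values, not recommendations or shipped defaults:
.sp
.nf
.ft C
[packages]
backends = Yum
resolver = 1
metadata = 1
yum_config = /etc/yum.repos.d/bcfg2.repo

[packages:yum]
use_yum_libraries = 1
.ft P
.fi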
.UNINDENT .UNINDENT .UNINDENT .sp All other options in the \fB[packages:yum]\fP section will be passed along verbatim to the Yum configuration if you are using the native Yum library support. .SH PARANOID OPTIONS .sp These options allow for finer\-grained control of the paranoid mode on the Bcfg2 client. They are specified in the \fB[paranoid]\fP section of the configuration file. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B path Custom path for backups created in paranoid mode. The default is in \fB/var/cache/bcfg2\fP\&. .TP .B max_copies Specify a maximum number of copies for the server to keep when running in paranoid mode. Only the most recent versions of these copies will be kept. .UNINDENT .UNINDENT .UNINDENT .SH SSL CA OPTIONS .sp These options are necessary to configure the SSL CA feature of the Cfg plugin and can be found in the \fB[sslca_default]\fP section of the configuration file. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B config Specifies the location of the openssl configuration file for your CA. .TP .B passphrase Specifies the passphrase for the CA’s private key (if necessary). If no passphrase exists, it is assumed that the private key is stored unencrypted. .TP .B chaincert Specifies the location of your ssl chaining certificate. This is used when pre\-existing certificate hostfiles are found, so that they can be validated and only regenerated if they no longer meet the specification. If you’re using a self\-signed CA, this would be the CA cert that you generated. .UNINDENT .UNINDENT .UNINDENT .SH DATABASE OPTIONS .sp Server\-only, specified in the \fB[database]\fP section. These options control the database connection of the server. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B engine The database engine used by server plugins. One of the following: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C postgresql mysql sqlite3 ado_mssql .ft P .fi .UNINDENT .UNINDENT .TP .B name The name of the database to use for server data. If \(aqdatabase_engine\(aq is set to \(aqsqlite3\(aq this is a file path to the sqlite file and defaults to \fB$REPOSITORY_DIR/etc/bcfg2.sqlite\fP\&. .TP .B user User for database connections. Not used for sqlite3. .TP .B password Password for database connections. Not used for sqlite3. .TP .B host Host for database connections. Not used for sqlite3. .TP .B port Port for database connections. Not used for sqlite3. .TP .B options Various options for the database connection. The value expected is the literal value of the Django OPTIONS setting. .TP .B reporting_engine The database engine used by the Reporting plugin. One of the following: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C postgresql mysql sqlite3 ado_mssql .ft P .fi .UNINDENT .UNINDENT .sp If reporting_engine is not specified, the Reporting plugin uses the same database as the other server plugins. .TP .B reporting_name The name of the database to use for reporting data. If \(aqdatabase_engine\(aq is set to \(aqsqlite3\(aq this is a file path to the sqlite file and defaults to \fB$REPOSITORY_DIR/etc/reporting.sqlite\fP. .TP .B reporting_user User for reporting database connections. Not used for sqlite3. .TP .B reporting_password Password for reporting database connections. Not used for sqlite3. .TP .B reporting_host Host for reporting database connections. Not used for sqlite3. .TP .B reporting_port Port for reporting database connections. Not used for sqlite3. .TP .B reporting_options Various options for the reporting database connection. The value expected is the literal value of the Django OPTIONS setting.
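.sp
For illustration only, a minimal \fB[database]\fP section using SQLite might look like the following sketch; the file path is an example and need not match your installation:
.sp
.nf
.ft C
[database]
engine = sqlite3
name = /var/lib/bcfg2/etc/bcfg2.sqlite
.ft P
.fi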
.UNINDENT .UNINDENT .UNINDENT .SH REPORTING OPTIONS .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B config Specifies the location of the reporting configuration (default is /etc/bcfg2\-web.conf). .TP .B time_zone Specifies a time zone other than that used on the system. (Note that this will cause the Bcfg2 server to log messages in this time zone as well). .TP .B web_debug Turn on Django debugging. .TP .B max_children Maximum number of children for the reporting collector. Use 0 to disable the limit. (default is 0) .TP .B django_settings Arbitrary options for the Django installation. The value expected is a literal Python dictionary that is merged with the already set Django settings. .UNINDENT .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIbcfg2(1)\fP, \fIbcfg2\-server(8)\fP .\" Generated by docutils manpage writer. . misc/000077500000000000000000000000001303523157100120115ustar00rootroot00000000000000misc/apache/000077500000000000000000000000001303523157100132325ustar00rootroot00000000000000misc/apache/bcfg2.conf000066400000000000000000000012541303523157100150660ustar00rootroot00000000000000 # # If the root is changed update the static content alias as well # WSGIScriptAlias /bcfg2 "/usr/share/bcfg2/reports.wsgi" WSGISocketPrefix /var/run/apache2/wsgi WSGIDaemonProcess Bcfg2.Server.Reports processes=1 threads=10 WSGIProcessGroup Bcfg2.Server.Reports # # Manually set this to override the static content # #SetEnv bcfg2.media_url /bcfg2/site_media/ # # This should have the same prefix as WSGIScriptAlias # Alias "/bcfg2/site_media/" "/usr/share/bcfg2/site_media/" <Directory "/usr/share/bcfg2/site_media/"> Options None AllowOverride None Order allow,deny Allow from all </Directory> misc/bcfg2-selinux.spec000066400000000000000000000134351303523157100153430ustar00rootroot00000000000000%global __python python %{!?py_ver: %global py_ver %(%{__python} -c 'import sys;print(sys.version[0:3])')} %global pythonversion %{py_ver} %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?_initrddir: %global _initrddir %{_sysconfdir}/rc.d/init.d} %global selinux_policyver %(%{__sed} -e 's,.*selinux-policy-\\([^/]*\\)/.*,\\1,' /usr/share/selinux/devel/policyhelp 2>/dev/null || echo 0.0.0) %global selinux_types %(%{__awk} '/^#[[:space:]]*SELINUXTYPE=/,/^[^#]/ { if ($3 == "-") printf "%s ", $2 }' /etc/selinux/config 2>/dev/null) %global selinux_variants %([ -z "%{selinux_types}" ] && echo mls strict targeted || echo %{selinux_types}) # For -pre or -rc releases, remove the initial # characters from the appropriate line below.
# # Don't forget to change the Release: tag below to something like 0.1 #%%global _rc 1 %global _pre pre2 %global _pre_rc %{?_pre:.pre%{_pre}}%{?_rc:.rc%{_rc}} Name: bcfg2-selinux Version: 1.4.0 Release: 1%{?_pre_rc}%{?dist} Summary: Bcfg2 Client and Server SELinux policy %if 0%{?suse_version} Group: System/Management Conflicts: selinux-policy = 2.20120725 %else Group: Applications/System # the selinux reference policy 2.20120725 (3.11.1 in RH versioning) # contains a bogus bcfg2 module Conflicts: selinux-policy = 3.11.1 %endif License: BSD URL: http://bcfg2.org Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch BuildRequires: checkpolicy, selinux-policy-devel, hardlink BuildRequires: /usr/share/selinux/devel/policyhelp Requires: selinux-policy >= %{selinux_policyver} Requires: %{name} = %{version}-%{release} Requires(post): /usr/sbin/semodule, /sbin/restorecon, /sbin/fixfiles, bcfg2 Requires(postun): /usr/sbin/semodule, /sbin/restorecon, /sbin/fixfiles, bcfg2 %description Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 server and client SELinux policy. 
%prep %setup -q -n %{name}-%{version}%{?_pre_rc} %build cd redhat/selinux for selinuxvariant in %{selinux_variants}; do make NAME=${selinuxvariant} -f /usr/share/selinux/devel/Makefile mv bcfg2.pp bcfg2.pp.${selinuxvariant} make NAME=${selinuxvariant} -f /usr/share/selinux/devel/Makefile clean done cd - %install for selinuxvariant in %{selinux_variants}; do install -d %{buildroot}%{_datadir}/selinux/${selinuxvariant} install -p -m 644 redhat/selinux/bcfg2.pp.${selinuxvariant} \ %{buildroot}%{_datadir}/selinux/${selinuxvariant}/bcfg2.pp done /usr/sbin/hardlink -cv %{buildroot}%{_datadir}/selinux %clean [ "%{buildroot}" != "/" ] && %{__rm} -rf %{buildroot} || exit 2 %files %defattr(-,root,root,0755) %doc redhat/selinux/* %{_datadir}/selinux/*/bcfg2.pp %post for selinuxvariant in %{selinux_variants}; do /usr/sbin/semodule -s ${selinuxvariant} -i \ %{_datadir}/selinux/${selinuxvariant}/bcfg2.pp &> /dev/null || : done /sbin/fixfiles -R bcfg2 restore || : if rpm -q bcfg2-server >& /dev/null; then /sbin/fixfiles -R bcfg2-server restore || : fi /sbin/restorecon -R %{_localstatedir}/cache/bcfg2 || : /sbin/restorecon -R %{_localstatedir}/lib/bcfg2 || : %postun if [ $1 -eq 0 ] ; then for selinuxvariant in %{selinux_variants}; do /usr/sbin/semodule -s ${selinuxvariant} -r bcfg2 &> /dev/null || : done /sbin/fixfiles -R bcfg2 restore || : if rpm -q bcfg2-server >& /dev/null; then /sbin/fixfiles -R bcfg2-server restore || : fi [ -d %{_localstatedir}/cache/bcfg2 ] && \ /sbin/restorecon -R %{_localstatedir}/cache/bcfg2 || : [ -d %{_localstatedir}/lib/bcfg2 ] && \ /sbin/restorecon -R %{_localstatedir}/lib/bcfg2 || : fi %changelog * Thu Nov 07 2013 Sol Jerome 1.3.3-1 - New upstream release * Mon Jul 01 2013 Sol Jerome 1.3.2-1 - New upstream release * Thu Mar 21 2013 Sol Jerome 1.3.1-1 - New upstream release * Fri Mar 15 2013 Sol Jerome 1.3.0-0.0 - New upstream release * Tue Jan 29 2013 Sol Jerome 1.3.0-0.0rc2 - New upstream release * Wed Jan 09 2013 Sol Jerome 1.3.0-0.0rc1 - New upstream release * Tue Oct 30 2012 Sol Jerome 1.3.0-0.0pre2 - New upstream release * Fri Sep 14 2012 Chris St. Pierre 1.3.0-0.2pre1 - Broke bcfg2-selinux into its own specfile misc/bcfg2.spec000066400000000000000000001113401303523157100136500ustar00rootroot00000000000000# Fedora 13+ and EL6 contain these macros already; only needed for EL5 %if 0%{?rhel} && 0%{?rhel} <= 5 %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") %define python_version %(%{__python} -c 'import sys;print(sys.version[0:3])') %endif # openSUSE macro translation %if 0%{?suse_version} %global python_version %{py_ver} %{!?_initrddir: %global _initrddir %{_sysconfdir}/rc.d/init.d} # openSUSE < 11.2 %if %{suse_version} < 1120 %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") %endif %endif # For -pre or -rc releases, remove the initial # characters from the appropriate line below. 
# # Don't forget to change the Release: tag below to something like 0.1 #%%global _rc rc1 %global _pre pre2 %global _nightly 1 %global _date %(date +%Y%m%d) %global _pre_rc %{?_pre:%{_pre}}%{?_rc:%{_rc}} # cherrypy 3.3 actually doesn't exist yet, but 3.2 has bugs that # prevent it from working: # https://bitbucket.org/cherrypy/cherrypy/issue/1154/assertionerror-in-recv-when-ssl-is-enabled %global build_cherry_py 0 Name: bcfg2 Version: 1.4.0 Release: 0.1.%{?_nightly:nightly.%{_date}}%{?_pre_rc}%{?dist} Summary: A configuration management system %if 0%{?suse_version} # http://en.opensuse.org/openSUSE:Package_group_guidelines Group: System/Management %else Group: Applications/System %endif License: BSD URL: http://bcfg2.org Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz # Used in %%check Source1: http://www.w3.org/2001/XMLSchema.xsd %if %{?rhel}%{!?rhel:10} <= 5 || 0%{?suse_version} # EL5 and OpenSUSE require the BuildRoot tag BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n) %endif BuildArch: noarch BuildRequires: python BuildRequires: python-devel BuildRequires: python-lxml BuildRequires: python-boto BuildRequires: python-argparse BuildRequires: python-jinja2 %if 0%{?suse_version} BuildRequires: python-M2Crypto BuildRequires: python-Genshi BuildRequires: python-gamin BuildRequires: python-pyinotify BuildRequires: python-python-daemon %if %{build_cherry_py} BuildRequires: python-CherryPy >= 3 %endif %else # ! suse_version BuildRequires: python-daemon BuildRequires: python-inotify %if "%{_vendor}" == "redhat" && 0%{!?rhel:1} && 0%{!?fedora:1} # by default, el5 doesn't have the %%rhel macro, provided by this # package; EPEL build servers install buildsys-macros by default, but # explicitly requiring this may help builds in other environments BuildRequires: buildsys-macros %else # vendor != redhat || rhel defined %if 0%{?rhel} && 0%{?rhel} < 6 BuildRequires: python-ssl %else # rhel > 5 # EL5 lacks python-mock, so test suite is disabled BuildRequires: python-nose BuildRequires: mock BuildRequires: m2crypto # EPEL uses the properly-named python-django starting with EPEL7 %if 0%{?rhel} && 0%{?rhel} > 6 BuildRequires: python-django >= 1.3 %else BuildRequires: Django >= 1.3 %endif BuildRequires: python-genshi BuildRequires: python-cheetah BuildRequires: libselinux-python BuildRequires: pylibacl BuildRequires: python-pep8 BuildRequires: pylint %if %{build_cherry_py} BuildRequires: python-cherrypy >= 3 %endif BuildRequires: python-mock %endif # rhel > 5 %endif # vendor != redhat || rhel defined %endif # ! suse_version %if 0%{?fedora} && 0%{?fedora} >= 16 || 0%{?rhel} && 0%{?rhel} >= 7 # Pick up _unitdir macro BuildRequires: systemd %endif %if 0%{?mandriva_version} # mandriva seems to behave differently than other distros and needs # this explicitly. BuildRequires: python-setuptools %endif %if 0%{?mandriva_version} == 201100 # mandriva 2011 has multiple providers for libsane, so (at least when # building on OBS) one must be chosen explicitly: "have choice for # libsane.so.1 needed by python-imaging: libsane1 sane-backends-iscan" BuildRequires: libsane1 %endif # RHEL 5 and 6 ship with sphinx 0.6, but sphinx 1.0 is available with # a different package name in EPEL. 
%if "%{_vendor}" == "redhat" && 0%{?rhel} <= 6 && 0%{?fedora} == 0 BuildRequires: python-sphinx10 # python-sphinx10 doesn't set sys.path correctly; do it for them %global pythonpath %(find %{python_sitelib} -name Sphinx*.egg) %else BuildRequires: python-sphinx >= 1.0 %endif BuildRequires: python-docutils %if 0%{?fedora} >= 16 BuildRequires: systemd-units %endif %if 0%{?rhel} && 0%{?rhel} < 6 Requires: python-ssl %endif Requires: libselinux-python Requires: pylibacl Requires: python-argparse %if 0%{?fedora} >= 16 Requires(post): systemd-units Requires(preun): systemd-units Requires(postun): systemd-units %else Requires(post): /sbin/chkconfig Requires(preun): /sbin/chkconfig Requires(preun): /sbin/service Requires(postun): /sbin/service %endif %if "%{_vendor}" != "redhat" # fedora and rhel (and possibly other distros) do not know this tag. Recommends: cron %endif %description Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 client software. %package server Summary: Bcfg2 Server %if 0%{?suse_version} Group: System/Management %else Group: System Environment/Daemons %endif Requires: bcfg2 = %{version}-%{release} Requires: python-lxml >= 1.2.1 Requires: python-genshi %if 0%{?suse_version} Requires: python-pyinotify Requires: python-python-daemon %else Requires: python-inotify Requires: python-daemon %endif Requires: /usr/sbin/sendmail Requires: /usr/bin/openssl Requires: graphviz Requires: python-nose %if %{_vendor} == redhat %if 0%{?fedora} >= 16 Requires(post): systemd-units Requires(preun): systemd-units Requires(postun): systemd-units Requires(post): systemd-sysv %else Requires(post): /sbin/chkconfig Requires(preun): /sbin/chkconfig Requires(preun): /sbin/service Requires(postun): /sbin/service %endif %endif %description server Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. 
It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 server software. %if %{build_cherry_py} %package server-cherrypy Summary: Bcfg2 Server - CherryPy backend %if 0%{?suse_version} Group: System/Management %else Group: System Environment/Daemons %endif Requires: bcfg2 = %{version}-%{release} Requires: bcfg2-server = %{version}-%{release} # https://bitbucket.org/cherrypy/cherrypy/issue/1068/file-upload-crashes-when-using-https Requires: python-cherrypy >= 3.2.6 %description server-cherrypy Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 CherryPy server backend. 
%endif # build_cherry_py %package web Summary: Bcfg2 Web Reporting Interface Requires: bcfg2-server = %{version}-%{release} Requires: httpd %if 0%{?suse_version} Group: System/Management Requires: python-django >= 1.3 Requires: python-django-south >= 0.7 %else Group: System Tools # EPEL uses the properly-named python-django starting with EPEL7 %if 0%{?rhel} && 0%{?rhel} > 6 Requires: python-django > 1.3 %else Requires: Django >= 1.3 Requires: Django-south >= 0.7 %endif Requires: bcfg2-server %endif %if "%{_vendor}" == "redhat" Requires: mod_wsgi %global apache_conf %{_sysconfdir}/httpd %else Requires: apache2-mod_wsgi %global apache_conf %{_sysconfdir}/apache2 %endif %description web Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 reports web frontend. %package doc Summary: Documentation for Bcfg2 %if 0%{?suse_version} Group: Documentation/HTML %else Group: Documentation %endif %description doc Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the Bcfg2 documentation. 
%package examples Summary: Examples for Bcfg2 Group: Documentation %description examples Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks. It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to bcfg2 the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification. Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies. This package includes the examples files for Bcfg2. %prep %setup -q -n %{name}-%{version}%{?_pre_rc} # The pylint and pep8 unit tests fail on RH-derivative distros %if "%{_vendor}" == "redhat" mv testsuite/Testsrc/test_code_checks.py \ testsuite/Testsrc/test_code_checks.py.disable_unit_tests awk ' BEGIN {line=0} /class Test(Pylint|PEP8)/ {line=FNR+1} FNR==line {sub("True","False")} {print $0} ' testsuite/Testsrc/test_code_checks.py.disable_unit_tests \ > testsuite/Testsrc/test_code_checks.py %endif # Fixup some paths %{__perl} -pi -e 's@/etc/default@%{_sysconfdir}/sysconfig@g' tools/bcfg2-cron # Get rid of extraneous shebangs for f in `find src/lib -name \*.py` do %{__sed} -i -e '/^#!/,1d' $f done sed -i "s/apache2/httpd/g" misc/apache/bcfg2.conf %build %{__python} setup.py build %{?pythonpath: PYTHONPATH="%{pythonpath}"} \ %{__python} setup.py build_sphinx %install %if 0%{?rhel} == 5 || 0%{?suse_version} # EL5 and OpenSUSE require the buildroot to be cleaned manually rm -rf %{buildroot} %endif %{__python} setup.py install -O1 --skip-build --root=%{buildroot} --prefix=/usr install -d %{buildroot}%{_bindir} install -d %{buildroot}%{_sbindir} install -d %{buildroot}%{_initrddir} install -d %{buildroot}%{_sysconfdir}/cron.daily install -d %{buildroot}%{_sysconfdir}/cron.hourly install -d %{buildroot}%{_sysconfdir}/sysconfig install -d %{buildroot}%{_libexecdir} install -d %{buildroot}%{_localstatedir}/cache/%{name} install -d %{buildroot}%{_localstatedir}/lib/%{name} %if 0%{?suse_version} install -d %{buildroot}/var/adm/fillup-templates %endif mv %{buildroot}%{_bindir}/bcfg2* %{buildroot}%{_sbindir} %if 0%{?fedora} && 0%{?fedora} < 16 || 0%{?rhel} && 0%{?rhel} < 7 # Install SysV init scripts for everyone but new Fedoras install -m 755 redhat/scripts/bcfg2.init \ %{buildroot}%{_initrddir}/bcfg2 install -m 755 redhat/scripts/bcfg2-server.init \ %{buildroot}%{_initrddir}/bcfg2-server install -m 755 redhat/scripts/bcfg2-report-collector.init \ %{buildroot}%{_initrddir}/bcfg2-report-collector %endif install -m 755 debian/bcfg2.cron.daily \ %{buildroot}%{_sysconfdir}/cron.daily/bcfg2 install -m 755 debian/bcfg2.cron.hourly \ 
%{buildroot}%{_sysconfdir}/cron.hourly/bcfg2 install -m 755 tools/bcfg2-cron \ %{buildroot}%{_libexecdir}/bcfg2-cron install -m 644 debian/bcfg2.default \ %{buildroot}%{_sysconfdir}/sysconfig/bcfg2 install -m 644 debian/bcfg2-server.default \ %{buildroot}%{_sysconfdir}/sysconfig/bcfg2-server %if 0%{?suse_version} install -m 755 debian/bcfg2.default \ %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2 install -m 755 debian/bcfg2-server.default \ %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2-server ln -s %{_initrddir}/bcfg2 %{buildroot}%{_sbindir}/rcbcfg2 ln -s %{_initrddir}/bcfg2-server %{buildroot}%{_sbindir}/rcbcfg2-server %endif touch %{buildroot}%{_sysconfdir}/%{name}.{cert,conf,key} # systemd install -d %{buildroot}%{_unitdir} install -p -m 644 redhat/systemd/%{name}.service \ %{buildroot}%{_unitdir}/%{name}.service install -p -m 644 redhat/systemd/%{name}-server.service \ %{buildroot}%{_unitdir}/%{name}-server.service %if 0%{?rhel} != 5 # Webserver install -d %{buildroot}%{apache_conf}/conf.d install -p -m 644 misc/apache/bcfg2.conf \ %{buildroot}%{apache_conf}/conf.d/wsgi_bcfg2.conf %else # remove web server files not in EL5 packages rm -r %{buildroot}%{_datadir}/bcfg2/reports.wsgi \ %{buildroot}%{_datadir}/bcfg2/site_media %endif # mandriva cannot handle %ghost without the file existing, # so let's touch a bunch of empty config files touch %{buildroot}%{_sysconfdir}/bcfg2.conf %if 0%{?rhel} == 5 # Required for EL5 %clean rm -rf %{buildroot} %endif %if 0%{?rhel} != 5 # EL5 lacks python-mock, so test suite is disabled %check # Downloads not allowed in koji; fix .xsd urls to point to local files sed -i "s@schema_url = .*\$@schema_url = 'file://`pwd`/`basename %{SOURCE1}`'@" \ testsuite/Testschema/test_schema.py sed "s@http://www.w3.org/2001/xml.xsd@file://$(pwd)/schemas/xml.xsd@" \ %{SOURCE1} > `basename %{SOURCE1}` %{__python} setup.py test %endif %post %if 0%{?fedora} >= 18 %systemd_post bcfg2.service %else if [ $1 -eq 1 ] ; then # Initial installation %if 0%{?suse_version} %fillup_and_insserv -f bcfg2 %else /sbin/chkconfig --add bcfg2 %endif fi %endif %post server %if 0%{?fedora} >= 18 %systemd_post bcfg2-server.service %else if [ $1 -eq 1 ] ; then # Initial installation %if 0%{?suse_version} %fillup_and_insserv -f bcfg2-server %else /sbin/chkconfig --add bcfg2-server %endif fi %endif %preun %if 0%{?fedora} >= 18 %systemd_preun bcfg2.service %else if [ $1 -eq 0 ]; then # Package removal, not upgrade %if 0%{?suse_version} %stop_on_removal bcfg2 %else /sbin/service bcfg2 stop &>/dev/null || : /sbin/chkconfig --del bcfg2 %endif fi %endif %preun server %if 0%{?fedora} >= 18 %systemd_preun bcfg2-server.service %else if [ $1 -eq 0 ]; then # Package removal, not upgrade %if 0%{?suse_version} %stop_on_removal bcfg2-server %stop_on_removal bcfg2-report-collector %else /sbin/service bcfg2-server stop &>/dev/null || : /sbin/chkconfig --del bcfg2-server %endif fi %endif %postun %if 0%{?fedora} >= 18 %systemd_postun bcfg2.service %else %if 0%{?fedora} >= 16 /bin/systemctl daemon-reload >/dev/null 2>&1 || : %endif if [ $1 -ge 1 ] ; then # Package upgrade, not uninstall %if 0%{?suse_version} %insserv_cleanup %else /sbin/service bcfg2 condrestart &>/dev/null || : %endif fi %endif %postun server %if 0%{?fedora} >= 18 %systemd_postun bcfg2-server.service %else if [ $1 -ge 1 ] ; then # Package upgrade, not uninstall /sbin/service bcfg2-server condrestart &>/dev/null || : fi %if 0%{?suse_version} if [ $1 -eq 0 ]; then # clean up on removal. 
%insserv_cleanup fi %endif %endif %if 0%{?fedora} || 0%{?rhel} %triggerun -- bcfg2 < 1.2.1-1 /usr/bin/systemd-sysv-convert --save bcfg2 >/dev/null 2>&1 || : /bin/systemctl --no-reload enable bcfg2.service >/dev/null 2>&1 || : /sbin/chkconfig --del bcfg2 >/dev/null 2>&1 || : /bin/systemctl try-restart bcfg2.service >/dev/null 2>&1 || : %triggerun server -- bcfg2-server < 1.2.1-1 /usr/bin/systemd-sysv-convert --save bcfg2-server >/dev/null 2>&1 || : /bin/systemctl --no-reload enable bcfg2-server.service >/dev/null 2>&1 || : /sbin/chkconfig --del bcfg2-server >/dev/null 2>&1 || : /bin/systemctl try-restart bcfg2-server.service >/dev/null 2>&1 || : %endif %files %if 0%{?rhel} == 5 || 0%{?suse_version} # Required for EL5 and OpenSUSE %defattr(-,root,root,-) %endif %doc COPYRIGHT LICENSE README %{_mandir}/man1/bcfg2.1* %{_mandir}/man5/bcfg2.conf.5* %ghost %attr(600,root,root) %config(noreplace,missingok) %{_sysconfdir}/bcfg2.cert %ghost %attr(0600,root,root) %config(noreplace,missingok) %{_sysconfdir}/bcfg2.conf %if 0%{?fedora} >= 16 || 0%{?rhel} >= 7 %config(noreplace) %{_unitdir}/%{name}.service %else %{_initrddir}/bcfg2 %endif %if 0%{?fedora} || 0%{?rhel} %config(noreplace) %{_sysconfdir}/sysconfig/bcfg2 %else %config(noreplace) %{_sysconfdir}/default/bcfg2 %endif %{_sysconfdir}/cron.daily/bcfg2 %{_sysconfdir}/cron.hourly/bcfg2 %{_sbindir}/bcfg2 %{_libexecdir}/bcfg2-cron %dir %{_localstatedir}/cache/%{name} %{python_sitelib}/Bcfg2*.egg-info %dir %{python_sitelib}/Bcfg2 %{python_sitelib}/Bcfg2/__init__.py* %{python_sitelib}/Bcfg2/Client %{python_sitelib}/Bcfg2/Compat.py* %{python_sitelib}/Bcfg2/Logger.py* %{python_sitelib}/Bcfg2/Options %{python_sitelib}/Bcfg2/Utils.py* %{python_sitelib}/Bcfg2/version.py* %if 0%{?suse_version} %{_sbindir}/rcbcfg2 %config(noreplace) /var/adm/fillup-templates/sysconfig.bcfg2 %endif %files server %if 0%{?rhel} == 5 || 0%{?suse_version} %defattr(-,root,root,-) %endif %ghost %attr(600,root,root) %config(noreplace) %{_sysconfdir}/bcfg2.key %if 0%{?fedora} >= 16 || 0%{?rhel} >= 7 %config(noreplace) %{_unitdir}/%{name}-server.service %else %{_initrddir}/bcfg2-server %{_initrddir}/bcfg2-report-collector %endif %config(noreplace) %{_sysconfdir}/sysconfig/bcfg2-server %{_sbindir}/bcfg2-* %dir %{_localstatedir}/lib/%{name} %{python_sitelib}/Bcfg2/DBSettings.py* %{python_sitelib}/Bcfg2/Server %{python_sitelib}/Bcfg2/Reporting %{python_sitelib}/Bcfg2/manage.py* %if %{build_cherry_py} %exclude %{python_sitelib}/Bcfg2/Server/CherryPyCore.py* %endif %dir %{_datadir}/bcfg2 %{_datadir}/bcfg2/schemas %{_datadir}/bcfg2/xsl-transforms %if 0%{?suse_version} %{_sbindir}/rcbcfg2-server %config(noreplace) /var/adm/fillup-templates/sysconfig.bcfg2-server %endif %{_mandir}/man5/bcfg2-lint.conf.5* %{_mandir}/man8/bcfg2*.8* %doc tools/* %if %{build_cherry_py} %files server-cherrypy %if 0%{?rhel} == 5 || 0%{?suse_version} %defattr(-,root,root,-) %endif %{python_sitelib}/Bcfg2/Server/CherryPyCore.py %endif # bcfg2-web package is disabled on EL5, which lacks Django %if 0%{?rhel} != 5 %files web %if 0%{?suse_version} %defattr(-,root,root,-) %endif %{_datadir}/bcfg2/reports.wsgi %{_datadir}/bcfg2/site_media %config(noreplace) %{apache_conf}/conf.d/wsgi_bcfg2.conf %endif %files doc %if 0%{?rhel} == 5 || 0%{?suse_version} %defattr(-,root,root,-) %endif %doc build/sphinx/html/* %files examples %if 0%{?rhel} == 5 || 0%{?suse_version} %defattr(-,root,root,-) %endif %doc examples/* %changelog * Wed Apr 23 2014 Jonathan S. Billings - 1.3.4-2 - Fixed RPM scriptlet logic for el6 vs. 
Fedora init commands * Sun Apr 6 2014 John Morris - 1.3.4-1 - New upstream release * Wed Feb 26 2014 John Morris - 1.3.3-5 - EL7: Re-add deps and re-enable %%check script; bz #1058427 * Sat Feb 1 2014 John Morris - 1.3.3-4 - Disable bcfg2-web package on EL5; bz #1058427 - Disable %%check on EL7; missing EPEL deps - BR: systemd to pick up _unitdir macro * Mon Jan 27 2014 Sol Jerome - 1.3.3-4 - Fix BuildRequires for EPEL7's Django - Remove unnecessary client-side lxml dependency - Add Django dependency for bcfg2-web (the web package *does* require Django for the database) - Fix OS detection for RHEL7 initscripts * Sun Dec 15 2013 John Morris - 1.3.3-3 - Remove unneeded Django dep in 'web' package, bz #1043229 * Sun Nov 24 2013 John Morris - 1.3.3-2 - Fix CherryPyCore.py exclude glob to include compiled files - Disable server-cherrypy package build to make Fedora buildsys happy * Thu Nov 07 2013 Sol Jerome 1.3.3-1 - New upstream release * Sun Aug 04 2013 John Morris - 1.3.2-2 - Reconcile divergences with Fedora specfile, as requested by upstream (equally large changes made in Fedora version to reconcile with this file) - Python macro cleanups - Accommodations for OpenSUSE - Macros for pre and rc releases - %%check section - Move BRs to top of file - Rearrange lines to match Fedora - Group: tag tweaks - Startup/shutdown changes - Separate examples package - Remove %%{__install} macros; RH has backed away from those - Add fedora systemd units, both f16 and f18 variants :P - Changes to %%post* scripts - Rearrange %%files sections * Wed Jul 3 2013 John Morris - 1.3.2-1 - Update to new upstream version 1.3.2 - Move settings.py into server package (fixes bug reported on bcfg2-dev ML) - Use init scripts from redhat/scripts directory - Fix EL5/EL6 sphinx docs - Require python-inotify instead of gamin-python; recommended by upstream - Remove obsolete bcfg2-py27-auth.patch, accepted upstream - Add %%check script - Hack test suite to use local copies of XMLSchema.xsd and xml.xsd - Many new BRs to support %%check script - Disable %%check script on EL5, where there is no python-mock package - Cleanups to _pre/_rc macros - Mark EL5 relics - Other minor formatting * Mon Apr 08 2013 Fabian Affolter - 1.3.1-1 - Updated to new upstream version 1.3.1 * Mon Mar 18 2013 Fabian Affolter - 1.3.0-1 - Updated to new upstream version 1.3.0 * Wed Feb 13 2013 Fedora Release Engineering - 1.3.0-0.2.pre2 - Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild * Wed Oct 31 2012 Fabian Affolter - 1.3.0-0.1.pre2 - Updated to new upstream version 1.3.0 pre2 * Wed Oct 17 2012 Chris St. Pierre 1.3.0-0.2pre1 - Split bcfg2-selinux into its own specfile * Fri Sep 14 2012 Chris St. Pierre 1.3.0-0.1pre1 - Added -selinux subpackage * Mon Aug 27 2012 Václav Pavlín - 1.2.3-3 - Scriptlets replaced with new systemd macros (#850043) * Wed Aug 15 2012 Chris St. Pierre 1.2.3-0.1 - Added tools/ as doc for bcfg2-server subpackage * Wed Jul 18 2012 Fedora Release Engineering - 1.2.3-2 - Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild * Sat Jul 07 2012 Fabian Affolter - 1.2.3-1 - Fix CVE-2012-3366 - Updated to new upstream version 1.2.3 * Tue May 01 2012 Fabian Affolter - 1.2.2-2 - python-nose is needed by bcfg2-test * Fri Apr 06 2012 Fabian Affolter - 1.2.2-1 - Updated to new upstream version 1.2.2 * Sun Feb 26 2012 Fabian Affolter - 1.2.1-2 - Fixed systemd files * Sat Feb 18 2012 Christopher 'm4z' Holm <686f6c6d@googlemail.com> 1.2.1 - Added Fedora and Mandriva compatibilty (for Open Build Service). 
- Added missing dependency redhat-lsb. * Tue Feb 14 2012 Christopher 'm4z' Holm <686f6c6d@googlemail.com> 1.2.1 - Added openSUSE compatibility. - Various changes to satisfy rpmlint. * Tue Feb 07 2012 Fabian Affolter - 1.2.1-1 - Added examples package - Updated to new upstream version 1.2.1 * Mon Jan 02 2012 Fabian Affolter - 1.2.0-6 - Added support for systemd - Example subpackage * Wed Sep 07 2011 Fabian Affolter - 1.2.0-5 - Updated to new upstreadm version 1.2.0 * Wed Sep 07 2011 Fabian Affolter - 1.2.0-4.1.rc1 - Updated to new upstreadm version 1.2.0rc1 * Wed Jun 22 2011 Fabian Affolter - 1.2.0-3.1.pre3 - Updated to new upstreadm version 1.2.0pre3 * Wed May 04 2011 Fabian Affolter - 1.2.0-2.1.pre2 - Added bcfg2-lint stuff - Pooled file section entries to reduce future maintainance - Removed Patch * Wed May 04 2011 Fabian Affolter - 1.2.0-1.1.pre2 - Updated to new upstream version 1.2.0pre2 * Sun Mar 20 2011 Fabian Affolter - 1.2.0-1.1.pre1 - Added doc subpackage - Updated to new upstream version 1.2.0pre1 * Mon Feb 07 2011 Fedora Release Engineering - 1.1.1-2.1 - Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild * Thu Jan 27 2011 Chris St. Pierre 1.2.0pre1-0.0 - Added -doc sub-package * Thu Nov 18 2010 Fabian Affolter - 1.1.1-2 - Added new man page - Updated doc section (ChangeLog is gone) * Thu Nov 18 2010 Fabian Affolter - 1.1.1-1 - Updated to new upstream version 1.1.1 * Fri Nov 5 2010 Jeffrey C. Ollie - 1.1.0-3 - Add patch from Gordon Messmer to fix authentication on F14+ (Python 2.7) * Mon Sep 27 2010 Jeffrey C. Ollie - 1.1.0-2 - Update to final version * Wed Sep 15 2010 Jeffrey C. Ollie - 1.1.0-1.3.rc5 - Update to 1.1.0rc5: * Tue Aug 31 2010 Jeffrey C. Ollie - 1.1.0-1.2.rc4 - Add new YUMng driver * Wed Jul 21 2010 David Malcolm - 1.1.0-1.1.rc4.1 - Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild * Tue Jul 20 2010 Fabian Affolter - 1.1.0-1.1.rc4 - Added patch to fix indention * Tue Jul 20 2010 Fabian Affolter - 1.1.0-0.1.rc4 - Updated to new upstream release candidate RC4 * Mon Jun 21 2010 Fabian Affolter - 1.1.0rc3-0.1 - Changed source0 in order that it works with spectool * Sat Jun 19 2010 Fabian Affolter - 1.1.0-0.1.rc3 - Updated to new upstream release candidate RC3 * Sun May 02 2010 Fabian Affolter - 1.1.0-0.2.rc1 - Changed define to global - Added graphviz for the server package * Wed Apr 28 2010 Jeffrey C. Ollie - 1.1.0-0.1.rc1 - Update to 1.1.0rc1 * Tue Apr 13 2010 Jeffrey C. Ollie - 1.0.1-1 - Update to final version * Fri Nov 6 2009 Jeffrey C. Ollie - 1.0.0-2 - Fixup the bcfg2-server init script * Fri Nov 6 2009 Jeffrey C. Ollie - 1.0.0-1 - Update to 1.0.0 final * Wed Nov 4 2009 Jeffrey C. Ollie - 1.0.0-0.5.rc2 - Only require python-ssl on EPEL * Sat Oct 31 2009 Jeffrey C. Ollie - 1.0.0-0.4.rc2 - Update to 1.0.0rc2 * Mon Oct 26 2009 Jeffrey C. Ollie - 1.0.0-0.3.rc1 - Update to 1.0rc1 * Fri Oct 16 2009 Jeffrey C. Ollie - 1.0-0.2.pre5 - Add python-ssl requirement * Tue Aug 11 2009 Jeffrey C. Ollie - 1.0-0.1.pre5 - Update to 1.0pre5 * Fri Jul 24 2009 Fedora Release Engineering - 0.9.6-4 - Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild * Mon Feb 23 2009 Fedora Release Engineering - 0.9.6-3 - Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild * Sat Nov 29 2008 Ignacio Vazquez-Abrams - 0.9.6-2 - Rebuild for Python 2.6 * Tue Nov 18 2008 Jeffrey C. Ollie - 0.9.6-1 - Update to 0.9.6 final. * Tue Oct 14 2008 Jeffrey C. Ollie - 0.9.6-0.8.pre3 - Update to 0.9.6pre3 * Sat Aug 9 2008 Jeffrey C. 
Ollie - 0.9.6-0.2.pre2 - Update to 0.9.6pre2 * Wed May 28 2008 Jeffrey C. Ollie - 0.9.6-0.1.pre1 - Update to 0.9.6pre1 * Fri Feb 15 2008 Jeffrey C. Ollie - 0.9.5.7-1 - Update to 0.9.5.7. * Fri Feb 15 2008 Jeffrey C. Ollie - 0.9.5.7-1 - Update to 0.9.5.7. * Fri Jan 11 2008 Jeffrey C. Ollie - 0.9.5.5-1 - Update to 0.9.5.5 - More egg-info entries. * Wed Jan 9 2008 Jeffrey C. Ollie - 0.9.5.4-1 - Update to 0.9.5.4. * Tue Jan 8 2008 Jeffrey C. Ollie - 0.9.5.3-1 - Update to 0.9.5.3 - Package egg-info files. * Mon Nov 12 2007 Jeffrey C. Ollie - 0.9.5.2-1 - Update to 0.9.5.2 * Mon Nov 12 2007 Jeffrey C. Ollie - 0.9.5-2 - Fix oops. * Mon Nov 12 2007 Jeffrey C. Ollie - 0.9.5-1 - Update to 0.9.5 final. * Mon Nov 05 2007 Jeffrey C. Ollie - 0.9.5-0.5.pre7 - Commit new patches to CVS. * Mon Nov 05 2007 Jeffrey C. Ollie - 0.9.5-0.4.pre7 - Update to 0.9.5pre7 * Wed Jun 27 2007 Jeffrey C. Ollie - 0.9.4-4 - Oops, apply right patch * Wed Jun 27 2007 Jeffrey C. Ollie - 0.9.4-3 - Add patch to fix YUMng problem * Mon Jun 25 2007 Jeffrey C. Ollie - 0.9.4-2 - Bump revision and rebuild * Mon Jun 25 2007 Jeffrey C. Ollie - 0.9.4-1 - Update to 0.9.4 final * Thu Jun 21 2007 Jeffrey C. Ollie - 0.9.4-0.1.pre4 - Update to 0.9.4pre4 * Thu Jun 14 2007 Jeffrey C. Ollie - 0.9.4-0.1.pre3 - Update to 0.9.4pre3 * Tue Jun 12 2007 Jeffrey C. Ollie - 0.9.4-0.1.pre2 - Update to 0.9.4pre2 * Tue May 22 2007 Jeffrey C. Ollie - 0.9.3-2 - Drop requires on pyOpenSSL - Add requires on redhat-lsb - (Fixes #240871) * Mon Apr 30 2007 Jeffrey C. Ollie - 0.9.3-1 - Update to 0.9.3 * Tue Mar 20 2007 Jeffrey C. Ollie - 0.9.2-4 - Server needs pyOpenSSL * Wed Feb 28 2007 Jeffrey C. Ollie - 0.9.2-3 - Don't forget %%dir * Wed Feb 28 2007 Jeffrey C. Ollie - 0.9.2-2 - Fix #230478 * Mon Feb 19 2007 Jeffrey C. Ollie - 0.9.2-1 - Update to 0.9.2 * Thu Feb 8 2007 Jeffrey C. Ollie - 0.9.1-1.d - Update to 0.9.1d * Fri Feb 2 2007 Mike Brady 0.9.1 - Removed use of _libdir due to Red Hat x86_64 issue. * Tue Jan 9 2007 Jeffrey C. Ollie - 0.8.7.3-2 - Merge client back into base package. * Wed Dec 27 2006 Jeffrey C. Ollie - 0.8.7.3-1 - Update to 0.8.7.3 * Fri Dec 22 2006 Jeffrey C. Ollie - 0.8.7.1-5 - Server needs client library files too so put them in main package * Wed Dec 20 2006 Jeffrey C. Ollie - 0.8.7.1-4 - Yes, actually we need to require openssl * Wed Dec 20 2006 Jeffrey C. Ollie - 0.8.7.1-3 - Don't generate SSL cert in post script, it only needs to be done on the server and is handled by the bcfg2-admin tool. - Move the /etc/bcfg2.key file to the server package - Don't install a sample copy of the config file, just ghost it - Require gamin-python for the server package - Don't require openssl - Make the client a separate package so you don't have to have the client if you don't want it * Wed Dec 20 2006 Jeffrey C. Ollie - 0.8.7.1-2 - Add more documentation * Mon Dec 18 2006 Jeffrey C. 
Ollie - 0.8.7.1-1 - First version for Fedora Extras * Fri Sep 15 2006 Narayan Desai - 0.8.4-1 - Initial log
misc/bcfg2_logo.png: [binary PNG image data omitted]
misc/bcfg2_logo_with_icon.png: [binary PNG image data omitted]
misc/logos/bcfg2_logo_300px.jpg: [binary JPEG image data omitted]
misc/logos/bcfg2_logo_300px.png: [binary PNG image data omitted]
.v~}RSSX7gVlV[%٥wpW P 5hРd|az3>ϟxwp f9eQ߲hXtƱ֢xv&?w 68o2@"FEE)(T&cR)5 G6z-2IGa& T'4thP7"5xW\#;`3/gurԅpbY2)/G.ҁǟ+ɮ]طjR||P1YӬ^YRLj% 2777#0J}Wm ;^:A;:"ohw(¾qر(jv m&$!adݼ=y`/Yot~rK g2Ͼ?czYyR_kw_~/s]DnA:ub3=W#E=CI樋.֍cg9+F5ua9ے-=/E8#{vb,SQCVꤦYf rw)%NkabhIlc59[CE/ܑp0 nbz;u+{~+jV^}<)cunx?#\qee 2 ="fU=ɴ`eNBM o *ٹm,mqV+9Bhg W,3 %eCRL!X )W_HzˁWW>.HGP&5m2|#3E],c@DtA+R=7mڴ~=+b@fΜ9_rx?I}%WH>}Wb_xB"WRkݺuCɬAдqz3{r JG;=pEݧg6HmBH2ϺuV/ X/مE]sF^9f2 Vd|ҿFL0laNAB,"hQ-q“? )͓qQUt'1Ly;Dc批j4eEBxXU b;]טق+Kn{Wh\.Mgl2hȾvmjWC^AC^ﭴG0aIb&GLxݻ|jժAaЫzA+~>Я_n%])K,Ƹ[q#K2)ՙP-> ɓ'ӧO<; ֕vBIe Jgl2,-l^]yh@\/n"ۥ$`H Th }ʿ@qo޽{`߷#K u֭vⳢٚ4i\-O2I]]p,wW22ή]2񷴓g0}[CXP:PQ˓j'ֽ1ȍL/MQ85ZAqHbt`ؘjW1l*̧0egFtSգ" 1Rܘ16H\9 = >*n\EȾX$S͇7pz}Nvt4싏Fbg*m>YO'=bús,n)n6QDWj1?dCe[#;ݻ ,]Q:tP`B6D3g\,eXߖvm۶q2\} N`.((tN@_ P~y(:= qк0-+Vl@`ӢofW_,T4PR!hdSc2Ћ"-=(v" G8#J1,j͔HSԂ>xǎg`[=iJ&[=$'ֆ8)c ?kLd+ظHa@e]1T`*}X !Z1W2# @Ϙ_E A/2Y$c@]w5￁9 1EM"R۷o7u@ v%&_bdKtĉc˖-m36P|ۚQ' @viqK,a-u .rQCXGv횓ޔCIY `(_H ٪ѱh&MKvIl^2 5pvQ 8Ə#B&{,}dݻ v1ӧ(Cq[%EʱP2/8|M[4 `8]fQ́, ' {}cG*3Qְ1&$'e38|b`=De&MÄŌ#RmܞXEA`ɘDHU3MёΙL]*csNFQsۜȺ,6:j|W?7mTa_gӦMO2Xׯ_4&E4V!#<6^#elmڴiȍQV#Q}ʕ+w$VA1˰0#33OIJ#sJ0,ncފLXHam@C؁i k"&e #OMz' rm)[|CMMo֚ZY=P2ҫƅhW> ˎ,7VOdع{~ dX4xAɰ;~ﶁ8cF$QAA%>.'}x6) _l3Cĺ]QB⢘B8A4ƍX^=|{mSɯ״LrcuzR.ZV0X<QSHJ.9CĤGe%6Y~?ФIڦk~P"=e֭t6ҤKbo BSk`4D݋u+G Ai}nI'1 փXJeaPVݛMfAؠ|&Oa5k'NRA(޾]~8B9PD;c&}5"kd7KmRU4xm!F]3ıw"vT.@N\ 3F16`E]3]u@ Xf:zs LBΔ/Lo:6Shv&VdaE0k8̂`Ht^ Nm>^WF<Õ{D(3[0v@eܹ)I%f$O$"sz衇f(O|i9 jaeϣbIiE%B")YŰHF8 ,xk/ iԨQ } 03@E~۷oO>̓"3ӃG}ښ5kB$EOU^-SPɳe˖g^E%ӑ1c<82$%WbO QӧOg.]0~Wi_D#(vi/.-NesVNςx}Q04\.^f\d_Ih%_aWKDN.fwe@~$Js[(nIA~H6HF0"3b҃g 1(}6#;V@$7 3&LxQiΏiGplUədxᦛSLRY<+:\V9k&b1!QO];hbg]G$Eq("TP\2%m`y `.Xjvvv$bSb7MƎ0 ŏX+%ݖ!5fѿ VK=z]'ź"u.oO#; 7$5,+,EDW e{Y>wgh儁׶k [Âa*0-([Eh wPکCMO^?#E>؊m?vqi 9y-+([ک ȃ*%#l_.0a`HPhs-_zIAEtXUUdHH| }?o8^?yddҢə,R8'w p rх0(PbTTYRDsXxz.!"x;Y yAsSO]TAHTᅒLmVbឧ={^i8gd"Ŧ=f #;4az26mB~L86]GOm)̫8)[%&*GUPB c4F+pe]KNHemc38eVz^I! @&^pED2 e|\XFbs!`e=(lǐV.&菢RjeBjep0niVE |WIbuD+<b'/"ʹ0,!7pE7yʟ4|q 0ϲe˪'NT d@Ų8: An60-W!ȓ +Ī.)I9nȌO_cnI=p(e"Y(V~hPL{ۓ r7e*X;> ]F׈fnMͶ:&P +h^_eN/*%ahۆд~}Ը4]{} ;*8n@l!]M~: D1 g>ZI'9V? ["a+]de[5n ~ɉb^h|8*.8Xy8UV~Pfxl p:T8VG@IY/P=9ϒ.V׮][}Wa';#ΛHOv>ksa^Veb*ZNT(?J'$:t7߼W^=o?vӎ̪ղۮxeժU%3 V zi)lLEb+`.vRïՍGf bRnkiU9bhxOAm/,,=QkZFMa+\gG:K0ˌ;\ЯK{hT4G MUk>RD_Ы %G>rŶ`kϞ=)N޲e?4iS һw=)S,¾PwXFr~Ԛ5kcdVTҩ}(Ηaq@ Int@xv{=% D^qeǴbeU1a<݌UjZQhr!Cc_!1ҥ?sʗUVmח4f/,ɪs& +'6 UիSHϤ5Jn EF"d6 Tgfra&=͠)bȴ@衸jz3̲ߩp0[]u8܉{3ziZiP5P=rȉG}K"ħ(nݺʚbpB05']B~p7oޜMtgdd@_ŽJ+ui'Q|G;ڦMcI0UKOOgYZPB=C>)S(7aaH oC0EŮh0Vw7hРȚxoQYt(y1<<x!2 %[&/ʢs(i LQ$IqDQ nHk@MWw[ RйA-8|Ё{ҢErf˕wU> ]Y;)֍AmCZfAQaWx0sG]\W;)v,X|j( PJ;MFz WJ߾}/"'e0m5S,)W\G e[2 ưhҥ : Yj*Oц@23a"47"L\s;ӽ{{LE&|ĶMNݻwo YlFc/.wPxZtkc)B;v4ޠ6_gU/]ԃNd|=;)O[U^a\+dUۀ̦F+ڈ"2%}<>Cr8 XbW3f 18C  59D.[@Fx,@ljɃ-@B P$pp:?\L@@ηXзk׮> 6v*UVZh`Ϟ=90FȽ[f͚51A GIGPL!A5O]KG%!!AYQ_BFIT?G:uTh( .!`-@aUB Y{ۚ G{t$>?ܛ5eR, xX6Oիәp 6q}͝"ti \:֫ Ǐ=ĉoˠds/π=PRyd l7M5q7E`z xeJ(C7~EwRnC%$E&>?;< 5y#* #"[ZLydžl:XVBH(:DrI2s f=nt„ 8[סXjџ(:D˖-E:-V%psa;:|pu0X;eeV?/Mi751=m$š_"X8r#馛&s"V%XL1cBlF)djCrSyl}l@^i>@OJbh\\Q|u Xhy+T)vUZV&U q;]^!]s/<}RdrB*aVTMLȒ8#K i@Z\"gY1t$f|0db9C9eʔ[ƝP2hS(wuuFb)\@;T2%Cݛ }WAIftҝh_^G&8;2͛7o'NxkT$?^ CE bPr_WA~^ ~\mfбՅnMCH8hZ7;w:E^UVŮq\JV!wɰnH Ċg4X0}DT,@>8]5Z8s)ȋiUŅ=]ΥT1L=\sNARY0,Su."aY}b)m?GIQҫȌ'sm;r_FsY_ 7GDIJ΍,k}7Q]bvFJ;RѼ}(onCPw׮]l78Xy'+}[`&iba"٨ S-2 I3gjsÆ $K`_CLľ},**KS舠gwq'9X ت]ŒpӦMM_'LeXn!X=ԫ-|U=\wuuJhy.I-*X㻑%J".MZC/ Z"VN>K+Nj0)8Urm tR^.L^sªlr2MgV$Z? 
<)VrJ^$eGs4 N_Mn(sLt/so}y'0S@M5W:jѢE~x8 L"!#j_b/r!g)%P?]ؖ2u8G6$I&ڝ'nK/Y ZS_Er}?8{LU\3_^"1& x(Q&U =}?x!>ς@ +5ii7Gn8f< OBav@;:eh&$bQ1},`\^w2#0' ?fîg[-m_Y##3435aDK}>Vv?9ubiWMN9/c=j̘7HռN˖-[O8+WZ,E`Y4C UW_ޮ];t`,VZ_|%K8`M5M6mQxg} B7=+JE}GWT+KUEE6d:GO#&JJ$ˢdݺunݺnIJJjD%!9s&!PYs\m\8>؄3 aUf+XbM.#]rɥ3-Z7<">^Z-㲚`JQ˪.a4b61|)aL=V&PDk  [l>N1cnD#Cq/FlS9PˡA'k"#`ZN- -ƼQ^BlX^?a?r+@:/%ϤIf80Zyf R X,% &矿`PzH<$!e}zj4jЋYmFЮ R3$6VO!666F>c? S E,$ ؤF'sG9Su qAtoMÿFM$i=DF,"VbgYfǺfl) Z p~Jvr )l993(^q[/db҄0tfO X"R+ - A>NE"(ߒ4ʰtdk&31֊}#hjz 5tI!S՟C/Cя-|╸F5AU8KVbPEqgDҞx i)? .NdKcttnEOb lV-&^IEJ`nȨ=H]zMH^^TWeNspĐ`F%Clhҥ~6?m8~ {/!/e^Ľ +zk ,_-PR}=zȊ,^87,bs;Șj -Tɬ`ޏ0?"'_G]Jv>c]=zԺBkI+3Õyȑ#'CĞ^e~nG1|n,Ts-£UV"믿~bvv ɈSu t%%쪫^~sMP*mō#e-"G^b"У%!?n6me}l zfU,B X*@_ի%=?hn(:lXUDl1PRX&h;ԔK$O61f/3#,q,;c@)TT{ d-G_6ô'#UJ8c)\ᯛ)x UADـ>s7dXs `,WMoqp~fmQ2a翸% \HXvKOO?.R'-̟9s3x Mf޽Ak&$"7|;$#EݧD"DUŲ@ } 2f͚v-$NεdAz=den\8Ix6թ?-:_(9Rnӎ-4PZ*%RGQmfCAp9 ?c镲=,̦*,U*m VOeW9.ͪR!LB,Δ\C)AD^(rL`* F4B9xZThfd"GATd/T&'Ue bC?0agx,T"l,K,YۣGg̘q?2!ғ2#m"%G%&B *-ZӧO`'Rș}fs t6l3όEXvcI.%O⾄/%-6G?jӦMC@OfiQ'f舉9vm0E¹tYWTek4O'm܋G DC""&&M6j㻈1puR,ލ?eyh8(R?` ɎK顚GAH'.f#DBMURe)SXh{ֽ{?}j'=T$O-{a5&*ӝ={\[o۱c^>*V=IbQF-?~-۷@`CL,1ϞjF,r#:th)S>>}BIL'슘_a$VL$⛫xM7c JZÓQwM4&dWm6NXLj'zFGTm۶xoժU{ڕю/97ٲf2LW*ƹ׹^M"=&͊‹g&[;k-ڣHwΦ4tp}Ȳ\YJ8ע]L<'r{bPKu*kӎ@L>ti naH'vLc,}\\*2;H:''A19(ܼy+WnAֲES|x$'XY)۵ٳg/ĺvѽPҽ{. H,BS [n_,D淔Vz!DvUd IktaKV'W ϒmD(dnj{0Lz5$_+W!c߿ߧ~:"O~ZPP`$ҸYE\b醧ID[bUd,>,|v?3>TȺi Qrva*I$mWRPBNcQ(f  =(LCBl SgR7ʆo H UN3v|w,)8X>l}c[ DJ QKƁԷo8%̄K`n?`A]x̙r$ݔd P5h r^QkzqڵSEs&1pRRjB28px;.]y͚5nX> ~q1k珧-s;J/F*_Β6 4Y(3D2pV^ziN:@vTv|Ȩ8@QY6mھp?,X3$u\r%bq9o-[m]vYC ĿgV.ٲeq1ʖU3f0瞪@9jY&"XMQn+&L"1xj;0$5~L6? #I 2TSGÃ99 ;I.1) & ¤ VZٟ,R-@@kf*D 8)V&!xP,eBӰ^˫fF')MJ, q96:555Y,Q\$dzyLX[ڡB!9iHRzt-R{#?N[cQ,eJVVƼlRL$^@J]9U`Ȏ*osKb?d2XD‡^`NDyq|6nIT(%a?Emr8h0xٹ:m]2rnS&`'QO[I"|3Vy#B:Ą񎨪c\dC!"YL$3XkS9\Jr FŢ/n(۰r7Lx.2OE瑨5XBSY Z'HlNdrƉU Vֱ"GРQ^v? >Yke),/=U^FrQ_dr*k#M89h hOՒ 61m r ȴ@gP!Z' E=a mK 琙υeY&?ݪF9חYR~8W^-G r0TB\?* X~3@N] ny}6) Q+Cq]0Z~N`d)Jdy_}ţȦ1 AiIIl^NyCL<޴BCP+g$ 9uyMkt&p76D@>!?o@s rdSSZܗZ SG~r-AO'{W!=ϟ=[9I0⁕Yf 0'=g5a#H 'I[0ADC/}dB0VNNv3?Ūeoc5ߧ+rҨQʼQv[t¯!-S~Jߣv ۆD76;Un}Vv.LqCǠcee#Y-d@ " b[ir]bVn?L$"X<\- JUO‰~,zҙuw43Bnx(}# 4Wzԡf: #O ax6Sxss08|}VAѯV׃3\%\v  pWioq{bbX<] 3-ǡ0*zAβ}1%8C@YB(Z#H;B"dX&`q]2`l(!E'0`7᥸Rr7XG'<2w1RW;sge]ZQj+ "k"DTZ4Fm 4UAFђ "%`j1 $ƦG+*ݲ 2׽;󛻳.9f}wL׻_qH% aE$9v lwZrR *#ь#5v8`ii!q ng[WrX-t;ި\9CI")/6n܈ 6N4m_2 Z[SÇ͔W"QB>j0K6UqPsd!?:B n"*%4H~m,O.Vz+2Ēb*-H1SinI+R85G{R_dhxiP.}Ԕ{1wdVnpA0]SzEB%R"y|smw 5wQlvꧠ^DXN*Lѓ:A4'-bP%m>g&HbIb6Cf27I7]dN6 MMM &ӫ"}단q*1 [F_Qƀb㨝UѯϋuFQika2Lֈ%mx^q)CSz-*WGm8JFPq4ougqpzMxlGEC 9uQ( |og5D?UwՂa<_Q+Ux HG.y/Ly׵Nc2!RJ C*S‚tk/=- ])ۤBʨ*?^{X}:k/>Tt2X-@v̯_c? |ܩ2pwɿA(1ܾ)~0fMԷ4ŗ^&];|q[j/Y!NC%Fq_SSt{Ou1z |3`A2]؇xxVϩ;=;%?SO0wu1Z7u,+~J#EC&1H'E(-/mPezW&%6r.XUfR!UB.G[3eY'/8.zۜ%MeM)50 Y s6'>kS`u95h "u 9tmہe bo9ݾ *$XYcknr$RNI;Ǘð`1 ð`1 Â0 Â0 Â0 0 0 0,X 0,X 0,X ð`1 ð`1 ð`1 Â0 Â0 S 0Dd'IENDB`misc/logos/bcfg2_logo_800px.jpg000066400000000000000000001644131303523157100166110ustar00rootroot00000000000000JFIFddDuckyZAdobed2    !1AQ aq"2R# Bbr3S4ԕXCs$TuWcDd%5&6VtEeGFfHU7( !1AQaq"2BR#STbr3$s4CD%c5U t&6 ?@ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ 8⧴ 83Қʤ-xUo5ih&dX5”uoml$Mkg}<1p{;͇^rJCmY j VW us2Ϋ]jQ\Z_Gn4WY1oBpZpǭCW]c1,JW☭%U[M4UYR*HJ'#5/''01םKO>/c_Q{P{Pu}ů;T[p c;OklM[5iǙ? 
`ʒY^Û;Ŷu\f[ԬVGT2(*o>:վ"-/ Vfʁ)?OY\'RL{,̈aj+=UMV`\sjly:*HU5u[̨wujk S2+iT]2[3G^e7]wuLVX%<ƦծϜD\_ oNJM_|;1<DŽ'aZd2ejZݙܭZJ>%QJ#Zyﴣ8&!y$ [Ov͛a[cKF%&5,=eV 7][y-i4:y-2zOK]d 27$j N!ip{T#fLey| .EvYHcǮ\-jW#;{5MfS('Vr.KpE>&QoS[S~=!D1[iuUQ7^&P{3od>pZWr\M΅z XXZ/engߒ$F%gGDy VS׻e2/IJyTJGuRͶqc[̚xpeaCiNK,Rkr~.Zl*hA Aw I7S!Jν57+`)L’B m"(--Y=+|i?N>0ho1\8Wl(m0Ϊkk9}=em1 t@j1 qltԗ ^l|*О+^oٱۃi}9^[y*]=hbE)\Jd\m4yv/,aµwx;eey Z%V@ :eV6Wpױ}$(1q>z~n~?e#pA)'ۀ>z?Hmqy@6ZyN8ez=&F}Hݨ 4Rjg(bm> @kQ[1߀)mQhBɨHLt@.u,XĥL ;Oz3p܉J6 uq$] èB#%D@pY]gvO_B)7}fCUj&2`$X#DRfeɿ8n52T ]Pl>]yxAowF 3$YR< ~Պ;z Z$9 omE>xH1>=P~GR?LO?Tb}z~n5?'ۀ"@*mp`ƳkBW?r@5 `̐:L,ܯiQSJs)E4 ru"i8(YMڕW?.\yn&jTɐ0dkNA^rK+ޒݸtew@xQÝgzV9t `!$o%SUkQ ^d2%v>( BIUR <=0Q5~q |%{&df{|(x*|z` ➭<Hڄ&L+ܲ*ZD@t@ԧdN+5ya+wlK]-VHWL֪CV)(Vv}R) L$U fQIV'A'6sHu @A @ bE552ECCh@RH& Ƿ:v8'hm:e^XK K*VՠT Tҭp{9xrýɤkr~o~?u^rlt Zم.6/?NWjjRKoH$m@+\WTmi<}|9L/qwSIuyq- o5-DTװ)E)3;RKYͳo6 o/Y>Eof}Mx0x6%ɭCzrSIWd)+6˻+ۙSv'vjf&ik]koMa, x-ESrcPշs۝HhOnEKNo[\x^o=K5OUҼ=Ӝ6SCp@qZq" L#Bס:sqKZ6*TL"Q0@B.n2,Y7(vN-ZAenEE;H4߳zTs}jjdXPqyԠL$L&b˅N=@muƽ-nJQPSNJJ &@d6I sJx?]\Bx'hl]ZzKEKLВ(6 [3\1M!`udvf<:˭ H'byp,53 qɖttgEZ=ܪʏz0\h;MUJQݿIb26bWB{]ݦ' [&f㏷.1H‡` ?o6%6h'6)>w_gv\%w u+ L"ÓW9:yRRJ,խ=ZC7jGERF>^Cy'xz|Y2p-eLJiR)Rgե j5JR_<`f{݀."U0ZV .jND6ro8Au#9S5 {/Ӽ1.]s) F1L1~/ a|r7ߊ>Ysr7ߊ>Ysr7ߊ>Ysr7ߊ>Ysr7ߊ>Ysr7ߊ>Ysr7ߊ>Yw~1ǩOʩ_k]n;jJusJeg\.MX~3iMkWm! ;o)w(YVX&)-Jx nM>a7'fKMKuVR ro,=5ǴS@q'7 `b Y \ҚWUVRRy & p'v%(5\zNe-?X2ڥ=&N`|.azY@z[vuL5-<:2R{Sgg܀3[DUmuAu)'TDC)_έ)&Gt's)+]Zhw'oLZg@f' @ _̻48DK-^P)BGf+ƶ>)OK,sH vmDns.r:q#A/+Rt![u%`jٰL۸Z˸YUԇuKQVi^ 8EX)\W۝^ꪜoaȑ%Ky#aRiOO-xd̽QnC;iaw)Mߗ(aL]__ 8myE w9Ǚu)gJ8pο\E?,ĝ uNAseAxNR sG}R)BE)RiKlfa_N)k#j%0= wVTz#I_J⫛:(!iAS|oi=g{^<^ꠄVzt)TM̴~t9⚕GNJQ֊!VX^b7L/"ҚGi^[5fdO*TRO:H1;]QRZý]L (˵8Xţ5דZͯ}IQKwmlX=ZR+ À\Q2a$!SqPGSɎx̬/eL^,)i"$-$)$"q)%\Rt,bkZkJzVY28By堽|ՌS Xvoye Hʳ8]ÂkZ/}Zz`p<5Im֕1E!I-D '0z_kg^P>PWq 5n-iu*W6&}zd;qi/7&]$0z׍wV7hTtu[Uz/2kA)RT Ȉ`mG<=]{` 甚J>V+0<|!lt ZzI'@D=zI'@DV ֭i4 KeI~R֡* ,8KJ:xUu Mq%4!RzC( \QN R]v|V7ٟU=cSQ;RHQi lw2W= &InÊ+ =׬P2FqI;uNH y倭#xLQJH[3;K5 N/+pvmĕKkcnJӳaRȒC PN+x/̟\ֺgV3at ;KD ^ts?E+ܟOpJ'}{>Rn8q*C[  ~V +k57]uϩۅ@.YjK[o,U:w iĦYxGh]MJ*FچiS@*V(&lPpBxov@\t+;V19Jj)<=z$ O@=z$ O@=reɐς?Cn(S9(~jʛYХR=.XlO[U꯷MMҲ67qډnõQ lJ Do٬ZvsY\u/*%6gQ73#3W>E+ܟOpJ'}{>裥Q]-O[nPNT>C:Ғ-'hRH"";in9n 0 EͮU%<:URT} LԐ3ɝ'MݡzvӛaƜi9nUnTAPqwVIIid֙B<\J g~9Hu3.n8l;޴eu6z۱R 'uBIuLMJA }؏ ޛᗃz* Tg;c`IpW\R~6o)xv{4cEQ.7j`;~M)exN/)ilqjQZV.,U{fweWdwjjH*+Api*;GNe%NT %glxK\]ԕZzc,^ hZHbGq|'f[du~Wep;gɭu W#yU6HskJ*EJ/ZkS+ҲBrR:% SX4mb&]5WJ>5~)B@i d,%!4⚴$no89~Z]?W_sb;{9J,hXp{E uCí% Һ,7=<-J]/NM[8>;v%D IbWl -nL|"<"+];ĉs @+=<*|Èm:jiOc@n8PE0 >`e^_>cǛ}ZxJ]>}83]+jj[˨\\q]q\tKZԢ)DLtu6ŗ\el6\oWt(S&xE#AUwLH=!ʦ 3A>FA+|ǯ÷vcf_9+w|&"6kM P([* OiF89*5^û l{&Wk&c(6' ڤ$xPb,%x/:^o5tWIf%Ɖ3\_$R=IGo>]F?0kڼiDZP B4zfkUW.):sz{qk_5ם&u}/CMAKu.̳PICnO4l;z񸤦YVts>5j֙g=*KG,峞(n('#͚hvV^ r_:=,v]J6ª,KS~rlLJ@jn粩/&^wyN,/F F{p-K=IƲ3 %w~ZIP*~1/͓9LǸS:( rH~/ո5-KamM2_(Iur2φq]9|ikr/q^=1_Mt.aIٳSJ*n!8ҫ=vMYE+M rrːhǪyj2('dz][lݶ9Or2*)eeuBT@%@v=h?֛ |KiXWJ1xg*A)SIqXRTԠͥ'+]))ۖSe4nѭۄ(@l@cF6YjjcBQWTKN+q)N)D&P#k|Gˆ}za[sax%8Rwʅ++}i$@J K3cϲP܊.:58%66Im2JBCԽnmwE~{(R<)mQ?TsgC>/E6M.'uaT.C[-O{`0vJQcԌ1tY7$7 ciϺx/]]:NvM+RE|C4 +t̴ʭ!_(7ݶץ#4R0m%H)R JA*Iߝ2IٝA.R{: R95-MjNggcSI@_k]uGEV#UPж<T(-Ԣn6o|tYn,{yHbݻT⧃S)ۏZ X}=ǭ }!9Z4Ooz wR-7- mU\MChx3@T7TD /=}?񕧿/{`{x2looVܾ߽ۗ{ǕbʭGKc۫jn)Jnx'liM:ujܷ@U-YzN]oNwJ$V%KQd (io6fѠ!pBZ7]nڞ;_N/)%Dv~Ca$ :ܮUO\W'\[5㮸*Z֥)J$f`?~vNxE~*!TQw pK5 YZۘ7W̤!◈ꄭ 6Kc oh}ns]Ss옴%WUŬ!JC3=^~Le-+J6/7EtlԐ9> .0#x.pZadJ wSmdm.:mN6R` `p &G<6A3@&(O:r7zF]+ Rl SkjoyQ=Scff1SU؍CzJj,nTWVTGͤM`L"[u.ۂ0I[pZj.G .?Pev$YWjM4-Y$iJSsXi7QL4Dm.7)fz4o\_8{v-U+w*6+ӄqIi뮾믾})o┷ZR(TI'i1K5V')<[8t&NIzK3?`Z3c)r/HffrURE 
Rv>X'gyhЏ8Ǹ:糯tl5ZrM8XSz6 m_RWo{0Õ]5zܑÛ⿰Ms*qq blqHJyE;Y&vϗS}wVf #-} ~%,$١rz- IFT6)? | l#Ҏ)x݋εy7|dVtzSsh&˔Hr? <SQT6JiPKHDfҢg·V>|t ߲mіgwרj_Gwe2r(lLd6ΥJ93/v| ZOw׏뾼IR,"<@Ozԏֻ~Fi'@(#>믫ZN{9}rd'%`%v(Ď~ȯ2m֪\}jBʋ^SCLoM+-Kwo1wq%SlSJغS &+ߊiۗ:,K/gQt̒;`HrĤL$=*.Y-_eWKógMT^tum|K5,A4duNɳIӝO^uN5Hu+Io4W:x<[&KBƳ+V*rX,7w4i wb{Ti-SmMcp8-Fmp{MO[;KRζ>cqEMm˷p%Ŵ;B` |-~NΗ·M=ZZUl|:` 4w'Qj`J{EE26iRA^(TS~fo-JN&6$un\c!uk{:Rum$z~K%r;9 &Ys{}zRQFJC&mo)KdSqSP54zfkMX[=$zZ)])UXI]k/('Dbր*օsFa=eGTCWPҶ:ʊʒ4`gHS)ϥV0L}U2V(ɪ?U=Y[g}AB )"Kp 8ssEZ5jm 0${d7;` G4*i=m6H%AWNO D@dz V-UQָW$?bpf #=Y$1ړj\Y-+\ScnB>s^}.Be7Lu\[RDDvJSoVj,LU&٬Տ[}]cye3QM;)Mh9W}M5BV̸K!CPTP#θ7>m?m`p>zivzm*jpVcWg19\h_vϡ#}%@04߄+XԬUW,I܏tU[Ly?O}ֹ>j{|sb1Rou__=Y5- `s@p/"_.WU%NV8ٔ&r吀#:[r4?HGFs,g~SPe "d2ؽiګq%+v*QTlϑfRޟ)0%Xy<SqzkyaCu!F:;uUU2x @gpZu6m#:5UW/qI{zE "ߥep]sڪ AQ' G0IcBh:Jd&wg~MI-Sȷ$0 5)KXTUaw*z4)No-vV|@Ϭ{ܢZ}N,߱DI`>PO4!Jm#BHTǸ7p?bm-4d5u aGZ@9tEpKYK5W*:T}WtrshNߙ &NM++ыo[ԠE8g)+@(/(s$ikڙUOoN^r1L.xʹZל+D_;lXdZ{>9oSR~:6_c3Ip-=$7Y `IZ]'LQ4IHt[Ѝ > ƋSXͯG\LthՖqy)I*IsZ\kk(#99|Z,:tʔ8ҫ[JnȬTҍE@h'Tu&'9+T3 ]jr]=Z}-(٘NqSlr4VI'} a#\?bq9ZoAd&I]EpmGg+Τ U]VXֻf[l5NmO-x5斞ߨXNKFOJ[wvCx[mIsiPJgА9p#XoMbW|B&{3L@ 53toGX5TYU+5RB6]V"K;&#awkZKD5rx÷ES+~{X%-lt33[.KǪ.Um%ˋ1H*qՄ$xL^A%ӆ/tNk-mMfjB|w%ҵߌZI=WƜii{_-Uu~|q>Ŷ2Hk7ǚ*lY+|dI]-9n['.'BRY>Z8BTZq!Nc|OC{=CKyl>I=Z9o5?CK?{NqA\?-~R]ޞCO;wUWU% 4 $Jf l%B`vQYc]+zѩI8vtR崨Ν7ϯg Ep{j"Gl ׫lM)YC ]*Ee`:tm2f*,g.C5kޭ<~7j3 X?z/0J9Z8;bo`u¿aɝI+bm-A֡ahrN6RGhRH|l9ۗ{ m[seSy>:vtFUN^Қ ԶXWu\>Kã<0l<pnrϚ-4_j6\sٵ䷋k5;َRQfKwZVVP65'F4i9?Zn"Cz="j֗}~Q͘ %w~ZIP;})||/:yk5Y>Qjr{Tee|ޘ -l@Ɲɠ`'bG?rԞ@/W_R ˤ@{Zx`sg@ !(+%vcͯ1Hío*@)3 a}Ϭ}#fnYUϋYUwmd$S^)JDlFh-.5`in$Vj {bT: }CR !o;2|qũ6x^)-OٝkJQ"Sodm@!t[C@&֔%)J$:>7` 4&ǡI;f)ϫ^>>̏}q|!#DŽQbār_ݜ\,Jyz`Q{pz1n&A(+Mn6uaʋm-ր}iIʊRH@񖰝ڝMkN/G8]jY =cly-]#ZV|kEߝ8-IP ;9GOx]E"Si.u.Ӱ;iW&*7n5U5k.ԺTqg}JR2I$/N͍R2&t0 Bj '',kzSWR4 ;k%K՞Bӷ4=CUTo(S8vd;B؛Ǝwӈo~~S(8>? o14;Yw]FES=oU™:S^%!mUl=4KKBR:#T֖p޳p-WUݏUƝ'BߋKdo)rifRx*T?F-IORT:jIr娩kqgyJQVI3$xYZnSm^'! Y˼'F8 O]8hݭ.ʌM*N賰ZV̮\83}3G{֫)ɥȴ.G7me;kmN.Iju\a3ALԌ3.]lɥ#prtm,\1?0P@4hzh>`rP/FMjQQi.n9H\J{ExI{Bs.-qfgC碌* L#aԇҮoݼQ[xxl>-oDuR$b'\nV+yG"++^@9 ǻrn]k/Hܣ>i4]E3r ݮX!##:rocm}]{߽禯'1oN֦ݸ ptYU/L#?uYjjsoZ w/~4MWۡ!ץmL)`Ǘٝ(' tjf;^\XyX)GcXjӡ Kꄿ.Mio+dv_JnZ7%λwu ]L,q2l$"lT_15\pYvX,;$GJ-VO~]VrD]UJ ,֜J%c'[Z>E7 jOx[?sBGOh(U:ߙ2@if:;ru>˦)\O$ذ,.qd.TT-\q (z%s2Y]8iE <@_ߩ*ZO:sSlǢtUZTx݀7.M/PDi/նf)0jJ]%@ּU H$4JF],j+ aD!Go@TG|mmt8o]wR-szBm IJ?4I 2e3Ji,fm[EbuJuRUd( ɵ 9NneW;YiV-~VPV7*EeKLT̂񇊍e&j$蒚<~i)*RV7NIlMc % ߫h!%{nBS f{^|۞uZX߭,˕Ϯ)0H vS`ѿ;zhڻu }ihHKe;J7լvKtoB6ZhVKNSclSy3*cRԼQ!(iJ]hTS2D }q ыݎ]څK]K[N0V홡'AS]Z.W:}\S4A?E)]v9Cٵ@6C^R[q{C)f[k7šHk}c Hۀ6˷Pdu5ciACL[hZ+V*2:0K֤)EF~aIaVkN¯{:m7?@s*&3ɹb֞ |r~3!wk]e/M,*P!&@.~!Ny5@TIN㭯ԨG`4{P8v[6 r7"\7z=@cgѿ;\ ZNέq+{X_mr:zkwx*fT,Өmٶڧl*ŨCI5UmKaKL2\p{enJ@{_[GD}PJ$ 69o/$ ]liiSP#b7u?я 6y[@Z5%'E[i$$r>SG&ROr!#'ɚZe65+m^e}cH*Q*ٱ)Op@uRpp*d[QtMsqJ,!V )@F{*uNOg7o4,7FtLVInvZ:)C[H-_WX.]þ͐P$ 4ўYp:#pR+U+c3 5&y{ S[<'B'z߫֔NUteIZBTVGNݛ"͟ٵe6Լ(6iϨA5o̗7V"BpB(*el:,aG~õަ]M?9Ι@Ҍ̛#%Mf#!3R LV۶/hrKFs.9tgZhx> {*_=c&l XVKddU1ҍk .T<6NQ. O \\BC^Т圄N(+Xl2xh}:\ם˞8˞Rh9mIW?pE<(u133 q]F?j$A^&.^2) wt+B?dw +ՊEs#SdE^XTs^W>'I׵I[Z9E:Q)ސb!(\@_ߩ*.zaRL~{x *iR)X!B\dJ46-KjTzJZe%Fz{Ef>?gSmK25Z%׳`% ]2DŽ@:խՍOL{mmQ`6OA"83Ihi}뻡ޯ5-]EK*=Ry&[}Qi]Pvhm޶׊0|`HB“":Wm[XAT`zV; jNJM6m!;)!5[NM Ѷ(E\0P7T}Qy#bR@> 5ϺWjVWX&]Sck{Wp7fQ{oUUF|G-x=v9xiec4%/D_Dҹm%iIi)7DFAiwm=-N=\0M#NNQV ȳ[6f@uI&Cg,{{Q^o$+޺o3ofQmʯlVBk+Km)UIRKc7߶'&Z|;O9Ϻ]+h0X~o|%w k΍iv?Hd*KKEmC2[O̤Fo+\f4QI:_)y[meWF*Jz56k!Y>*A5ԡ$NRv;7xF|qX| x}BE4;?u9WRl)EEmK{-(i>Tԣ! 
j0z9W2m%ٕ~x˵VfZ-'JŮ - BߩRHRXl))j@ k'Yз5̹z~貿7Ŧ_eó:|;44d \3<⢦e!W?WKgU8JzֳLsNaYԱYr'F.{.F6u& J')FI!Z-srwތƄY.>3"R6XtT9ERiunʘ)ikyD%/Ke[$7iUFF:>^=[WY1XmXm_+†El׊Uɢm dv:R|2QZj"VPIv,M,x. 6c7[oH# p<Mxi6{?]ݿTC7Xf X]DܹmF;U]|oh[ Pg+RLj-/9[uMsi%=hjÎ+`]wL ~ůԕZl(sD>`i'|6$6i.⊮]ߦB}43JZBgw"n*"n*{)mNܝILm[e-)_Z.Zi}UqҲAuҀ) D<׀'| 6 -5||Z ͶdFrqq?:܀3MiR6q]n_=;v[fVm\XYTnH 6* Z*RM[huڌ&GKş+MRAJvjO[bܥ-Ƥ%41db~3 LJ m$ .` Gt?0`;@)H9@&`s2 Z ##D<)&q.M"h2s -< RE8aIMj`i'=zi'=zi'=zi'u?kOR#3ԫxO.$} ӳvV5{[m}U-6Vm)sTҐ$4@ B>8QQA /]cvN ICɿ EB8Oׁ -3؎G4 䱖VtEi|zYtZkRh+[ #cFqS)<4p}PRXj}!KM;s}VZh|ӎ)) )]pľ{4|wCF-r/+CM2ћuTTy^Bm|UjGZRk]I@4"{M^>ۢ)T_Ud_ PJHKhIJR$H6# 6lcC9g}0yHz"Υq/qU}a^QSPMž]ԤV;yiaoC_M۩_uguL ߋ S^? YjWRQYpo6k eJPIZtWy^YJq˳Tm ޸SO4]o8+l,fakuUAʥO:dːZMO7|gK[jtiF4ZZ;"OY<|"Om7jF.]-8E;-ӈT’5YD@T7N=hG_FVU-)PO){'yhTx;7U2Њ*K+J,F^Kzޘa>EG!;%H&_ k@KS?=&K_\rzkIʻ }#́VeڔpΪw<OwF C#h"seq)ޓGt7J!ץF0]~FZ'\냜lCyuN`aYf{n]KF t&jڑZAP2*҂Dtvk%NS|cz&,ų|&52JT)y)u0ceVqZ1RXJ)68H7M=Em;sXǐOQwW){pdqo'-Y2zz@eϺ.c8\Ý~C{^:\[q?C8 ock@{%@M ]CP@YoOdw@D뮱g=EW[+oSWF isn9gހ) n%-JM|oN6my-a%IWB.I>9MRe]p[* BO\vs@Mysni%u}E0(@RWg`6BcgL6RQ0v܀6oR9W̷Oj뷸*TfARkq[|`Ox gx VP^\򚨨C֢ҷwsP~l;s4߂ [K5SVi*lM.WRHClfBR6F?nS\|B^=Σ_k}GV}=gkv {<H] 鯮9v}q]eH(a?7nj} ,!J,zvu;VrnKJT}H Vb*+b2KC<Kc|wV-p$lUݮ,y Xh8?XZTo$R=@%쮢~&.8$6N磢r|VH@ ݪ6t-gԀ)#[\UW6#v[zՍ͠,N@c x-vO՚E=O~RTcvK~ *V h^e%4q+]cN4U-%KVvI>D+ϒz:,WU]ͪzu3Y3B/;*wԆ*K 6a;4<:*-hT"x77zԕZk[X$/K)! Gu#EwZmBԄU!joy93ݴTr .Toj^4Rц-<8;6i][mMK*kهƒJiKJ6m9†Pp–!Gdͺ  EY@CUUo*D ,+(g9a]֯')[uwK/,keMS[|-ml4OjU\+]uJ[8)K$I$Ũ˱Z[e MД@@Ld'[!g9!Rg`dLD{#FlGoc~ڜӼ:r8#~sLg[LvyٴV}V=iRSOJcO4HRI Tq̩eĶSaoi>pKn2-XkF!l70)S 38-ꪅ 8t*[)@RG)YciF IF RZV"FV#~Coc;{חB?dUŃ2z\ޘG/:fU%>%_q}e 5//{}^6j(LMԐ},kƣrVT^3vBK|'M~r~N><uqQ>0sUTx6*P'GB{uKWUxu)6i&uNϧ;c|};AՑ_{{V~0X]K|h5,JRfd{;"Jto.ԮPvr7j|>̶| ?=8?U0e?+nlEwmN]5mJSrO("=RqxΚq4UƸrd܍*qjq摳xPJLGO$z&BC-׿K'q5N!M wSa ^-b%++@%/Q_FܟZ> 6[N)okjaIOx SC}>U|e5xl% zK҈>TԜ6!ί+Eʍ`+r]G#n;"ǐrc@[8Z^J`:!c!W9xD;X] (PU%֒ %<k ݛt@V?F1hiKr'v5-{ i[0}|0*%c/KAMWD*|S)3ɗYp/VI-n7*riS2 և[3v}o~hȈMV)ϝ}8PC26@>pd.D7RQmvU9;vl,N?;yj [E%Pt$ǣ!h1{Z4#N5 Ϩ::Cqm>G LHSSWctUYQTWD2WqPT^%];@WLrf(i$<n m(Yo[b;=DV);JH}\@}RKoF@OMvh%#V@~q9Z}t'*CV}ԴoTx5*HJA;Lbr>%mͫF!c.R ,y8_-5ihjoTכV6U`H@#q%XM ՞"ӥ-gٽjfH0HJ*tv1JF9 8ml(:nf9mlelK[mIbmKj6DZmMfnխYSO1=bSqeʥoX{[Nru+\iQz1\>Ƚ2̓saqtiӃOiBj)L:jL;jZPBkjyƌ MP-&CM^i]-I#l'i3DR)Kffm @ z*jŪnP )rxOMlIo^-hU Y莀g4o4䱧f}_;WRYmU5E2]BT$G$^(r9 p5ʐUޝM%OV+*d^b) %)t-%.'QwL9*;W4ֶ\sm6,0-dEZfާ4}@o+\mb ٽ*+Cw#,xU{*K +.U\B Hǰ3~(wS;;vw/qeMsU/TU}2'}T{ GL1CT R R̀m$9(u0Y.5Z-]mrnFmh㎧Jc}^t-%wQ`j_C7ٝT3WYei 婘FI%>O8?I+/z~0ΏR7om2ep_E2αJilwc{Z_^Cj]mǵ~ƶ>).qi-ꌊ*.6:› VBYkq2ʪIx &V9U[_]9~9WE~S)~dJUSݸ7Aڔ vUoݩ_Z OSɇ0e],W]W.5J%o+rtN=X4&V9u7)[ekrfR=6MEn 9qcfaubٱAR10jJUz[k[B2Ǚ>}cmPk4%w~ZIP3D4cd=9(>u=mK{@,'E!orр']8ɨAO7G?rHhe`e)JZQ5MOyẙ̯]+Y*=TTʍO!Z\uC ~(h>"uB+קiJMШ 0h>#`R[9ruK`B-S.Hkf[Q] -8Hi~i'G8ZGnay PMEUv06>JW.1UֶeL)y ) IRH ~vɿg[W+n!{:[bmTnϝkRWv ATT] 5A, u^=ݗ2ҕ]gx%}; tXvQVfWc'q 8phmG@MOORD`.-y=Eƌk^Dž;#xL9XL'4ɑ`)]s,!˯u(/U:TpoHwoD`rAzT-ꔚvxOw S*o'hɗJ !CArPqOâHbSѣ $v:x=Fؚ\|H-O@Iu,9{5F;|ѿ}Z(+ܒ,Z9L͂'zKO===:9VTM:+HCJ'a;ѿ;=e'k.hoRq甡=jKvƊ( 3B&Ǯ5xV^ךԞTXeSeJX$Rym;~m]Pi J+tt0~Ci})Rw2H0CZݧq>rEں|RS*yz嶄wf`0-Rܿ?6Q]\*+\`mw /ѿ;_fQ%z)I]*MzJZ` gmk0!9s@jp9?F]acun};P,0 EA -Gt1Eu'om1ދ,wrRo oB[YGT;<'e+Suadu?MxD=MoՏU>F6ai h?":jZ &A=%şmU,)J6O֭aBPS~7%c| ému5ئamCh_5bc<2v,x=8qW|}HV*U) ;ɦAJP6$4xy75uk-Q\ |Sp;;jZC<T ĒjjގKܼ\ސJ#um!2J ؔ'q9.Zm̶R7G@*%N@j# O~r"wߜm6t@ gƠ'S2JJ#YUyf)Uw [O0nS2I>#1YVߓ?4 >hg!k#\2ވЖJtX؃ޔ^٫3Jel*Zvl]Yg6q.KMuK(QhR\#w 옖6c &֊Yd1.͕GNkN|f=ي,{)vR>]*>t)\̻صŢu'ji>;y6.5)Yr|4-ejf˴[c"DM+Z&6nt(!c*9 9C1jqWM;eө%*J qcIO2OO#bu}XU}RU(WW#wf~A.c&?bJ%Қ_9sH6Ow]⏦Ǣpul|C䫙=o8ӝJ ;6tr 2rLUЀkߧBĈ rxޛq#hmڲiwnx cD/p%}:tI:oZe9!ຘ\7k>^QC\H ڶ}==;y 
k–K+S~~u~wZBKiS -@û믾>R}d)kZRyI&d=!ݝpf9M44/$ۂCM*uΗRTjnϛ}L9HҼޙ[@"l6@^]jFGHCIj̤{ʉ$D)r9x}sԬ~ Sm$7CxXu[\Go=z )j)_bURST!mAIZRH#h0ZtO\Xqz](! S_=vQPn)P T̈L=߸vԌ=[df5ks1KqJTv ; %k5FO/EI,* vl];n$#U>4<m`!#NSzg8ރY~ jAf}6RkE>:MKuʞQH.(xN^/,czݦxm*mەRfT9vI;Lwz_Cj9_s=Tϼ9Ԣ9K× v=&{/3wG+U*J\ !ChA-:Ieh*2P*q×H]lg\1o远> a0h(I{,#kzOn\K>L w}xj>'ӹ=e/}coze1۬6煢ӌ(HLh GAkGRnVZ;]fp3)]*R'q]"J(D0wn+rrϐX[J•$R JvH0apg,qv%o 4ʉS-Ϻq+*as3ҭШu-i*UwάɟUSLtHJSK]6s@^u~uN<PCy1U=4UMz@&S޹*&]܀'[$9YDݞXPQo&cΪ  ! <$,^(F<ݦS;.1{(l[A%$z D}zYp'~<.]0G|sqnʆRȦe@4HRAJj?mwB4 ^۬m2⺋B5}`xKjާ;@@R6欺WzMGZ]oFOkSseE+m&*kD֍O^ZxM N]/_ n9ͥXY0ZQq\ eC*(}^Nʩ*X~ou,֖ Z6e;כ8ƌ|͏Ewp<7lδP{\7MG\-tnژc -ڵ)j]A$5yD.k_\Jiu-oĖŰ[eX k}]pWwӚהWURnZԭ(\I$b_Kov!'LL+7sx\o]`@x=79Rc@!Cӊ'.VexmΩߴ:p6w?YW.e-[9` c(V9$@@{ ⇁+h=s<$c V^ΚPE'T.Qꌓcm8ib52S/2J^e@iRLPVADuUnKm,l^c{z9 --S0vEfͨTR[ 싽3%YUT)䋝9 1* wu N \ F%{otXUu )kKK*K҂:ԡIo̎b# {8!q(jBIeoӑUQU GS,1M4־=XjsZPfs$'o~.{۪q*8:"+ \F_HŻ\GB6t[LI* =Re4&NvBm^EWF'L}h_RHeabN#&68wmTsJb'[+[)yhJٴ"@ě|֎B+zkŅ{\OlI ڕJ(rg'6X)IhOn 'Z/.F/E3чnXѸTA+c ԼVz8ψJd ҉W]e9_TZ,yrvTC)6obKBIiۋ-{T[qr=PԵGLQX-)I'-Jms JVΙIMYK/eeg"ZFwR Tzb^Fʨ/&YܜY+y% E՞+zvѻQu|X=\ݐqv3ۨ=ʴZ+.N#wf2I⢒gIŬ|6 8H%w~ZIP?:;'y@o AN\7\DRSS n4~6Y`ew񈯹`;WGڰYqb+eXIu[r]jUGaD)K@G 5(=ӅG9%mK n~3Ϋ mRBEeH-?TyS6rGī^ȴG(VӦRi2 Odz N`hx% 0`F_lY5r=VEe-[*ԺTӧj V %T5uˇ̣DEP/pq_ruU-A%yI<y>tLQ!V\\uLj+)l}*S[DЖnCǎS2خNNTm5RѮս5J5 P<>Lkٶqj=rnW[>&=~7[G<_ >͵yՖhvNڤ&tr* em7`gW\MBY[M5#j% ;dd) %G"uHp8@)Vƿ~vz7` dhOE2݌x-wIԝ gɀ;LJ]hRСZ&`w;_l1:.UL#w;_l3hKTG*ayMCiKw~wGfݘ.eO :ݽ76zYu<,PjIRm .P>XKЫ{oNjI<dT0\<ˠ]d-k$Tv7nA3jbي^,u (rq49)#72g੊>W \vqí ޗ px+5\il︹Kx \= Z0%;Ml1RZ_dbYME5NEjfUJnպʊ}h'lN̫Ў 'Eu6E;:[!ҝ7,Y=x.Wyt:S~cޔQ:z]}}x9ɶ~OX &ӓʍV}zx>_yt٦LR~`tND~[}}Ei} }8(^ϗ]sl:sI?}z<{wˠvu}{XK]+**(QII -E-qsV2).fYkYUMUhɩZaεymS9ȞxFXx>LL3-iܬb>sZySE#ӝ`)O(w'WWWAf[%IDÈ$*wu;엣 |v)C)%]jH黬*bwz+:%K)]2V_Kps's̺ ]Ah|Σ6Q{G :m* UF_iIerLJUO(S{rƚ(<Ů66WeC!K^6i C^p`k%%aśr_P53>$ŋ4BJK̾H*QM.i#iIZ`adu3Il7oir4Dszҩ7)ko:5HQ0]<TH KXV-tWPOx2S2G/L1G<L{]0Qt1G<L{LR,U%Jgj $Fb-Sx-DC|eRj0S]tMV%,{ՕuJJP>܈2 V1Mz2zn%Ԭ ` HR6$ ٚBB7d m[*IDgZ骪TD+IJS@ 9ejt^m4CP4Σyܤ[ZNd9ոS-Ux([z 뚰:<7R!\!_EYeX랶^.4 V#.H0[0>w<ùF2̵6;?0yNyd60Aφ|DXԷkmuNPTnZ)ߩ|<]$'f'r]jN wze$u( ޺*pd)@2\|1e iK!;dbIBtKIR I>HoAP1(^`z2xcSuYz )A3@$nAcj1J|ٮ9RlsdnS1"DP%*$֌Nuޟk5NEk$ە %BXtCRT{@5O| vሶI*B@&Q')ЈN R;(q IT=|;Fyр;,ULP9[[Rj6[SdP{z:Yح)5M.g[pqGu RHݨzbx1.4ե-tKM:ҒH*l MA$1%c*2<y=[:E 7}M~O :Zj&l"MPaڴmNOHp _xH{Hik-$hrE1uTF B9@=aVÝrz)g[c%Sk:澪b=\S! G$!rs_UO1\k#ˀu}U?{9pBg.kq~CCMԁJד;`@LߍΉhU48T4_­.e4w BghhٽnKJZ׀7.ⷣKFXy 6wϬpllv4oD_(T4qgVZx^h֙Um5֔ 0eBU'ŘLo(2wl˚*Q] 5MO-S OQcl]?2ΣiGqPFWz݊՟HvARQjcJ8kaUݰ RQ[ԄW?AArK*w{p9V#(ݧdeáqSQ`za\7#{w^[vm)9bz1m`ԸVWgm20?x2Ob̻nߥ~2{١ǠOOs9/1+Y_Co~ч+X~q:Uomr2x2_cWoaǠ3Oꬣ+/6KC:]↝.2q=XcŘں F/=hm>n7[R;#)C^Qd_RRNfMiAֱ!&@|-jmĥךxרgYmF&vm=z=߲ͼ%CndVZ)k}(PuUdtR#p%*<Ďe\{G_"~3+*Ƣ;u x>u"m1x+[mi6@˪X{}ɖuz'}TLUڽl(ʻ+U{s2H=I ˳-[@~m@¶.ڀm&]? Lj5WA;j_J5%vS{"Ϗy:p"rGvj{ksMER2MN%J ؿdP[ރCLAFz]Z&huդt;IIo ( a."͏SжJy G$Dh ZYJ3P|LhHvtfr2 3lFimB#uW*^?RTP,}+y%KJAQ򉔠 kdZ5wLa*Rd<yϺVeNUкnJIk]JZfeAMž]jXqEVe%eDȟ',w-_Ć[vf7| 3.ø@4Ifk#x4r\|U0f݆&PBJu`JE*.Mrletܔ$|A(mJP&'3)l[6Bw@KHC% ٰ(yukoyZTL@؆)x%VB9CD%OJu-822PQd.-LЇպpilj֌zÎ٨-9NÍPmNIBMP䥾$eT:4'wt$ LlI0d@N6TXB L utRi2 #YrV Jτ6$e y-Hn\n@$r"kKEsΫ鹷!'r\NIiJd|0Y&_]e?o ybrbrzK\%hci՗_q!MEԩyAHHwy ^e<NMޢ8oe`\>NXpni+M?7Kąr)@(;S^UIRNͼ*ͿPYP].9JFE)%KS#=H;4FSnuWj(m0x%'[iaf@` tԽi KSuZ,N3*RTJ&c[i}MH-ewqD l-:_2=7/0ۡ;^[a]Z9\.d:gCԦ, y#W=wE(qL :S)'9ͳn u[XYuFTtTo(±~PH<r.zs?K~~9>9? 
{t?uT|O8?g!3zG}ꏃ| '# >OSҗH7=Qo?a\ޜzR:> 'c3ә=OJ]#wCPGs}rzs?K~~c\ F}.ocz?z1gapә=OJ]#wCPJrb.+av$-bdZ)ұD+LJwYjՕI9I޶.FM(+$$"b2i%2K'!$@릋ZO5rqewT+2ʇԨ *-.BjC΋w {h֤Tc?^/xVԎ.5MKnS/PMNWJܐ'uD":#awB5aWqy2Zuִ%|n\2zβ,v+!.*4ʷhAٲ.j0*M<@Uދ*}hOެWBf1uj] W7dU4/.m`KL|E֨eX*m ݊ؒHvA0XBÆlJ6c甐r8cXӹ0%֞85o*x[\bިŎȍ-Yj_,v[;5v.)l6iRT9>+$@+$@S8R7 \;`8F_Β᷁WaTSRQnt+̉ m{>ʘwԩBcr}|ؕo6ƥE]:=ݿW^57V֪%f:W98s RC$̽^O/.1ڞ~؝[Gn`Y m|mNf#mhI7+OU*5,7k[zbZee)-5WbQ.nͻb^2lQFVf8ƞ`G:QYn;7 ʥt'd%)?{sEOqn]w;)98 OU[MY26)7t_*iA4i9x3i^JK-]/蜛.V~3y8jO##e.^I H3)~ETWN '@tWl`>]:| gt?v&kKM@=>gow&T.Km34x@鍹%NM>tr{K H @Ch[x|ri*T>\M.Wo >l>$*m~H ZH)2EhZn%L6D; ͥVMLi)Ԅ %BHH@FQ&m60E8; BH@"O *`Z[mQL+~O0Wl`>]:| gt?v&~ ퟱLΘۛ;)'f!KGRL,'ZTJ @f~ɪ3~C1?g;uK59r.*Hh;U@TȕiKL/S77{pw'UX*+O ~qؽ)-{ݑЖ}rW~.{D^پ[Vk~6͑uT_;4])U0{}5l<_Jؗc';o{_e_*-WwX~"r>W mj?"(՗"\jMqHRV= ԖS5k9&$OLڗÍ&3JFKSPZ]M@I&gbTS<7)w֚jsGϯX_h^\3G*XKI/ yz*X#.ܝ֫Ƅ|r| õm[${SLSػ iy9}[J'exm#dw+Z1Mac3̯*]ʭY96w>N%$f" W9 OT>*niZ_w ݞ~ϷڇZtjRǠ}TlIt'~(*2+.I$Z /)\ESbekQB2{HtE汵2L ~*enS9}XҚq+-%pU%%IpѨːZh[ǣޟK:t2Ue VSqlק}hF@5S0MSBZP (&*J0qLd##._wU~|^~7e7ƾ܇} tŝ}xp=rs%?wW߾oKDu4oWqVo4WBjÅPm;J7=.C(iW*mEy1H=3HVuQᎬzU@o)v԰ Ryd%BFRdx}*)WfBdds%koSǁ1!;7=_B]gu~m~qLې.o?XZ_Z 3zO[o>e;W_nJr2naaʞ:k81Hr-E'g^~9xc[f-@V4m Y2 H$Z?7ds%kŝ}zArs%?wW߾O|^/qٿ;oS:롺遣zjO vmou]wV#(=ݦޜ㫭^d9yӯg8 qxcx2*r_ί0o:ާeꊕ!zto!yJQ$$6Zy)?L>Sl#>fgK˵߾㻁/׺g7܏>gK~-n}O;N.o<\O!m~wJv.Ԟp.ˣ)qk|;.!.lVec8`VCj% FW?{O948b7E?Ed"#wM}0Qu ^FycNBd܏Trh2S{[S^[% +~i,bݒ3R4բ C.PclĨTXJ/>nZ4NSOgp~Y|j_:U@IU=lsBIp䢖-zգJsiE,[K[5*_EQXT-5N:36ޛncGv &]*V)-2o@x1oXˬ2%+_@ 9VeyYE Y6mMseNFT;$sՎ"w(ǸD*wPZ.SI/۞ʴ˭R~.}:5%xw[wRoĸ[-/b gbw&`KV!2z+uSӰ$@JG@yVU$'z[7=1" qTK& W:R3ޫG":~r%_!~Vu[<_}-ȏC2W?.cG3Ȏ${PG4Ie4h #Dh zY>H"D@ 33@> ecXS+b'$(wbl-E @RbU &ɦgOs/^B&J@7J)*kU@6s.T֒eh".rea) M V˥5ѡ&VA3S!|A9RI:kI1ѡ "WSH)rhp+ Nx;O"&*T&^H"F&VSL4Or$QWMj&:Tg<ؐʕR'H+RLtķL$1L 6sDTEh&t`l=JȔNL4-`Jd%Y:$z) 1'"`\#i"f?NGY`xD/YGduٳ!##dY&!=G8 @ s/1m@r<8Ǩ70XƝ SOPHq%BDJ%(ҙ*Tqk~gݹ}gN[~GE~w&KViV\ޢ⩭mJ Wom(]xU_\|+hZxZTeMz◊[u?+-},*T_FˡpjL%kٲ wYZuudL$x_N_e,XŒ\\}s˹E,A7kV'.n, OEU^hޫ+GP/CFCW,"^H!͠f; v{qtmd}u %RVqK_9 xm_v>Uw&XPo͗#j.GGf7w0˓KJABf-%E tTad}v#};NYaZq7`6q3Qg:oE$w\9R\e7~ZJЗ!d7$ŝ#3 ۵o?uV̾_2,]ˤ˕HDUԮWYe%5d"J Xr윤W .nr kpl,&|F{($xc41T M[#ZbZ6b]*o NWoFҰLޥ|^ݝugԵ~|/p|v ÿ[6(blo**Y~ q痵J<@s[GmaF X$\R@J\?r9.캯cF o5ȏOYkx >-_<^}kso;gُ%s@f%@꼣F:0=-?A?/uumqkS7B,fn[_Ȏu|~_fV~}3Ȏ${QKR5Lk\O,I"}\YX"|kGZNT~P!ʚ;)ySs ̦a3J=^(dG>s.&JO))*{]Hi~cbEʞfG|E-BOZdO;I"OaIL.tcWQ_L<>S2OY31sE<$Kx"D)+!ig2Ua1S|eJLt9L7rOLTܣLuPDTQ0Kd^HNL4-v/"}H-dnOT7*{ &l#PDQGXGz!zȖ878#z27Ow#ȳ~LB@ @{Y[5~NZUTu ]e_Jn!E*B(2qx QRX=)|W܏eNcbۆqKR.tP3Y]b6 Ӹ6oxRx=5n)ΚpmÅqkL.+}AQdٟvfejJwӬ+m)%*B m{i?d8bTkNL4-.fJ\.ӒV{KːHvv +"ǝ}k2)!~k ^>Ȟ t3n*uLPU*6RFAѤ?3| CTko+U-Z!!2HEœ`b$KFάTnu%MO[Ǐ]S ? bT ,tf[&~D[s !{p^>zu.9%$wVU̲{WS,X.>2ˮ2d'T2+!~&wdM62ZǕ~)ߒ~@?gGPJ9aK~\{d3~^eT9=gKٮDyh6dyWQ~:?qf<QQCo?{ԕ~[}Gh8@GZ9Ͷ=f?omO}oeW":~r%_!~}Y+1fπV":LGʉ/YK#1Sr$1L yCSDO1-Gy"Y:0ĶML"\G=SDND~ dp#ܩD(SCGYc"Z'4㬎D;?'܏C"DB@ @ ,vSaMp2}z3oRQian5% 6!f ג#'ȷև~.a7~o͛ҶىoݝuW(Ӈ5Bmq5 bnb%D9tJ ] %V|CpxWnJ)#JҰӭбxx&dhn6JKwtJ/pk3]qxj/1aߗ)ݝy\aKF![( lD1{k!55\ʙFP9Ϛ#ȤD}VP==ҧ! 
=Ow;4qnTd KgH2|%)Ь̎V} +і|씥 ^1S:xc-2υhΤ]V1Ox ƅRT),&JyHddvG50,s[=NY*W_&nmt{rKosS<z_|sK"ʷc}*=|sKV}=ϥOE=͢+Qפ9NOS_GYkEK=GmooOdτ]}OoU5a8?bYVu&:l0?-; $BaTBY IIltx-&=Oѻ?o/\v>ҧ#tx-4?-C6:\UOsSnƞſ~;GK^ʷc}*/=E{H&^&ڛ-UAk5і oyʷxVv1xMyc-#nO؆Uӭ+9՛^NuaanAtUS5YKSFwتmm II3=Oy%wt.]OQ<7XGu;GK^N=ϥKG/Z?KSb?o/\(ݏc:g:GK^KOsR |] ɪ:glsK<~{Tw'b߼?*ۮmtd>/|^ P:+#XK}f"4b|=7E,K8x FԝѰ'Ӗ)zQ%^l%?Et}wUGRE 5QN$V#E4`ŧXP-+M5 Pg3x)b->g({RP؁£r @2Xėڎ`,9W$($pۋ[Z-Gk Uz$9/wIZ<=Cb y:rNiC^Sry8ujE:ޚwfRHs?X~Y>v8zǎ+92ħoÙT{WJO{}:ԥMmL@XI/_*s=V'POV%=11|zOk]d{v?q+c&Gs珪{3z[c'dzk5T5ُKw:Lj{"Uŏ<_|Os]2ћdj*=c'pWƟ{=JYReUh]dw"|isg7əd!{[wIs-Ɨ{75/SkB%ػ$պV*]ڎtn$-h/:jj li~NUzߴ]gݪ2tn UA5[OE|.=(ooϴsDqȩ{#o)ߴDœS}ڎOuڑ #YT=Ɉ7)dwuKt*ݑ+Xn%ڼKfGt{[қ+|JU?chUczmhnRU:Tjt$ǾK~[fԍ9]P qh ժH"s-(x# T6fbWNj}0Kh-|fޘpݧDY6]Zn'W'HKVu%EkQZ"UKx"ja(ztwϴwW\iRw+VޏP!f,{cIc*<{_7Jz1z;xx)|yܳCP8l;nj+1r54)P6AbJG ;HQq6gO ^4jZױ-pq34k:Us6RjP꘺iaMKzsp0;e_2βNkONV.&t{F=k-T;-1kѰ5-3 ԶL(BR6#xfQZ@ @ @ @ @ @ @ @ @ @ @ *b_YkݞO":vj#⼕2kQ ]XzÖyuFjvǪOLޤQW5e4(K\bp­8pͧ)>2<< \#ۭP2* 1f3qƐ7P -BzBU%-oKd@ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ misc/logos/bcfg2_logo_800px.png000066400000000000000000003066621303523157100166210ustar00rootroot00000000000000PNG  IHDR 2tEXtSoftwareAdobe ImageReadyqe<TIDATx]E~=asewIKU@$@1 gLwz3^0<3*)Q" A$Mfjzgfav>-fvBOwuY {" ((((((((((((" ((((((((((((" (((((((((((((" ((((((((((((" (((((((((((((" ((((((((((((" ((((((((((((E@QPPPPPPPPPPPPDAAAAAAAAAAAE@{mnu7QZaBV+((((((((V ۖ9g"Ϛ7ie#/ΙtzQ؋ [6| gi #w?L[$+2l~K E@"P[ ޟ9Y4G#4aڵ˷ck BlPPPPPPPPPNرaдieAr) B{l|ϿCOg@V?(m׻;,PTX7rBw)i|쪬 l^1 @^NZkˉzխï(h|U0zw/Ɛc؈ē\gV3ߘ gO=1o9懥𿯿@x{hf8?oZP "A2p[o(k/ "h_V ~6u! QӂsxH>J  pVL>.koPD"QXlds=oE~$HrdX/TIAAAAAAA(W1lxSgx?__@^P'G^.9mκVxއ8hDXspppH.>h1LnUk~W,f 5Hwx6 iA\d؂+3m !r9?iH{s*n}fХdw=5z#7oU~i FMu;@N7PPPPPPPPPPсX?|(m9~sꋀ_OVsvEUx਩TT" tڕ'n}INۭ2 5~i˃'|z=&]rd5 yO>ӗg7*],Ǻ:f:;x{."|ʱܪُ@m> ;_^}' nYP^K{EMJOI\U5!7"ȖQWBho}9<29t<,{ǎAAAAAAAAA `0 n}ɗ`mu% djz9 4wDBr[~Fu3&DN;?f1΀_zNCȇ" (jn HFVoK LtERc]wyKW K'Ͻs>V: ޟt=\8v4Xii 4 0>? Nx*ᢳk6p{ނl(C1ox+ ScO|vUۏrbیm1*}x"}ҕ{עok<_+b-s[ [´3&AyV^)ߞx .~@V/?1:Mo?@\<\[`/%c zx_XWcC?bx*vgǯ?Z1 /!\zuSN%*\AAAAAAAA7]{'+0wrog O H5Їɚ[l}@׻c~tM'jNQ=ԢdͲ *W$ DLF>Wb[oV_eߞ;׳J|(((((((((oc`p9aCFّ]U;y|>$c;@'m&20CGYd$\t!+^Ɖ{Ͳ_k-X#mRbd1> ?5r>{ٷ\ћ.|-,6nUzZ+⡠n }exə#F swՄ97m}#0ːdt:j*-N,{:tVϊ?,,9'f?x 21a_𢭃!bw@F›~z/1ḓ*C֭ix*؉_E@ 56q6zyؙo~ ~Z}7 @ðzF@DDC,#1ɊeJdDNa1wL;V4=S~trBH<Iq&J!Q(cA>6nP^">|6W"C$COt%7X\HsMK~r%wIMD1a1 Y+nya#6d A3Z=3.ߚ_`ױmT/)SPhl8*߶M|^LE@.(zJg,~ ~nrS̀pBx;Ȳd9 3C[K\dkI F&(&YK1#٨MF ^C^d>f$' Qj+kO`[~I27AQPB E@8(m.E5N; *2K[>:#Fɐﲧ+^uє@ߖHйY)nf#!$b&~Lp>b&,&!"oG}W&%\;n[ hoLRp(4E5(Ҹ@U1Okˬ… fEYQ2, 7} b8\drᴀP L,U$A, #]xH,H]r%:r1 " $$jƯg 9?|>B"CJ+ƹ߮Ū <6zKZJK157Ѽ1Pbe]vMn)+++V(Ai(_ģ  2apBBx`¶_#!HBG! [$L]`Ip1Т1ph?R-5ܫ ^D^N@Id|itБdd8 GǢGy 3 I$"㐈O,E:X.ϵ:nEXeczy!- ;7}D} 8>rK904ɢ Og&/Y:D|xs'2йNN#M x :ˆ9P-23_@?G' YaVY vU2njPM65Ǭ!"J${KkA2!Wap2B#"#)1 ;-Tñ"$i L4ZhuVeaTK]m;5v^{R y^?@#| c_&ԹR\A@ q,}F&<L8!%,l uq389tlQ (iv#>9`pr= 爉pm*`ƭ|-DdO  %Bω_Ш?H|Uv_EiuN<aDL$!=_T֬ttpTWyNF:R Lc]1/JKJ( E g q8W4%'ht DDRXuuwE@>z`x n'LCaaw_QJm[B(o ++xyՑa1-ik.3}sm|ގ!$BUlYk7l ^nشvIDǏځ7r"N|6i<(Ő (_Z<$"6cZMMd~'Č-0=id4o J ϩE3ܳ#c noV7~Zf]Uhݾ>$hQ!Y@xYߙ삆j#O;tOC /2t)Z &@Yh.BٞnT5^:5ԟ&wL݃dxtFHw^8?gz׼hJq]G:W_cI֣&GD;3y !Y<5Rzu TacVck vi û.eP2"QfuS'V{+J9\] q 9h,l07 Z䷇CԪjXq+|lxϿ",6D2t sAxf-1d0V8)D$HkIy~o*VuK%,R*aylZ]BtiHHF<ƽUK )FRaކ8"l_Kq! 
!ٟdZTBPcqgHjFFx=ޓԅ YN*L0bF=gαt\hBj =%ɈE.3thYct&uIeR|F5I&@s1,7lVxh90w)v J spAC(e؊pNh ˅,"Ec,7a"'1bbRND˄p4k*Хtk]el=È7[;jD"AAYn kPdžQNiz JI's1pfp#ΦNxJAzAn pl+xy ӫS A&N25:dDҍxFՋ"s/2"ԠqZCҵ>zqT^*b9kV]h}^pl_]ܐjtH:0:|w,:XSJ 7#&J[wT<Y:WU1K[:D)1]T3<󚃹hI?0T`E!BBlٮ [w@/PY &9Kc\ܱ,NB?L4=uD7s8|q8 jslB@zcr{5Yn@!nSDžI&h% Lu.W.}F~t!Y:n_ݚޮk2q&B<,}{Co|/$ZK2/Yc2]ًGs AH9{ǺƲRhCKhNXE@"Z1-nLfB![=8V!MI硈7ҳ.>' ^ ]#%(6ŠDt4.G̓1&%n-!rAsMr.>oj^a$k!jíY6BkYpppyk5 |bml, 7XY4X|/X3gV!x|7~;pڹmB$4a"*  kollКP%1V uMWDv6Lc!—۱Si}SͱdC7'9Vs .[2K&HuOϓ7%hIA3 !t!V#e]$1\ t'^)/y(;ق" 4{)^%va|ҹ˕ؗ?O+`.]?3p;_.YZ py\6L7Su 6ȇScL᧻4v$ݚYMT} GuTaΝ Zl<###;770G";6VVVV\rٳ^Ҳi@񃝽I6""AK=HKAe8/Mxc_ 17ܭ.XƝs[JfεS| 1cƴ(...@eggdcm۶mCPڵk7Λ7o˗_~ZLE&$B\b6B=Ԁ%!Yl9#1mA#Q~0X^ 47D C[=t%C"#&B.n ;v)5T[a2Q8X6+,Şp@YbuyKr ǩ[(DJV&ox-+f#"i^k-HB؊ sܣ^_·edǂP _-dY$vѢ}AEo3 l񚛠T Xk.V+ņj5HCnԩPxY<ܴNR \NEرc˻tRڬYYYY-rrr2OD:;w\dݪUVk\xofcZ*"U$*mvJ5Ⱦ4>$cqJ SVEД1KA&}sλ#8u{D׮]K žh_:V]F"9+V4wܵ .ƪ9s欗' #yu;zc^5ȿ!G:+/Kkb!˦=WzrSd@H2.~#n08# B/[eñzBAn&Dag8dg²i5<ʂ9tN>l"bICiIP-U)3kTCX0̙6V|+nٰmnݺ]vU f0)&jpZ&A3yhmu:+k@r߯_ғN:;$ q ]'= ʋ;w #Gjm۶I?㷏>K,u՚ "# d2"y{gJKK2c}ͱz %=i c]#˗/Cor7 σϣ>΃Æ k3iҤCz(AQ$#-Z(֮]ÇCH7/ZhҥK|嗿32=7Ry 'IoKH]R<u (ߟA} 80y27BbF<7" Du#H8N/n0$A.XrMQCdkeSzu+튛3ǎm! Ϳg#]ݬ*`LVL֥1TC]Ơ4ubB7(F*JYg<Dā,䄈_+Li\biI <\䤊Ba6{}%EWr; 3WĆ|uUD2o|Y={>.lF.]w݅(}o5Z31Qb&!(PMCްa&To(lcM(nx뭷6!feyh ݴ@+of aڃxiv+16|%Ø1cbU7" Z4ЩS}]:{oO?bi{$h ˲bE?|ߍ7;vTќͱݝnND#kH<,gy/r޿2]ȾIf tns !GuAPXm4@VPܸqR\Ӿ|?{ꩧx2H뛘 ![hm|\7H<*35MnZ!l֣Ge!2~@o8]WKVB]N&'WAWi @~>~f ,q7cҗGXpMMM5 ;pYhѢΊ+Wp5k׮7m9ZnODDsټ !t^B,\ݳgV~ &,//?[f]Om@M6A`-+{z='O|%/?H4@OaIX867&jx-dau[pJd1MNevsE~4F㬳ުU6mNsVβGϡ0 V9AsC)gmʔ)i^k|y_"G]@$k";$7ڵkӧg-:c`gkj֬Y#hUU~5kRSV^zcuJ#[)8HH;pVf(5;N}^ *1J‚#29[y֬Y̆0 b^J,ªrqR%jh<]ɄV,~k6x * K+ QVE>} [CL~eX\`"l)3-H禘HN'Qbj<X?6[Y hTv mڴm 8q"m;~U_~3g~/$E ;4솴ieI伧ܴ6hKc>|x?cǎ/KI/__~-[:رc ۷1iM.W@6#x_mɈa:usW ֽ۳o׿+l&! `8~ I,RX>Rr1NyE?kP?nȐ!! C47o Vúu`۶mln=z}?DBhN@ii)km۶͛36tС_sYo̺ߚ7o7~ioH"Ⲟie#'SYup:cRkGys̘1 G Ox_u8C\~(cϾ}d΋/lr%r;cբܪנH (wdh${J!\ aadn`'.6ϲR"‹DD,eGcÊhk<ߤeZ?ʕѭ":iShTmm!7|7mڌ IcC& MF2i[z9Y"Cp˲ˢUEd1$D"#RcKÝXΔEk^0#1!P,[PHdr;#+3'iy۷?ڤIN뮻+0=9rJ67h^,^qtܠG "[nM4'(..={B.]z9/8}ĉ'"y[ny!p*R$kY} ^2i~i,:uOc Ra5kMxc58!Q$}¡?j%79Cr% BGpЭu1T#1iLSZ,N~F7~%WYQqq_Ӭ9xV,?@R ?\ EyЪ-hLhWv,]9^v8D,+Vm$nh3Rs@D}@vq3sY ZwXalhSn0;~!t0 92I[v.t*M32ĉS#.owsn~!{LTH!''m-Z`paزen>5ɂ4(TkqbfffdVΟ?>sXz5lܸ/)5٠F}ZD~cLEEE*r!@6:Q8eʔ3&Lp?_-\'Y%dnuBiA$y/KP;qYeRN1'Oi},ikD8>3Xp!D:>I;U} 6okd Aaz vڹ HO9#G/P/^Dx+0V]-Fu~\uUc>cq/N_kpY vU` s>[fA^Fr3d%(|8^q}O~K֨.H2"nE@9=#] xOr2dCBB uY8P5& < 6ѐ 4sq 3$C\#bi S]' &Kk!+gG*C>idp a5.1H>Lc[#q,e%1y`{n!(Xj^LE-^lE|pCڇ_۩xv3~D֑;t3qy&>ހ$\D-r6-ҝ駟fBey&$$!BiIg?++٨Q&}&nޛP{hH3t:շ4znhl3F͘1c*E%*"$\}wRAツ)rJ~OB?>,Ț-ݳӦM;cqm=e$nX$Pi$4܁{ecN!caR_<{/*3><<7:ujcT<= ~)kdEH6sIyꫯAĈȐ!C999l^3fM7C=41/q$V7kHh/ɺFMrE8N;(Y?"QX~3|zXoh%šD i[7σz@Ч]+(+̣~_uO> IʫW^y_u yX" @i Yl6ХlW~aؕiQ=dH4 {H>4|p}Eda`+љ-b l!@lMy(C%> ;mەBvN.D"BQ^~-fpAD,dɫD솕09cGm|Ij7%Dy\HԴ- V"hP$!|>Wgh i+He<KcSg2XW]MxpX۟ eoذpҸ&NNB5i\H>ʟ8q~PBܥ)YF9& qzxivbFwqСCGz$͞=iYECYK_ ]tذaBHnM/ov{uc_&"ഀpJGLi]DwN#v.a]wo>,Xf͚6T9Q 7߰yAs#dB'> ͋\pH6K =B%B6`7-ƺP!}Cosz(Y6_[3ܻgغF'g|~n]YxzҲtoC:-onʑS7bĈa_뮻d:%Z 2⶗4(I,#U}R]AIcՊ"4>8~0GGGMFl"1BDo㣄H  nឤY"C{h =K0/\dUB ṋ!L4Ui߶ ,ZrZ@aV2]qk'Qu8q:iJˎ~&t=Mb# .*۴K7` .?g5jk:H\TФY؈WJV@M,f- %r2ok^W3D=)$-ĜSG@uM*ܨx] Ӱ-LQ0L"@\JZq X|XչPB 屧Y .6D!+?Q ʻ]Pr Tߩ7덛6BMN fC\DJ,H,Bslˎ.NQJqˆIu43O4c-hv-]4ˊeŨp.^E(%pݘpG_,[m[B36ca>77 5]d|(9Z?܄$W ! 
5Va!![}D K.en : <ƍ]O䝃 /~8s&B`^ٚ 5K"c=sVv]GSO}w_d,[YՊքn }),o _>p>VLJiiiGyN:=|5׼ d AOe p\eǎ{]ϛS/X@U$Gאί+Q>t5k,N>MD|>w~w?e[bT{K' ƕҳ5ǭR]MJ+4$DT4ǿ}:K 0mRV !JWnk縉p|bR)j-Ϋ+,bh챖 R.¥a,FܐwH.bdH6AҖd@a sWi<`W 1-AFbADE}' v \C#ɴ]*LhI~Mn7ũ d#C }}oS\#AQ|xUM{4)Mtxic9c@d$QWc=mX}9s6a„;WZC"!nDJdIUKnm!);1P`Фcr4̰)yz܇}GBf6A+,0jg#m܅"ڼ)@6o >s _~G+ѣG߱e˖JeMA@biZBxI3 ywwy[&rYb=:|\O?ʈ#xq#<IJO<^0,3a9Lg&&PdGV;3U<7g$ fMBxx@Ī *-fM*'Re/Xf(Ai( <()jc8 J@7XML̪mС}9^&ŭG#\ZA,?w`=087>j[¹kv\Hkv?q+#V7lmHYV`(dŒ *oOJ-`!:x>Mm?x w#-" 8ӟ߯՗T"HLLٛv]w!CoF>+>#_cqm~BD &:q衇{Miii $ $UV-yKq 4wsq#,LW"8I>IqDvmCvM!b^z%Yjmyqԫzeyyy׀9煛 嶶|\$FY_ū7hV/k7ˆ^_vP-ǚ^x, nޓ}.gFaJۅ9AB|Af9YmQ!.H!RǢLzԲkdP&ҼW2ޖ#{LH6y*Ņ~DxBF|57,sci صZAn~K+dΝ)6f'dj&?%7L~}~nI'Wߍg 3<¦+o~&4Ӿ+j,0j錮Dlr! 3ax<~If _s5=ZplHpQ(c46m FN?IhB- :/ wF$mCw H]k~doObM"fAB9sƓ@gD'$n.t꫿Aa8܄-Djկ˺ |(8g8kJR nK⡇bNԳ>{6s;{)>'[aÆ[@&5o+d(v %+k}g~w85]L9ei01+ڢ%ZCxݍmK"nC"x'w*v!W5)0ĭܕ VAW%܎X6Q: yan9Ç wmPP "gFٴ(~$r3T[' 3k80o!VJT0Y-b#VV ja-~ݸ߈ J̍ Dٮq5HB~8{ho s/9&HYӴ;\?3.uꗄpQ@7Yd=ztgnBu*<Y .^{zڵkk UH&Ҳ+Q%?±;G AFDɯa%0\b?~1wK.!yAk Z^{Z7)TީV]˗׿ 0Y6.Yxxj^To+u?b$U 0 |DkB3z%CKLKH{Au(EeVBobeQ%Y\;f ՚c,pbB֑n,'B/\xxL/l@֏ d! A  ' :O( =;7Zߎxd+&*ǭ0 Oa_.W1kI s֐%IV `SQъGy:^qcHDYn6L 5.aHcF,h=O?O=sa9$$'@.h@I46jرc;vmWSv'{a͚5|{HIK.inXӦM;gƌ2K= $*f3;i';@|I6DwDFDJR_~.΍>8yq{h=s|۫a unwuL˯Ͽ?P0+{ɰٲõי`ݲ ~xCTTﭟ ,xC,Ff[)]1rPg`g!%Z% β!XM.D[s{TSpG5)sVK" J@U;q 'ci~^uvJl|= P]W~7*]W8($&J1"[89<b΢H&X煫]*- C>dV;0]sc7sˇЋ.P?Ҧw}=T$Ԭ\>|C=aS țQ^^"77T՜\K~Xݽ%82%2 vjrP-;wjT>C{ӅHVv x_p!k{)Ȥ' Q- 1W=XƤ.lshSO4{sn'gʸEƿۼF97 !rNmaxvС9$ L'>*Ur)}x,}p̏ᣥkd j .S^ Lx_~#n!u,NdjTs3SZ7~nTdcAC_QdwAѯ;Qa:jOQ,^8 P tiYs&L5>D3UE[YC;QU,xcV&Xw5t@d#ŅPЬ9[c/S@^maגe`5k8C 2`ksa%b.e ċ$ǹ*q+'>Ruauq8tDB/MczEqd`IY+'G}DbR{;uߢ@Nݾ}(&:/|EK%!dY?))@oPH&F?X*ѽQÀRC ,DZ_5iݺ5U_Off='-[Y\"*$x1BPCYZ)&8޵jժ=sGy$֚CgbyȴtF]Zjjd)"EEEPVV͛7g„yA}@sj| K&5AJ =7yScsYM6M9k:ZkUosZ>D?%,`O|7̅3_kA2ت̕Zm(`z9piWs=駟ơj_aQGPHg(_c[87?GFz\gDMh]Ҁ0oW PCiZZ7&A( ~rY'QQ8ϒ,t, IKc ѭkl\gdYAȊVAnl2-g3#(dpAeĪ[6"vrl%'o5’ܩ@qcdtߢ+y n-6lDŽLjq,bf.sVi Pe?,_kάj  mK Soذa,3ǽaoR, dsهhSmHM)^9hѢlQ]]]3gqjr˸C+-ZĄ>R;w :u;FRB!AkŰd+ [idGmiP2+2FqW_=nfnGO9V_BOLj fB|gˊ|˗AF! 9] >}@h۶-#"u!47(U0 hVXHB.JOqR3fEDZ<ˆ[+$z7ҥK|~XzgɬenZ r-]~͆ٙ1ӮP;+amhfۧ jd_/Iz^F ߥ~y }FG@IJV pfP9, "#!\nY?L(Y݊[0885MMk-n`&O+k-.23]fr/hB|Ͳe`@pJ@vMk]2\ uZ~eea U@ufM>jaq l-#']f,}75.&ތׁ{^C3HY삇6_wԉ:O 0Oxo7ˣ`_\SR4׿}ƽxo^{nbi)=Հ m;iJI޴i-իg݋<~R&.XnUa„ =Q(^BkPH5k~8 4MG5:` IP6zhXnsFwd)"W_O?=aK? V^A@L EiНV /4!Xi^={KsgBtH!?dIm)u WolC|ms-fVGy0ȾeĎYF  xyŠO9e#l >wY.ݠTu&U7brkFGIOY{ri۴a2$ EHpzGxY7t9(|&o076GN:Yy v۴iw,NOTqn/:z˖-;vmN8UVC!}ٌ +ބScgMYO NR@Z|RFhk #GZݪU+J}RYHbnPE=b+1؞ʑC6T;UW]5ڙů W写1G;r!0CYkPH2Wv+ݦwD_xֹelGmIywޗ;ݔHO?$ƻ~3`<\8WkWz< e0GV"s}ͶqX,i/j+a$jW)@>^Q$%]{,S#lfVx!Xrqжf1Ͱhpw+["?'\LldPzXZo|֧ Vcs/))Ol4;0΢$  ;&ҳ0zϴbV)a1vsͩP`B7'OeظZc!CC}di]V;/F$(FX-Fw8XvN؂k1qBv >I"ӃuU7U_ZЬYa' _֋[BAR`ſcpߤ?%mą`0Y )VM4TX8J@hFQ:''p߼ܯ@u2eIN s=Ǭ ![ 'N;ӝ甉׽vҥ߽K~ ;wܼq]?|^-3 ݻjժKK$ڷ]v]Q ݻ:н>o_~:e2Ǐ_o~UW>-DoH Hc% Z"@ xV㫯zlǎ_{IRڭד½޽{AVO7矗~W߿+o߾ipMܵbŊxQ6Q9n/:ꨣ t)qr1!wHdn'=+yA\o[]r%Ǟz꩏:NaIBϥi'|a ڍLȨNY驣`P6׼j$fm %[A[> D#KaKAh6;՞Ov@G^} / G?F;p Bgfl8iܑcOlU_xYH ]"x" ]w%YѾ^>b!jJP .$ ; X@fô_#kGe_Wո!U}C8$')]Y͋  b"t0씕@교WjS`o1(!GT3X,L|z輓)A-mb3;y]ÊB9-NZ@.ma?Q,(݊fdڏ`7Ȉ(X{ /M,\p9 maIS&cӲv[X@*n c6֭[݀:p7<4JD}e!B' ܨ[&PΨiyoIΔT  3ijsCb\KW3A jP`tFJaLt$MݻeCvf*r)cܰ.|II bċ JeͼyƵ쫹s?{&Gq{{fsޕV9@ @$l0`a`s`l`=a #BHP!(*6Lw:kgfwfWtpUuz|#9Cw \v齷@0]ivBt|ub5{ldVKlҤIo0@[lDž2bA1C-5l9 a_~A~\Ƽdr*\aRnps'3|a޼y#"?Ҝ41A=Õ3믿O?}FMMMɆ\q0k,vVrgq`++emq, ikptՓ? > Xg{݇KՕbu#7ةq cF|AST'@eOUh`tJZw6nܸ{6m8_OUTK<8[~Ff`l :Z\@/q_q1ꫯrUW=~2zxFh&sA?J?oض!Y3ֿO~ק~},; |h{Cjay'?<ܾ^Y6@F5%] k! H=v`C}FYݐ"֗jp:ZGLuB``= S)KRTa?TZ(V`OɒJ̉YscRK$*Iw! 
Z#@e \3\bɦuЯJKdMQ;`l[ eUC\HXB'ە%jU@5%*JŒ_B\II)^qG*_ʊ~οP9y x} )BlǣBbV-3ޟ߄|:=̀a( Yaj(Blɯ>_? ]چ˖-5k,ƌ3µZg@l^veg 01 mda9Dg}vwwѢEor-_O*Rg/(YAg㧗^z_ᄍ\g'maaw^_Ȥ_<L:2Q۩z]n6JfioO:W^SB)8pwHiѐ e0N<ԇ}s9N?0=\k!/hl{ f㣏>jҌNa! \-{  O< oL2Myc=u]- N89AVA=qcKmwvt¯?; ]iSiaW_x]riͻ|G]2s/t 7<3~ȓeg}w? (jɩNȻ! Y/"n uZZZ=дr[_K RV[~Lv>f4laHbv궯.WN ew. *'%~J |*UC@x-mIF4 ׿ ] 0`F.XV6alBW  aro>ۮUv(O܊+610YpGob qx,BQ)xn@<,^{|S K*B(8 =Ⴑѿ;ꨣ~l֐n<[}yEodΝ mis9z4 T.[UU+bExC_-B zeħB `'S!ac"J#;ΗX<5*NQ] . |`׾^tEfa֎y=bދ'!cas=ɿa}pѯꐽD0jc{"x9rWɅ)3=0uPX),xsy Ac̃K󥲹<e]Dןoo?oGObאfռO [O< ]_`H| Օp7r(,0bzfDse>l " Jx8@ NEg)P@ y2DP``4CDW*. v򝛡@ل⢌0ݱcIemԕĠ]SJc8"2?""s2lPAAaExh@,r8&D5SӖdg[> TRϞȁ Hg1^ݪ;ᄯ̀kcVU/nH2"yf 3D3-],xg?zU<_BܻRq/ش0%(iJ~4@쇪^(# î0s-^xW]uCG6Fk.;7V= lڴiix;{; #!==J f1h3Kuwwt0`Rǎsg`ksmVRۅ{8:,hn+w܏)˼ UuNLy&@o.>B0Nacj !|~ '0)`ؐxeOrtuOUI}0˞'I~ۦm"{3, #'V^BքOg+n= pX`B~u{-~^nD ^ə `5%X^td;Rp+r1_BI{u7[iw%CWLW e (EUEPu=4VCڝ{Mȍlb^Ig2 `% K!5Px@EoOUf$ٽAA"iTMT^=vׁ(WT-HrвobXgu.RW\zn򀻓 9@$Kp #ĸ cQc[li~dَq{40 u:"ln/袉l x1l E#-f&.zƍ0ֿ/ vög?v`4+pCfϞ _ׯ_~}L9bb =Է?|?fl ?6,12f$|6N3#| ]p'謘vb8xۓO>̗߬XI.HQC󜇍l;# tM/0#NJi=ca0 [@ԷӧO)++ 2F9䓏8;:` b- )ð>5,XW_Wz&V_O~~"&,d@/(=$ǻ쨳x:t'2f̍򌿰D!ڿʼnk6 lRd&$'g Bx%E~7demAg[B2H=@n$j_T"Q]PMP &8Pgh 5ko]Xx꫰+91*MV7F}a"Ċག$,J$KY>;e?oO"B^ " :OG2*OC `]Qzu#{} 3 &1ɐߒ'xbZѣGx D׉ q/$ybQIIZ1}kiit橧Z*#[=ۤy"Њ%l_777oүUP:W+% Z0樍d 4ƭ)Naɓ'b`45x-ZTаDd ֿ1'6_`}-[k`G4CWF"\}ѿ3x2z!lvYg 5EC pokɯ:̃ 6xKETy'|=iӦs6q@]gf`Jcҏ_|nǟpL<;Ta0WxdY*,8\ZnrG􊋜_iI1 򘧤.[`šcou^xa_!mrqx衇*!alxeH5i!!VHUAA RQZW~,Ȯؾƀ?WzuCVj\j{ւ?r6R29od &ЃK~´M`;l[c24(!%hB!WP]ťe: vjD5B-Pt@ gfpd)vI19(HBP -_wb ҞcU "1,'T٢1:_/ֿB /E٥_K^{ <ؽ|DGTzh=[jTjZo1rCTZÎ !Ta]F;vYE]4]ȘndRvVT6W׿L#+F3zߙ9sMoϟg((`a 4h_\!qa%Yi,Lr{tR Ԏ3&pMMM/䒙XH0ċݫe ]&0ھ}[o*7AJ቟| WҷÇ1 0C9u1ћZp$J詭(f6gL {T_Md3t֬Y w`h1(mLm߿ kpx<&)K9akg}gAl-J±/2o$ `5SM]I2y< KsP# vi`܄tǷF*6@p%x%-[*qwS{mo[_^?"vȀ% iSŎxIu݉ qbWJ/ӫB}~cG!s?A)ۜ9s馛)-0d03wg,YaDB4|xiַ}֟{!]Bm '@bXwSVԌ3& 6,P{CmV0%Cy{/^Q!.7pr Ӈz_|Ec}d1|*qgx'MGsyi(iW)ġ:s}7TϭN0 I<}2PUoi ނ[0cǨ8yd)HF6 - QFx)a>C)b#D;c,b91@ =C7`cwޡf?^` !9ꨣ.p{g?Jg>!c#SQ=PS{y裏>Jg( | A|z(ЀzlssC#ad0R?R, @^<1C ـg>/+yS׭[KWS ck*`'MaWCU8[g#® F%g ͥЊFa:Hh><[OB:wPZ'W {xq5:dnK7{Dy hPǨGߋ…b d/L [ >ðJcTw ge^pT$x2 6qL r*aIYQ^aȻؠbAXH0h߷$uʩ(T[ppq ptYhLLN'ȚrXۂ!VT3WT-Z\p_C'߭' v.@t$C"Y 3WZ0K.q?a_P"@{WЪ߸aY- OPdVX-Bw$dB~O2t]o|iӦy̥Q5{ (ǫ;d$Xau@ \Qo'cƌgtZM *TA~uKlO:[ IHW 16H_Kg͚nxo9r$nݚ&eG<+[!CObm+@ȼϠGl.X@%7"`coE˖- 4N]y) (2 l[o0 $TTTsIc tn:8b]"Z;v5?T%s/ 2F~I)ȐfґI" HSx].w'[;vb[5#ɮCBAgJsd =yh*iY>`g 2JuqFwϨߥl*_{_꘍]N௶ TK 5A\CU{y(q=| Ɉw-\4 =ج_YfS466>oiiYb db@ Ǐxy?شiS,c&,[_>!L]2Lw*zeG5Lja.ȇ~XO/Fa^v Pq$d! 9@~0R!X..Xd dbc)Gz뭷6<* N4],ω sGy9èQx1A;O[o򆌬N3f|taV {fxŖb>z[^Z{l˰gX0=?V rǹw% v#ᚫΆ[>a_ʫsUNƂ`֔C%/0ǃI bug\vJ`{&z-*ጔ%a(kxh0ǩd s4y^WY]Y A&|JYއuddٻq[Z&%Y]\y8lTԯh($?NzaXx-dT6OeK+iA#C< KHvvEOLP}k`'bNRב@wWƟ+K)-G4pK+<$P; a”9ӌ d1:uC h9\ }~,=0}tCCC,Ed8zxMGss#+,~]ff2T%]rw/:K˜w"XcƖ.z# 4A+ij 362䖭_Uq8ƍJpY+U 3^?9s:2x8 YmEByF-I{嗗-++Spqq&$_ a2)ln5z71]HlO*}jؐspP bc=,)-Ęu>m }_AftT8|rhΕWC>  Tys#BUpGqHV$ƋRL`AV~. ăĮD%T3̓S1_HВ)p %c"!,8?;*xyDVX}<戶LHݑ߳d^JO04ĉBqL.X5$5sG1 F  ʺ"*]H"S/cY[|p-kQ?< t1*c=; mP~āSƙ*MSy4dr WV/.[l,=ztG72.Ȁn.:ӟBz~? 
6~x%*J[Wb*T JPz;|S^Be`JEQ#[{Ɣ=|0_ _Yu r!${(f7@XM,xGbUX$2g *s=ʕ+Ip!|lmF"@HmxO.\ޗyo Faa1c3`5]J_E+,*[ c:lcVlD;v7+fr`>w< Xrh{!tx J ۋAD/*+ xID{:\"埗1- BЉ P< V@$豓o~6c qcV:@D"m)S 81(G/j^ 3t[n4FV~jO>䀑w|Ld*_=S@ݻݜs֓A 8>L>|Y8ފ Ro:L2#oO',nĘRJ ~e I9M4)Y"ƀ,G@ # ᄏzk-af h:Pַwq1V16=xiQ H.&!Q OOq2;Bokiiy_^ @p`}āp_{}P+!WxDW NK: *ʡ(a@p QQCxw<c(QubV/ ܖ죶-e e8}b$ha1|J$µDrz"{H2U}~7"T-TX)}~rm޳Ʀ" rr͖Y;US UD0cD2‰ByxrXg0,@0Iz:}c+\{sN #uБt'kH3,Q}Ό@N(;`Fis羻bŊ󩡱o߾L# ]RL6OZ2fLBzȊn@b7ׯ_zV[vfJ*pCmӌ<5tÊ 5fgp,aAZ%v' Dh¿7-ZQ٘|AGؿ:9:.Yt),G67ayyyECC͌\󽨨(pMm TIEyauUh}G xZ keY=JĬ Sv暽o~ 6}~sLY6谵JV溢:\Pbt1Clj0Syab<νT"$~g▨CcAzg*I>.U8|e<\ED%Э)+b%K"~}"Ø]*,$z@W ۜcoh?ISx$2ąxK<\G!pq{Q-2tyu\oB}іÏξ M+c=*rZ)a] vi/MWOS ]'J z6w }ʯnjB'v'J=6'mf2fa $ҳ:kyw~]6nN3ϼ a`QB=qƒK/-dF*ШYn#˅¨)1!f(V$&@3{k܌3fw ^ǘf#,Ò{DIrȑ# tqYPa Kdm}|ސ̇b%r t*&$裏~t>WSSE2H9:SIeR̕;l޲Ė'X6SZZ`@vvvqv+_uCra2Q/=|O2(md&?]('^ @p7}`y>G^_/L+gskzѸuPUVH o3Wd#*co ~"g2$A5 @8ڑ T6/M\PBfV2<LVc E(u-TǍ%4Zw}2C^@uH2JL+I# ,/ALwG]~ruN! %a`yq7 =[v߰+'bF$pbT-g %y,G%3PH;ڋ_8f= 2+^0 #JM@n!XfIL!\3﷿7Lcݕ kD7CD`.s=K2E;pf6S(# 4F:o޼U ,潺=Dֆw}Gcϱ?&B0L&Llٲw5fƫlC!W$tH/{ U Bs0L'B@8bر52 />v A}ȶٶklt* ݈J8Ƽg2#z^cǎS8ǁI!eX1~+G9L:Y~wc6]UYٴimӕ aM%%:$*jժeT }538Cb/š8"6WĈ TXPbǸnUy1p @ O@*[82ј{**[r1_6Ⱦ(ҌwZx\R.3~/1av@㯤!#">6?'ֻPEmo*%wEy*Xj\ R0Ζ.,̿(hTcQTk 'K. hiett'ԎWoZyY|ˏ]Yķ^ =!a{1uHarBE PysW*øfx[=rJZ;>π 8 ̈({4(Ҳ'X~rw&Yʞa5cO?[nyL~^$I[3A x̢~z?,cwwa* , &FϙAa„~Ј6!8xv8}m0 ˂1Bk֬Y}ÁٌLhs$L`\Ɖޖ<<( `SŐ@@E ^ruPzmy ytgRg3#Fذm'|;ohC-Temmm[ 5?Ĕ4MV[!^tnyaN"T0?‘L"@ <+&%wyRBJO5W¯Qi@1>^\w0P^/Pņ9jy'aWRzy%\O—F̥"S+Ar"HF`# *Y _Ɇ]I$YY:ڕb+yr4PP<J%Wr!|VŬO Թ0,OD%q%}J]HR$`i1{VYIVM5ʳ'OfHSfHR 4N4/~eDy3%{C 's9' :ty,3pazꩁ-\~MsaЇ-j裏;jԨ2 Dp!+//ӷׯ|w #Sl;- #FW4\od 0Hk HhKhn<7QHsB 6׹AcuL~ǰ)\SO=OAP\BGsY\d槤 ̕ {f8Lg@ک=P[ڽx'KBe6 gOv ' =YMڅā 5PS`͛B؏(5 r"e. j\z#T֒1Y-,,(*Q F\ryh/IknBt8ΫcWʹk ."P)"~X!ݒos _~P^HJk \ɦ:@<-(D]/%bѳ".jhLR|_!TX]YCwD& Le8shʻ~h#AI&SԮ08bpLpRY,c db.ʼ* C(i`"ýCQqh{>;q5ʃeR|y!1D0n(tqa"ءAa)S)gy[l+~H{d|wyg-uT(gq![WĚu YU04H$@փ{HHR`C0 _{E>>/-)HȄu[q%գT7Ae8AcRYvY+Q":IʀAY`)&y/*֩CņNB ~CDgTREWE 140Rڄ*+) ԫ/BF(̒@Cx,gǨ!ȴ3bUtmP@ C}(Ax>TL?~ؼR w [5EVtO7>x[oּi>jԨw}]vgCԸmٲ3XWED~c=WL6{}f3A7VWWѽpz ed!0`@ZK/fb>z[sX'e Jf柏%P[)vz|rW1y9@ӫKϋH(@AXbI*7]HגƩTRaki!W;6{K-J\mݖD v&S^ҺZ`(-K5:;F27=ƃ / ~Sc0I=m g#~@EYyJ%03Ju)CBt5-r@,E9@K1"\U^] wht`5$}`hÎr!NyvB:;W<' u5!m\|E{7', Ø-Z*7^[As>P^~g@0LT K>LO?ninjj !Y͂׋ @be0NSH{d|E1f2w}%U|PzKIbk׾3|R0^ZZZR__?0xηx uq,Y) ۵>D ˜b; ( d`SX+A> Sr!] BXJҋ;'K s@̴SxFEEEbe;ڸpȰ Hfc];0; D%~$Jn2jַ{~;&BE#@M,TPz B WT`C~P- Tp%^9p^ ?n J0^A%(Ӌýf*z7f g "۔$/+A|zI JhנPjؙ7[:{Khc/L?๋h~ YN:xyg);Iſx8*T}υk+..aR8's"X12.B׮JJ-VbE2j.T7A"Xc* )Qu7d!TQ;5o h߹KG⛝oޕkO6l Els˭BBUK*Ha[oe;`N 1| s>Ҷ-_|9d2I!=y}ر0L>Wy&jS3W@(Z@x<{&ҝys mtnx zy!EEEl t\ׅL V $u:w9#/ C| G@m2}Q `40Aϑ>7Ƌ s$Ayqr_mzen){q5_}iaS9*Cg]"q 1{m!B5OE?x#y2-GLRVٍX\%+klsK%Pw`؍%lxUCf tk:Z ń'o7(6_Kh&Adx:dwc2ݑ}sS?|+1/1#: ͰZ+'SA HL %*J8I1Ù8tEhSRqR CSdW .) ^'Hy`1=Xʙk>:rY-gTVM^wȴ׊c[W<.^9჻b VJ}_0g KAsx><7@+ lq<@wOJ3M33 x. Yxh{bVUtYf3kmMC; ߌgoV; (t޼I"zlհ[jB9;Mq\?aҕre=؏\sRȤ jW:`۹>|ի(%4]_*$*-2,r6B0<"rw >Mc`Aa8:A!z衇cscwx:B倰bה?`Ft {!"nE|U@N1@< t88^^hR!(VVDEkkD|@84î+k kG7؏$LSF ,Xc8k.oD yӯx]pd=om{[7-GT4ޙ)?K4ˤNW?)կ@P, Ks"@s%k_jPT2.kyو2'K 8픙ȨxQF!NKDQԒi]THGEoaȘ J:&Ƞt@!¸W*bD$P"ղpct8tR ."C}&!V .%7z>&1pRM+SU:S.ND~J"uB&:亇@umiۊگ Xt.zk+t֭[yvo߃2ST upտzU{ߝ={ _~60e"v]vkɧN~x/{V[kq B 2&֣9  r=޻VÇm72hCCCQtfU[[[3(央JjGwqټc6=Ƭvnr"A/®BK xTRЕBg .ŀaÆoVVVcTg,55~@k CA>,ko l >7qByqp3g?!] 
5䧿{n#T6|!2%}hT҂ENHRZ YP83p,F'5B0<&2"HǽS3jr)yh$cb$搰[lDdWxK)!W$( +؇,{pyn !OJ|G oKjxn϶?ؗ1'fIHt% (ie5]*إˌ|nxrĔ mYʚVׁN|aM)o/ °R&ev%h玹1m_A 9IT&>m0$@q|( e-] 39bLPV 'CX|fϞ .RZhmPB؀бHibf夓N4jN3-+ :8c*|+'xGuԴ@L{Ok64xL N ?o~plA y*H/1#،1Zƣj~  X֚Y(c~BEytcxU)(V=,U߭^8^62p-`xxU0윘8^myQWg/It@ylf𺼘W#y N} xc' ޼1!GΪ݇~xEg< *9rdEqqqd@ >P8xk 8 b-Rk֓m)t/G>˽qȅHZN{,c5$#DW{ru?zjz\&0 m c>]zhV!N ?%\˝ 6Gɓ~׿ƒyN忏9* C=ܼMv!,H$ ށ h}=JW4}0X&A@c aXF7{?~w~---I4UlF鋬 )8 }Q;W߾ ` x?Ž>-l^~=ǁjC|&.tuysl0Ơ6s5,fSvfɰz95sT˰⋦cBkՃo;vy|m|;ܙ-2ُewy+:;@Kz o&0#${C=i!W{ B]  % Pn92J M&uױ9\DP U`2s1QAnVCug T}!Lm+u *E)jCTR DVY A=uPVoG/ jao̾A&& (mtalBua;Vfm i.h!8p ȘRȶ}_=o?C,LTX0G70̤q555՜L&!FCB?v`(I7DiEEEoyz>^:@c`m޼)J%!xW@x@{o٪,O3(7, 6LL*?CkjkkӤ+pSs}|6TK*+?<-/||ӦM5q == s_X>p;)S^eEZ![%7]z_e^93<7 :eBykλg}\SPQ.! pwN;{E HXRPƍd>GqghI oWX"i@Cw^'NJ>lOY%w&ܶ-;73 &bTT|֯AXN}d=n TGX khJ>A[Gac~_|p1%P NJRPǮ;"R1{ܼcQKJ6G@ĭ g~hr;EbbY_BC绬{?wͿ՟3 =?XcȑRrX ?ON{梋. Vvi6eY&R a`HY>>dl ?.{O'9.Lͅ",  +;w,H9 ȓky=@¤s|f(z·~/aDZb <+Hjb/_΅? }6닖e3dnk﹫0T2wɘɦȊDN [3r50)AcGiu9` F5lotġ1 k7mVdVQIqd`vfAGy-4V5Rha}30ę WPHF@wWL*ő ʜRod%äⲋ$K+ 3N}V,{UPg3 ʭg8$-jvz*11YsH@]ГCG`?cMjweMw˳/V_x;x;O]Mϸ$ $Y TZp!2p~헦ڰ3<}ر#}i D}d0PSDwL|t|O? yCa!m2+ʓ0JBOl+VO> *x=jj ҥT7*=bG!VF0=("Yj-/E˨k!cF=3XQ},}HˆQ=o RZ XDԍ yВU^ 1*R"/CuE1h,-:fxl}aAЯP&2c^Qe@ZvM[6al!YcQʚ*(o*6 6wCy8[GDYV˃'mU g*xIT*~`Aګw,x}b;v5!T t?cN=&>Ӯ0bdh?A !RXӮCaDLX/?*L>*x{GBS7 8\hWO={-W]u/{%r13kFELRA >nWjdOzW~%Btȧf+JpnAպ=YUPaϸ +>gr-]{5'ji>=wp^#2%mxGY*%!GWNO= =zeITTf:4M@yVZDj愲5l~IxH;܉./~P׾ j>5|*Wxp:("ȍ퐡_\UʕRPNzORXݿƨ.C5C^u -a@q1++PC7?L8xhDnBB2{q)4+c{?FV._ o/[ 4JʡxW;X!Ԙ)13}G^L$߃V3Edgc=++Sf糉* Ok,! (ce#CK??Ү jN8љ -ɈT#ի,3<!}3?N l477?aԩ{iG(u+Bo|ϟ%P9י$B?sa.sL5&,@#IXH0jHhd=&Lpdut+\1x%iD >k8WZ5o@THܣ]~uw2+,m3% jJV^w(FHkJ a܉FzCCO߇v 9qgzаWr6 :RqwW憀z. _Zǚ/3BkR@`LE 4Ȼ$ 7JUpGp0ƒP д:^2ڜU:veKAo=N}^@F!lԾ0\X=(3 jj`î]P lxObקKܳC4]d Dɏ:_0f.$4 zD-aDoBw4e\)С+]/ JKK WϾ5]|0 ^o`m۶᪫kÆ -rHhC$x?[:: k~ |T>p-ac7T]dTΦ& k:AH f[0PV }kmOcGe\}#ԣIqhH^ٷȂyJ frIHD\Ǯ? |TCD TXGM3CZtuS,l`Mz4 :"-P4Vp;y(딤cá}HTQJ /6Q!˛ڂbC*8cO<^}mX;J0(.89n ̌v^\f,Ԣ>%L]XWJf.GOD",(c<_pRH&DG{Q#dD_˸H1 tw~wŊ?\rvȥ MBsvOH6ay&'\7gc[6葺yL䤀,Ӯ$gza C]VW/*h󀋓N:i/~'LpX؁73ï/zqjS޽ |u[֯^|}W؏ۭy xr}<Qѿ %iձT;TJc WZ6 ǡOI4VC^ƕ0Q' Lzx!ո]J2Iq s_)AM3 Z@"UCcN_N>o<X<<Kj=((;7,$ [#98aub.k&{cqv՛7x*wN{_=!yu= a?@H@va_&(+*AG>),=A]+zX L2&5HذFVÔSN~A*Sa} wqfGZx#Y߇}KK;IʅpQG}FB{' O9fi#􇡬/4XQ%uU|1ӆMT b4sK1@u_Zo-[ځPU[UX#[vaMgi~0F!N3] 5pOXD %[Gk@wdb b?^z5D74< 0F0mڴ%};SO=bɒ%%$۶mkgƎ+744k&&ܞb?Lau V0t2&ژC};Ҡғ:@MB*؆c o>@sD ƸCx5s#= WEvqW^y++kG Ks3wJ ^fT%Q!5x/  zd{l--!tz_P8 -9N\钺4x< Ǡ2i Qҩzd}Oim@ekO a86(1a2 Աع31kP)C7`ILc_ S*2v#_?|zu}5vXaYt%nx)S2]A+裏>ƉG3DvQq۪sY$¯8wN],ɶ"۸Nq(v !%'| `>b i'@ )6` p,˒lX睝=diooowU09$̺E̯`݊QI|&à Bvh'u藛`BfAj.huԮBCE(uG@k鳿AL~hFdAرFπ^[L ˊ^E!5pΈ(5 4oetFN롐 .|߶|!5;R[ mм!p#KǜDa$9;#"x68΄wX,|n DB)qWÆ C (,,Iqzq&W$3r饗կ~UVIk (ē b|i/91$Nj)[LT$[\+(%M~M(XEXT>?*|HjӦ&)))H@ xg\ВD\J\k1o-jz$o3|233ESdlv+\^!t-B-STz駟[Ϳv8}?|nWRps9DnS]f>[sa~d-yi00=_k>u24dnim%0ƨDd` #bmK΃bnp.ˡMH%ϖKx{`GoJ/NBBN0v 5/98jgT_$A`"ivZZ %:Lu )(5kBH? $fBz}#$Sd1d~!a"8ME~)B2DGܑ?Xw<|۷_#|QC4^ NYOD XN0!uܹ;b#5O}7"!xݺ:>gQrLB@^e+SDlB6!iMSRXz-EE@ m{Bёx~{-=LhSͅY# Ⱥܰp_=}>=% p,a2h.-q)av:)i) d Ϲr-i ļxNJ#$"0XOFgb-?0HAfr$A;!`LJ)&B;ߨ0(=L膘<ݷ"NyB~C( \plݲ 6l )iy}DG0"7h.o"_)st{?%YNi wU ADɈE@eΝڒ%KO2sq7pÙǏ"o@0b xaSB`xg7nܻ%w}}}D4J @1Jy! &+DžWͮJgC|:xU4&H(!b!Jǜ24ײ,>WAf̙3g(! {=8&"ո/'-օy1OkpST?֦=pGɒ]+B=k*1dF'--~7=!XŒl QOf0*k SZQZB<$B~>m: z8mT=~<|k{nW %D-~aUard O@%f,ۡ`0ۼZ!;!"$"æs %In5L6 ®#!0HOȂFHniZCr kԟb،FI0>#?tJ W^y}dH(XJFFՀ 9qȦ&Ç!s!vrKfn+z!+ްJĥKn,,,Nߓn5:"jB PV:Js Y@νjSH̱eРA1AǑ:"2kԆa:ZcTT&W2ȸJ\hE?OHfjon[_V{@i{2r 9+O#z 7g+N<[ѭ!wO'4ňXB7MxB;SǨ$}3 _fn?Ly>d̈́`0]|Bc9(0M@\1(sr!3d'M_ۉ0|`(<@*1LGG:{4vHvBS Yg eJ00F:`v(ܼ;Y imlTөLL!|s '. 
4bteAviq HDČ% TQhT _lٲ]=ܕ]tE'MDKJJ< ӋسgL2 `>:* g1:/}#x%Roĉy4Z!čXyN4TQtB"s.\s;zjB+B@ dvP9ǎo߾ l2&n@Ӱ.}L=:z!{hpJ? xE@P` ARڑ0DntnÙ=u 1txMM1vSW--ȇګgyO=3T?fA_A١ }q*~L蟙}[nKuu5&9䮜t0V/}@HBl!ErD\)P0K~HFG Liɐp[-0sT9z,{vbPРd=R c^ZRkyy}!%93{Y!CAO!%>ݙ:*< "~ % M6Lqfhzr0FQ8\rM2q%BB@o _E@%(KֺCt ɹW H:gΜ?iҤh nGst|p'|G}YܴcXJ*:nywMLF9>;WnMzؾ9ró\d=/]hs=je\@ I@g*mhBND5$ipis3",vxJ0eV^S4}-}q  ʥ'Y223 )~Ds%wfw<3^kȀ g} ޽ >ݼ|} LJLtRoh{P!Li Z"8'٠$Q-V }%d2믿~ɸqOJԴŋ_ue];+'VûKme2ZqǼpǂYпRa~G˿T\\|=fWʅ1J0أD% >>z È~Y'`f>4 Jz72StӖ<9lԚx<݊v <#nL @fF_xp@DIh}Kjk z$ 92QP!?p?!6-Td6H7 wWAZ Huu19.Xn2%c\o:j@MBH Iy1S*)tRG}_I8H1bՂx!F`S[[K5!Yg5qѢE 񕗗444fddF&^ [Ԃ`rF^f͚5|Bn`˄ݥ Q9ʚel{v<[ێ="H_ƨ}@ "Wi@KjΡОO?_Y0I? J;}[YY)N7ow}w ;'.h YOo9SNgN͛Z"\^p:=N+ܳ>K\iPYrHՉN@4OOfW d&HM}3`π~`°piN'7҅091b޿řD#1CF'`4}2 ;ejNKIIMĦ~&Y(b-JK̙w&7"ϖ (ƨcxXPXQqG/ j3bQDI!XI5,a\ML>8pHyWu]&N8_9jȴѿ\$ yyy%ioݺu%%%'Oγ{ሎ$( H~~X#U6q7IW$-nLtl/O6 ?n}23#/{;¶2Ê7ғ6g65@^u%044=jN1?ju /,TuUzWn9HbǬ;zdΑhS jxf[X $ླ-g΀~}RUg˗/1l޼\"\f-'"3u_ щm>;S0 2r2ݷ5 &[4ɛ/7AhۊsMXi.Éʐɝ`v:P!׀h`g@M'D 'Om_ k8qF$B&"4IR41 Marh$]1kG5kcG< rt4^T/3첅</xo@|@ *Rf $ x&(ARPP>F9sfVaaa'"d$"A?>c@Ɲo|c ! E".F&ԙ9oԩL/^ mpZI:teƍM1t$x5BϢ"9@E@Ԙ< JjH 3gJp$A@:SOٴiS8s&DkmCBիȼa~t\A&d~By+H_=պp?!\0;>:_yZuuu?£>ynQDsd v8 ȱ*pL>λɇfn3ahNFnnqӊe'̋oͱp_8 Na[T nس!{v,7bv;E֐ 9c$|!o \cF('`D<$5Y(W90j!pSG~Qse+rLL(B,2#:0*zq/(9>r /͛M;H$$n72 &B0 P?\{0L@HV#9A t(Wۃ~]L~Kk&.))) NKK$}Q;8nܸq2GߜIRjmm$T`kL)j[|-l9->3/ebqyd]dɒO;>AFO^j=v6?~x4LQ 7?~ɸ=Ğ)IB:YX%S$|^3>Z J/G}=8pհkw*Ң9D}5QMH9 ,1`+>6b_4 :~Sn hiihիVG(}(l%zUy\6# Q& *c(N, JkoVzA@P/6Y믗qH@K6;%2mڴgup~n'"`XRU` 9#ZRE4RQQq̕i@ 5CUM9*JW(F>b[gF  w"9G jre}..噑09-sfvb{!:1jԨ>d( "d|s]Z[*E'N~"5pëKWuG(mmS?JwqC#[.u z|p'G0& $+A0nH)<RSYM8ƦnRS!jy1,D OvUh&=?f N4z_Ԏ b2hG6zCk>¤_ªo$< o7h}, ݿo/lܺ d(N΃OC#]XNzw 8(XB.7""l$K䐗w'`Q$"\!ői2R@R/))=bĈD~~>J=%D)8r1&|g;ن߅$ M[C6s38cxQ4yC WfkUb,۶m; jߤn#ncR0s4\@lPAz.YC(Twŋطyyy څ䣬 Ǝ [A"IH-&* jIHZT>&DGmY )J=BAn &o0L5\rH@T ͰЌ˫| P1g̙3矫W.gH!Q H,mɨP :%#G'^ 5<("|1ˀcRZv V ?+Yj*82~#o}Z9Cfo߾ݳ\$ ]w|rU|y"_˺ǟ_-ב_N>}$dES[2{MDa͟?(o#Dʫ, Ȃw/ &TVYYY_wN B$• t#xGOC? H'.rMQh Gz ~ƔISpO`Dw-JY~BsvvGWҮݮ&ٳ&~yRai13<r^l_44hA➀JBvK.[xTUBY!©bTaZ ׊׿>.==11c2pD+F_ׅ p _#G$9_u% WK/9B> 8 }t1ȇE@<OԀP ) #i&t]>bZN+!vp0 )) - 0q$HA n=*i:c[ hqZ| (lVĒkՓ֒|I P$TsW_h 70 gv,lLUN,$+DB IHٹU:w lh:(n!F8Ba)HsGf&8^ɢ|hqHu8l+VwI ,5666iC(E3,URB k.={v8!9w}˗/\}bH.HC#릛nE(bhB/C:}ƍ6"!x]@X0d doYs H_?O322i4^H8KKK gyٷzO=Z MmqSz/uVecKLLL+pl6--TCUNh@T]r1s/?A>+&W^]뙛ku66j?pu]#r*BlsXZ_#tbluy։i DZ8ʡ׌|׊h kPGcaC|^yvZP7vUA]4|,NQ:Sv&vAIK]1O ϪH0UP B}b*4IdQG ZH"vڲQ4O2Hd2UwdB vVY07#|ϧCMM5cXJY>8mI_[Z?,r9úDdό Ћ> ^k@R89Wu;rd&aY*JP3 戱-;?9$x/Ç,V h%kA͛ww9[$M4"iH/co9\&;xwX`$AsN.\ ʤ1 -0$DuV':Cg?̙3hN3|cfjpwa߿Uzi~&>b9rHŋ!:š,tc@MHn+^{m= (N\rrYk{nO{L>(`),,h7Л&MiN޳A}ٲe%^H7mDjb7y?cP;9 2 =C璵͡8tPuh? ~w38xe=>bfWi>0%/==D;^%SN@$ِ׀XAT1֞":67> 5[HY 6 Cs"Q1YtI DY>?tv:$& TYPFgaY|:,?2!hlr׾R}zC1c %]Ȝ9~fEb, ׮`O=[oV?*4z0"bq4aڿoJ΄l(kfmow,*8#[Q41$$|4@;im=e6;d@IC66RnT$a0YԳXN ܔm߷5(N6^ի젛`EUi@TκbsSwY2݀k L!zro#B4 ? j?Ds. FI 6,ʗf0%3 POcӊnqb51h;mP]]?rphYAm۶yoݺ?u]7>8)@ bXw5vW5G('kdo/+q8 pϻ+/;5;| fKLW·FD}XK֙ ."Q JI-`#gX$]&fS˾!'Ro~[ZZ1b)֞={4-Zt5\3ݫ8/yK3T"*M`B9s>MCڏHwsӈWr}/on{o߾=d{ J)-H;&$SmY!m{Tv|62a/jn&K͑,L˛2U--ͰmV6R`M[~";x4ט\%GAnsU_P! [`Öo),mjCnN@y1\'FD5V]M Θ$KrI0Rr#ňUASv"O95_VkqR~Mꫯrҥ7M.E 1o$Vߺ]_|yA/WrVTT$)I@OfiPHE8/[z/#G9B9XCzzz!{w7I"HIb<ƚ'{饗nC"رcGOHxk%>edd8r}/+14{x; k f͚B<sbcnLgtԧI1M}b?H߾p÷V11.Tʫ Kb=b- kv! dPGљ(Cݸa"819s5"[KS#l),C|)7A)#-gƱ."}! 
[binary image data omitted: the tail of a PNG image, misc/logos/bcfg2_type.svg, and misc/logos/favicon.ico]
;A$G*N0T5Z:"_?%dC)iG,wW%eO~jmoqsyÿʳ˴˵ɮȿȿɮ˵˴ɲÿysqom~jeP^>%Y:"S5M/F)@$:3 ,$ % h+2 9 ?#E)L.R4X9!]=$iTmoqstvy˵̵̵çʲ»ºʲç̵̵ɳyvtsqomiT]=$X9!R4L.E)?#9 2 +# f$ @)18 >"D(K-Q3W8 \=$jVnpruvxy{ª̶̶̷¥ĨʱʼɼʱĨ¥̷̶ʴª{yxvurpnjV\=$W8 Q3K-D(>"8 1)% >' (/6 =!C&I,P1V7[<#gSoqtvwy{|}­ü©̷̷ͷ¦¥¦ĨǬ̵ʹǽüüƽʹ̵ǬĨ¦¥ͷ̷˵©ü­}|{ywvtqogS[<#V7P1I,C&=!6 /(! ' .5 ;A%H+N0T6Z:"dPpruwxz|}~ô̷͸͸§¦¦¦ççĩƫȭȯȯȭƫĩç禦¦͸͸ʴô~}|zxwurp~ePZ:"T6N0H+A%;5 .& % ,3 :@$F)L/R4X9!z^Jruwy{|}̷͸騦§§çèèĩĩĩĩĩĩĩĩèèç§§¦ι͸ʴ}|{ywury^JX9!R4L/F)@$:3 ,% # ^*18 >"D(K-Q2W8 pT?tvyz|~̸͹Ϻé¨èèèĩĩĪŪŪŪŪŪŪĪĩĩèèè¨Ϻ͹ʵ~|zyvtpS>W8 Q2K-D(>"8 1*$ \,,)%"zu28 <$C)H.N4"S8&Y>)`C-oT?̹͹κȰƮƮǯȯȯȯȰɰɱɱɱɱɱɱɱɱɰȰȯȯȯǯƮƮǯκ͹˷sYCdH3^D0Y>,S:(N4%H0"B*;$0 $&)&"(-27::`ME]I?aLAeOCjTFoYJu^MybPhUñŲƴȵɶɷ˷̸͹κκϻмммѼѽѽѽҽҾҾҾҾҾҾҾҾҽѽѽѽѼмммϻκκ͹̸˷ɷɶȵƴŲñhUybPu^MpZKkUGfPDaL@\H>_ME:::962,4v6@(K2$S:*[B1cJ8kR@sZF°ñijƴǵɶʶʷ̹͹ͺϻϼмнѽѽѽҽҾҾҿҿҿҿҿҿҿҿҾҾҽѽѽѽнмϼϻͺ͹̹ʸʷɶǴƴIJñw^KpXFhP>`G7X?0O8)G0#=&F3(x ! (/6 < A%G*M0S5x^K{}¬îů̹κλ˶̶͸͹κκϺϺϻлѼѼѼѼѼѼѼмϻϻκκι͸̷˶λκ˸ƱŰî~{x]JS5M0G*A%< 6 /(# % ,3 9 ?#E(K-P2cF0}̸ͻϻ쩪ìĬŭŭŭŭƭƮƮƮƮƮƮƭŭŭŭŭĬ쪩ϻͻɴ}bE0P2K-E(?#9 3 ,% % K)06 < B%H+N0S5|m˹ͼμíªíDzʵ˶˷̷͸͸͸͸͸͸͸͸͸͸̷˷˶ʵDzíªμͼȵ|lS5N0H+B%< 6 0)" J&-3 9?#E(K-P2tZFʷμμíư¼þþþþþþþþþþþþþþþþ¼ưμμdztZGP2K-E(?#93 -& $ T)06 < B%G*M/W:#}ɷννî«ůɽɽů«νμǴ|V9"M/G*B%< 6 0)% R& ,3 9 >"D'I,O1zcPȶͼνî¬ʶʶ¬νͼŲzaNO1I,D'>"9 3 ,& $ M)/6 ;A$F)K.V7!ǵͼνï¬įμμį¬νͼŲ~U7!K.F)A$;6 /)" J% +2 8 =!B&H*M/u\Iƶ̽ͽ¯­®dz¹¹dz®­ͽ̼IJs[HM/H*B&=!8 2 +% ( '.4 9?"D'I,O1qŴ̽;®­ïʸʸï­;̽ñpN0I,D'?"94 .'" $ w*06 ;@$E(J-\@,Ĵ̽;¯®ıξξı®;̽ñ[@+J-E(@$;6 0*# t% +17 < A$F)K-oVDó˽̾ŴŽžƴ̾˽mUBK-F)A$< 7 1+% ! ' -3 8 =!A%F)K-l]ʽ˾°ɺɺ°˾ʽj[K-F)A%=!8 3 -' # # B(.3 8 =!B%F)L/zlʾ̿ijij̿ʾzkL/F)B%=!8 3 .($ ?$ i).4 9 =!B%F)M/rʾ˿ƷƷ˿ʾ~pM/F)B%=!9 4 .)$ i$ ~)/4 9 =!A%F)L/}pɾ˾²ɼɼ²˾ɾ}pL/F)A%=!9 4 /)$ y$ */4 8 =!A$E(J-n_ɾʾ´ùù´ʾɾm^J-E(A$=!8 4 /*$ $ ).3 8 < @$D'H*s\LɾɿŷŷɿɾqZJH*D'@$< 8 3 .)% $ ~).3 7 ;?"B&F)Y>,ɿʿɽɽʿɾX=+F)B&?";7 3 .)$ ~$ j(-16 9=!A$D'H,{eVɿȾȾȾzdUH,D'A$=!96 1-(# f# A' +04 8 ;>"B%E(Q5"viǾȿȿƽviQ5"E(B%>";8 4 0+' # A % *.2 6 9 < ?#B%E(T9&ĺùT9&E(B%?#< 9 6 2 .*% $ % v'+/3 6 9< ?#A%hQAu`QwbS~t]Ot_QfN>A%?#< 96 3 /+'% v( % ),03 6 9 < _H9kVHI/D(O4"qZK{nzmpYJO4"D(D)kVH^F7< 9 6 3 0,)$ " $ M& )-03 6 W@3gQED)>"@$A%C&D(K0\C2nWG}iZrexl}p}pxlre}iZnWG\C2I.D(C&A%@$>">#gQEV?16 3 0-)& % K$ U&),/L6*aLA>$8 :;=!>"?#@$A$A%B%B&B&B&B&B%A%A$@$?#>"=!;:8 8aLAL5)/,)&% R% K% (B,!\H>713 5 6 8 9 :;;< < < < < < ;;:9 8 6 5 3 11 [G=B,!(% # I# M;1~_LD0 *,./12 3 4 5 5 5 6 6 5 5 5 4 3 2 1/.,*+_MEM;1~# 9^JC&# ^% ' ()+,,-......-,,+)(' % # ^:!9 : ' # A% g$ % & & ' ' & ' & & % $ % g# A ::: : :8:48??????????(0` $# " C$ c# s# s$ b# B# &"N:28/3 5 7 7 5 3 /8P<3&"( ' /R:-D*A$D'G)H+H+G)D'A$D*R9,/' " % o.8 @#bH8T7#Q3U6X9!Y9!Y9!X9!U6Q3T7#bH8@#8 .$ k ' 3 =!G)O1qWCbD,`@&cB(eD)sZEsZEeD)cB(`@&bD,pVBO1G)=!3 '$+(5 A$K.U6]=$}aKnN3mK.pN1sQ3sQ3pN1mK.nN3|`K]=$U6K.A$5 (+' 5 B%N0Y9!a@'hG+lTzY'J0 W:'~cO~̷ƮƭǮȮȯȯȮǮƭƮ˶eRX<(K1!@( A+!J0#]B1{cQıdz̹͹ιϺлммлϺκ͹̹dzı|dQ`F6M4'E1( $#/@#P2w˸íðųƴǴǴƴųðí˷vP2@#/$#'8 G)|eS˹î˿îʸ|cQG)8 & &".=!O3ʹï®®ïɸ~O2=!.( $ r3 A$hO<ȹŶƶǸhN;A$3 % o( 5 B%u]LƹƸu]LB%5 '  (5 A$pYIĸĸpYHA$5 (+ ( 3 =!Z?/øƼƼøZ?.=!3 '$$ q.8 @#|fY~zdW@#8 .$ p$#'/P8*E*D'_F6qZK|gY|gYqZK_F6D'E*O7*/('!"%I5-9!/3 5 7 7 5 3 /9 K7.&" & D$ c# t# t$ c& D!   ( @ $^KBm)/2 2 /)ZG=l$ # t2 T<-D'I,L.L.I,D'R9*2 # s+) ,< I,kQ=\<#`@&eH/eH/`@&\<#iN9I,< , ) .A%Q3^>%~bKoM0tQ3tQ3oM0{_G^>%Q3A%. 
$-A%T5bB(oM0pV~Z:ii~Z:mRoM0bB(T5A%,+# t< Q3bB(qO1|X8{_kIkIy\|X8qO1bB(Q3< $ r"2 I,^>%oM0~Z:jIms̼ͽͽ̼sjjI~Z:oM0^>%I,1 $WD:mQ8(hM8z]FpVko}xxzzyx|okpV{_GiN9Q8)ZG=l)D'\<#qP3qU~`xhiooih~x~`pTqO2\<#D').I,`@&eKpùźpeJ`@&I,/2 L.iK3Ⱥé}|¨ǹiK3L.2 2 L.sWAµƭŪsWAL.2 .I,sV?qɼȰǮɼqsV?I,/(D'pT>w~įʴçīīçɲį~wpT>D')VC:nQ8(t[HŰ͸̶͸ϸϹϹ̷͸űv]KT;,]JAn"2 I,wʷŴ¹¹ŴȵvI,2 "$ w< qYGɸ®®ȷpWE< # t -B&|mǸƷ|mB&- & .D)u÷ötD).) ' ,=!wbRv`P=!,)  % v2 W>/dLo)/2 2 /(WD:m$????(  ]LABpp^LAA$%8^>+X5X5^>*8%"$%B%a@$aF`Ba@$B%%"0a@#~W4|W}Z~W4a@#5F2(AV6 bChtbbsibCW7!H4)>qR0ûvuûR0pq[:![;!pC/$@\?)īɳƫʫɲĪ]?)K6,?+ Ųʳ+ 'U6$÷öT4!#):{x:&O4+CqpU=.Bmisc/python-ssl-1.15/000077500000000000000000000000001303523157100145135ustar00rootroot00000000000000misc/python-ssl-1.15/debian/000077500000000000000000000000001303523157100157355ustar00rootroot00000000000000misc/python-ssl-1.15/debian/changelog000066400000000000000000000002441303523157100176070ustar00rootroot00000000000000python-ssl (1.15-1) unstable; urgency=low * first debian packaged release -- Daniel Joseph Barnhart Clark Wed, 25 Nov 2009 20:40:46 -0500 misc/python-ssl-1.15/debian/compat000066400000000000000000000000021303523157100171330ustar00rootroot000000000000007 misc/python-ssl-1.15/debian/control000066400000000000000000000071501303523157100173430ustar00rootroot00000000000000Source: python-ssl Section: python Priority: optional Maintainer: Daniel Joseph Barnhart Clark Build-Depends: debhelper (>= 7.0.50~), python-setuptools, python-all-dev, libssl-dev, libbluetooth-dev Build-Depends-Indep: python-support (>= 0.5.3) Standards-Version: 3.7.2 Package: python-ssl Architecture: any Depends: ${python:Depends}, ${misc:Depends}, python-pkg-resources, ${shlibs:Depends}, openssl Provides: ${python:Provides} Homepage: http://pypi.python.org/pypi/ssl/ Description: SSL wrapper for socket objects (2.3, 2.4, 2.5 compatible) . The old socket.ssl() support for TLS over sockets is being superseded in Python 2.6 by a new 'ssl' module. This package brings that module to older Python releases, 2.3.5 and up (it may also work on older versions of 2.3, but we haven't tried it). . It's quite similar to the 2.6 ssl module. There's no stand-alone documentation for this package; instead, just use the development branch documentation for the SSL module at http://docs.python.org/dev/library/ssl.html. . Version 1.0 had a problem with Python 2.5.1 -- the structure of the socket object changed from earlier versions. . Version 1.1 was missing various package metadata information. . Version 1.2 added more package metadata, and support for ssl.get_server_certificate(), and the PEM-to-DER encode/decode routines. Plus integrated Paul Moore's patch to setup.py for Windows. Plus added support for asyncore, and asyncore HTTPS server test. . Version 1.3 fixed a bug in the test suite. . Version 1.4 incorporated use of -static switch. . Version 1.5 fixed bug in Python version check affecting build on Python 2.5.0. . Version 1.7 (and 1.6) fixed some bugs with asyncore support (recv and send not being called on the SSLSocket class, wrong semantics for sendall). . Version 1.8 incorporated some code from Chris Stawarz to handle sockets which are set to non-blocking before negotiating the SSL session. . Version 1.9 makes ssl.SSLError a subtype of socket.error. . Version 1.10 fixes a bug in sendall(). . Version 1.11 includes the MANIFEST file, and by default will turne unexpected EOFs occurring during a read into a regular EOF. It also removes the code for SSLFileStream, to use the regular socket module's _fileobject instead. . 
Version 1.12 fixes the bug in SSLSocket.accept() reported by Georg Brandl, and adds a test case for that fix. . Version 1.13 fixes a bug in calling do_handshake() automatically on non-blocking sockets. Thanks to Giampaolo Rodola. Now includes real asyncore test case. . Version 1.14 incorporates some fixes to naming (rename "recv_from" to "recvfrom" and "send_to" to "sendto"), and a fix to the asyncore test case to unregister the connection handler when the connection is closed. It also exposes the SSL shutdown via the "unwrap" method on an SSLSocket. It exposes "subjectPublicKey" in the data received from a peer cert. . Version 1.15 fixes a bug in write retries, where the output buffer has changed location because of garbage collection during the interim. It also provides the new flag, PROTOCOL_NOSSLv2, which selects SSL23, but disallows actual use of SSL2. . Authorship: A cast of dozens over the years have written the Python SSL support, including Marc-Alan Lemburg, Robin Dunn, GvR, Kalle Svensson, Skip Montanaro, Mark Hammond, Martin von Loewis, Jeremy Hylton, Andrew Kuchling, Georg Brandl, Bill Janssen, Chris Stawarz, Neal Norwitz, and many others. Thanks to Paul Moore, David Bolen and Mark Hammond for help with the Windows side of the house. And it's all based on OpenSSL, which has its own cast of dozens! . . misc/python-ssl-1.15/debian/copyright000066400000000000000000000057001303523157100176720ustar00rootroot00000000000000This package was debianized by Daniel Joseph Barnhart Clark Upstream Authorship: A cast of dozens over the years have written the Python SSL support, including Marc-Alan Lemburg, Robin Dunn, GvR, Kalle Svensson, Skip Montanaro, Mark Hammond, Martin von Loewis, Jeremy Hylton, Andrew Kuchling, Georg Brandl, Bill Janssen, Chris Stawarz, Neal Norwitz, and many others. Thanks to Paul Moore, David Bolen and Mark Hammond for help with the Windows side of the house. And it's all based on OpenSSL, which has its own cast of dozens! License: Python (MIT-like) http://www.python.org/psf/license/ PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement.

misc/python-ssl-1.15/debian/python-ssl.docs
PKG-INFO

misc/python-ssl-1.15/debian/python-ssl.preinst
#! /bin/sh
set -e
# This was added by stdeb to work around Debian #479852. In a nutshell,
# pycentral does not normally remove its symlinks on an upgrade. Since
# we're using python-support, however, those symlinks will be broken.
# This tells python-central to clean up any symlinks.
if [ -e /var/lib/dpkg/info/python-ssl.list ] && which pycentral >/dev/null 2>&1
then
    pycentral pkgremove python-ssl
fi
#DEBHELPER#

misc/python-ssl-1.15/debian/pyversions
2.3,2.4,2.5

misc/python-ssl-1.15/debian/rules
#!/usr/bin/make -f
# Unset the environment variables set by dpkg-buildpackage. (This is
# necessary because distutils is brittle with compiler/linker flags
# set. Specifically, packages using f2py will break without this.)
unexport CPPFLAGS
unexport CFLAGS
unexport CXXFLAGS
unexport FFLAGS
unexport LDFLAGS

PYVERS=$(shell pyversions -vr)

%:
	dh --with python-support $@

# The tests include network-based tests that fail under
# Launchpad PPA and probably sbuild, so for now disable
# all tests. Should figure out how to run only non-network
# tests later.
override_dh_auto_test:

# The default auto_install target gives this error:
# copying build/lib.linux-i686-2.5/ssl/_ssl2.so -> /usr/lib/python2.5/site-packages/ssl
# error: could not delete '/usr/lib/python2.5/site-packages/ssl/_ssl2.so': Permission denied
override_dh_auto_install:
	python$* setup.py install --root $(CURDIR)/debian/python-ssl

misc/python-ssl-1.15/debian/watch
# format version number, currently 3; this line is compulsory!
version=3
http://pypi.python.org/packages/source/s/ssl/ssl-(.*)\.tar\.gz

osx/Introduction.txt
Bcfg2

Bcfg2 helps system administrators produce a consistent, reproducible, and verifiable description of their environment, and offers visualization and reporting tools to aid in day-to-day administrative tasks.
It is the fifth generation of configuration management tools developed in the Mathematics and Computer Science Division of Argonne National Laboratory. It is based on an operational model in which the specification can be used to validate and optionally change the state of clients, but in a feature unique to Bcfg2, the client's response to the specification can also be used to assess the completeness of the specification. Using this feature, Bcfg2 provides an objective measure of how good a job an administrator has done in specifying the configuration of client systems. Bcfg2 is therefore built to help administrators construct an accurate, comprehensive specification.

Bcfg2 has been designed from the ground up to support gentle reconciliation between the specification and current client states. It is designed to gracefully cope with manual system modifications. Finally, due to the rapid pace of updates on modern networks, client systems are constantly changing; if required in your environment, Bcfg2 can enable the construction of complex change management and deployment strategies.

Bcfg2 is fairly portable. It has been successfully run on AIX, FreeBSD, OpenBSD, Mac OS X, OpenSolaris, and Solaris, as well as many GNU/Linux distributions, including Arch Linux, Blag, CentOS, Debian, Fedora, Gentoo, gNewSense, Mandriva, openSUSE, Red Hat/RHEL, SuSE/SLES, Trisquel, and Ubuntu. Bcfg2 should run on any POSIX-compatible operating system; however, direct support for an operating system's package and service formats is limited by the currently available client tools (new client tools are pretty easy to add). There is also an incomplete but more exact list of platforms on which Bcfg2 works.

osx/M2Crypto-0.20.2-py2.5-macosx-10.5-ppc.egg
[binary egg data omitted]
k/nUQW" STw-Q*= !p6{”^Nob0atkRm,OG2~rM6{|Ç 4^W'Xdzyyyyyyyyyyy__׎iVWfZ8W{Zo[nij<i|z}Zu"r~dkVR;;cɖ {0).O KkV8|L;&gai)1ߺ%*ߖRٞ7pPq1ܶ=.e71#!:&7Z,YoHqr8{1sd HPLsrkm\/EΨ‰.gGN]7nLL›fKѦfgS\.WT-R>{-7w{J g+Bn͔̙R1͜%5_L)T!I9G$4~圅ptf¥iH.Y/KWeeZ$7__<3M(wTX57_JwѫY}X q6dkݭu1  0o'WRž&Ȍ09ϹY6 {YlVxi053቗ќA[nSLT{}̉#S PÜm7g9ŸlYG/ Zh߷~o0--#<'`[Z8St b{^Ս8 ʷInHR}^4P `rʫ&oO@s.hVߪlm.Mv B:$&CvF^Ɩ9e& n6|W"ĬH#ڂɱ=CdK3Va4| i< %KrlV fA@ 1#4n>hd$+c5{$+PtYP 0 FJV&hjj '{Y8T/jY;;72!-}mo`ޗaҜ= i B0_M5gu8,x|!;. -Y`"O|rH-#ؽ wFk2g1,[Q>[XÖ½^lIW2RWIZ=Z9"gMcBVBid.8GYB47D[ՄRJ0pSvCֲo@3 tYLT a;fO7p_00О уy@MI({#pi$ƫXCs{2{qv(Ü.1ӌvԔ QLQ#| 8eQ5ެ*·U[v.kUQ֪e5)e=A)m}ճ=b!w0!xcKYHL /^ܐ4rZBؕZ]r s3Lڞ_!YPGS̟b["8wQhtw(]F{V=J_xe6 Y#4MOϰp=]} ^*#GzMB p`ٸ <r4:C~-ږ|d:5eG T^Uv ƆTg)h]ѿ˗y⺲k疥"K6\tdF%KY,.KWH^Lo"dLT$Y*ū)Qaa<m-3S٪:^hN&üZꌗE*QK 1VFpqX^sS(.il֩m/KA*M`"mc[r]HKQܣP#sv6xa"4BPk-Us^_z '@L$;'^R0jц(u2kPZˣ[8FYhq)<OǑDRkGTNT9[ĸ/K͔,mIy՝/sM- H߾Ԇ#͇#:kzKƂN彖leKb-:ƴr UwGG#YU{9YpL q-<v-9 q d( 8U D);q+H_l.h +EhVM=ZPc Q)ҼC#(5DCI3QYXr3UϜmE'VءF/1 ,0ǽi]PkVh>z͘0l xeDŽa^Ⱦ55㢒ٗVOq}D<_lG`8V%#ف>^9'*9U=B^u!Y/ VS=fޗ75ZzaKQt ϨFϦ4]gFkᠮ&RItG7tr &aMpuWJ:+EH&n !<yG3=>KKBrʢ]l),K_Z m`VAwH(Ƃ-.L=xTXغUT`Ow?èM7a>뉭vcogIdjNb53sPd ;:}V5 Mm[hZP\5\5/InK5}9b35յgr |9R ~p@~[n VP 9a' ʀGRdE:,>m(160_9zs!͔:2{ ;(l Xe -zpvf`w}A[5%rnh VvhICk,.vcR#8mxpsjё p9PV%>{wQk0Y:J@)dzM+}](ou7Ѯ,l;*!LdžSR7b<)ڄ^ &6Ut*76q;DDtUŔ^"$ y9ᜇ5p-bo}:V77&ɧg[+X5-/ @@Fl O x(6pIcYL:?*E/6.nWwgua UcBU`MGF鲭BH*guiS_"QQ>s}2ۭ#zsخ-rU5gMՃr5w@|y"wW`Z6@EVwKgU|[F[[2B疢/ɺqa&2L=;䱱1D;.oy&M'C+h'cRW8Ϡ4~̴c A<b)gm S*]CEWUa7>-G_Uڰ⯈G)`HzdJێ/.UA (3KRWѹjff˻$ )UA a0屛3t>!Kz{FL@?R-GJYݪ-U ,9QE}w7fPx1hoN3݀pTtgq$!E;<9-}]$_a!bihǴ3_:r(lw-Q4pnW'.:Ք-O(yIm]p{/CDa"&cQ= )ܑXf"Kt66YN{DRg[&R3b"sD ۵k^mrضiOi XaPXw-ERY@$Ctц}0kn]J&V#-+ bǢφ5S.o tOݜfA\i2g+#X/=PXj 9ߠhHVdgYZlX9rɐAe 62gK+=@1ndKQs~o{&HTZHlmD+*/0 *q!P`UDň`ekO8+~1h0"(cp`ⲹIf)_% m 5Ϋwk9H~GG-Kmf35З}Q.x=1hz&P5kzMQgHXaX XJb*{UWus*Mܷ/$Y#c#=p_{PhkK1L%ܓw!VX]Wy-B EyMlr6g)#.O򜙈_C<-HxLaA|&]yf(`f5MG(E +Ѡw"ij,B!0hTY= ʑGbyAߤ:YEf/VJ<>ԘfΣ\boA 6oȯqIsSfM Ky\>ak8=cKo?{qT^Q'Vm觗ûly+o =VtK#HIf~% }K U>-QY~%1vDQ0.V[^AJb](`/_{4o"?9, 2H+G3>Fw|~k=]`iJJgb7O8h1;haI-^O~JMt]|\ucll%̺NcT[ H#.KL +{2yC<[\XsBtd</K,bYaTgcK|{>j)܈J֏Ṱ#ac$a{(v9 ۞ ;<iYtZ䖵I#kƟB֏TCղCeM9RԻ^I}x"Aʒ~" l\`P6Lkg}FWc!G8ccs9cӐ9Hl17sFce$;9!?BC.dwei=i(RH8 y+ϧ}з~g3 n:sHo~5iYӷ dX?t\CZo[az`ߺ`7UfKL PQI&=|\`=? =HRҌ0}@|$ g[~lV;$KBxPBa>PW3(TQ}Z 5[1(5\@ JHT&oܦ2Q[>7_t9xC~q[ũ ȯET$M2b[m `mGZ^!oB9c#"UW"J 9+s%X ?ܠ:A uĹGu9ɟk/Cteߨs(pAPN,{jI'XyXY+@o X~^!s7P1rAWAΓ`rOIssSxh$*Bhk. .gXdBa[W`'n6 r/Ep!5+Tb&jf&jiHAky> Y,+Vm PN[W; g)a6i֬T9\?$#nfT^ךYy}o0#(XQH!6;: ɰ ֜g/ / W== Dc xkTzEZ$ X aPl:ZH*Q\l8o []{%;ґ:Pd'-TeDc=c^Jmgb}Bg`eߓ&4ОqdQ Ig؎e{\J.5w_o1#'>9[qa {l-V]|d f#.<Ϊk+ NV0z9݁>ָkblcjz 'J~GmC,PG&TT ; Q\yr |SX)2:l|d܆N{eKs+U8l^&+oc9f4wWa`ae9F&Pn'OכP n ̾qՎN4qҹ\'iGsn5zm8-#Ё C|QP:g(O ('岶swI~!bi9iOe{FeO.8 n-u|2?kؔK>/pG[JK\v3DUZ/0Q;sǛI7UNM Hb/M;@ tda75$6)H̹&ѳX^$&]n+%Cԇ.]̐<rݷ:dAUoݯ SuFn`!>^OW Ӽ_c AFeC00jDMF%sS sKԔAzZޤp% ,/;ew\}j:[[-7YUFn;Stoh Z3ܧ=~J[L uT{ U zY1#۹׽_GBVHpNorʰ|&iw c&<څxٜ*l(o=ᰕU\c f hi2sv54eg*eP|پ%nQxM9[bLos_N\iYZ*$xΠ~K}Xr˻0&q" |:BDLUvYMFSTyʻ߷z}O/B2&c}ga y ])w}W[neh("a˳ʅ٧Z=Ϛ+ \rPql&)Q1۱WVL|ZC:rkR!{,_sfIIg/íC}Ψ4K ӯ~"(َ59m|8`;M@kᵓq3=oA׳mGٱ+PBH:bIj\Rҽ:apR]!\%6^ofٝɲ?4m(k/xg Tik-f:72 }cwa4_W=WtDV[d=,XkGNkAe*?i hrK=OGOԂ}Jg$2QwK(ZJ2K~XN;:-ٯjv*G5gOf?~umoԬFZ4 ۤYNqz=::LpDYZ]ǫe;N laX;fAqMa\\f>cvWׄbm lv˚f-l.zjrK9Rp?+Bf>qKV_ =IWY2_NNE$F2NA~ݵ]]tljlx1n]oSDU J!fwp$1\|vh"H8a_K9cB-W^k'ԣ N:T}ď4L?p]c{~Is~Nswkq EN{ݖ;{+A7pmfOtI!9C\>DZ[XTF7^j [xB|!'> {ZH/ ړ@B. d=+Y yLx/f{@.err'dy'|N 2Ay?h |Qԓ@B  {k!{B1Hq0{+8ΈoŸ!ҤDWx\5]5͘G$ O\tcEp.{d%S}2kfk۱k/*׆kTײZ_2vu\Q^vm.:_g}]G G]#.h./!()|w E=L>śy|q"\52Hy0].J. 
XcrG<aBЇ䵲Zvg^h{T5?hs$!|!=4gKL0f@9#-FCMkR\Vk~@üa:1FKm2N1l@8 x{5e`4@vedO@E @Llyr-+^/NaW`08>YNJFA{-:N%i#Q.-ZxѤ=hOÐi3u"LG1 t$`& _|δQpy.ȑĐ2),򅛕h\9;L;#} 7fE3Q'hN>J4)*ɜT񧍅^$hq RlDZQ'`봈YbF.*++SΒ S=cdr[e2 `JvVWrtpygZxc1:[:i)O{j,O*=ϫyX`ׯF6X.|,2߰Tz7=Nj EBkWf!Lw|O!_5q8=v"zb{l= 31^ P^ޥA/܇Sn=] OjAzHMuffE}ZL(LqtW 1 2}g  ǃ$H7}?wj@H{ ٭2; '$hA> sY=B2SMd >H-H$@od 17N} I2\ UH s HdQ7tAnl׀A& @Vׂ P%UWi.@̤* 7p"d7|] -2Tb9M2J] U mAׂ #nM]Z}AX7i >H5-*T8j@=$&H}}Zd(9~ ⼡olZNstF}W SL2X2 R^/qvȅ iA@ d>-ElAj@bA^Ђ(@1Cn>-˧8HI 5]  Hot} Қ >H'-H9b%9U]P-oEa T A|Y"@J1 + ])}p RCu-H!WdTՂF2A>_5 II T|@rA郌҂\O #ȧ*Sӂl o3A*y 6i@F YH>-Hur# Aw<_ +b~}.Z Y2A$R '@"H >4 9$]O ߦq d S}ZYD2HrAs<Qr?UUr.WKҏP,/@3T A:hA r n}Z:*N] _ ) d>R-@2Aׂ uAȵ˺ U د'o@* ?Վ R_2Bd@U_: ^c={ܘ݆wr{{m-OܼɈێpz n{Os.[qjq=恻]]A'd nT,%Mڅ}AP{5vN:ݜ.Nz.NnuN<}NW:+;Nx>Nϥ\<B{NGONf+D1]0 t%eNMUlp!3i`V؄KތеGRc^|L,/x=0PMq70.>9"c=w+wH̻5hH|$.=B%[C8,*%}._cDx>VZPGbI vs"ɥޣ찝]v"pSQA=WlOlwt-<,%|G>~gXJb U(&rLɞߔG[3@Ӵ` t8;_2&[>+һP8>j B9ʻII1G{-Dž1lZ|Q^z omg짂~WfŅ@HE| wn>}u` gh4SuK&u̇%+NW}78'<Ƶ U oC :Gaht{NeZUߏ4|O'd$k XGMNLs>bq|YI\\;-Ou$=.^Uk;_Ψ>IĎ:rkܓ-О)dHx6'SssuC(D2N q<שQHcgh̀<Ħ1m)̮:rZb5޶C^c3 E~]< P,Z;FE˸b3NoƵn}v:۷p^Ot./;CjW1gbuK%pwXdKEB\ϥ<#<I$f nb4r%oBJ\gQp/ʅד\2n\f:N20/Hvk6.G{qvxb-^akm?oSm'+Nq: we믠?2x]NIHH݇{g( =ĥyVk76q@)L'eKgF!Jtjf^*=P Jѻr!̆5X< fQNƅcgYH2#E8曱W8+J"pN  'zvlmbl\ܟ֢|?E"Egl"Bz$@||.r/ &pƳ&2nr} K+\usz{ƞ:QϬӆ@5rZY) k{1^avzIN g_};~+YVo7Ɓ,M veyݖf>oD)> -HB7=8PnVއUvЏV {ƒm8~דldێ%[k.%F3mN 9o̹8YސA &UL̗rCz>;n9udeK@B-㐾H! 飀8Ȇ:GsJm)q)[wJ%c@EngϱFe^rcڐM%6@Bui|h$n\v;{\^r)xGe ,ᆘg(۰.bQ`Ds>bv2z=IWҞ`go@\N$ q#.˞'B_{6c^HKjUBc({4' ^bC^[[2M|ll=:TEHU 7m%y+畭d}C@3Qqvݑ24[6y{b~,Z|r4ߠ>wq6&\Yy걙I̧z3>13_0?p22.Bpm= ˰ }{/e_Aox _/_﯃\sݑty׶{y߼WʽKuyou xBYٟu>'ωɟu@.y=1#i7ǷM3=ޫ_Ym)Wm&n&Gl&?LLLLLo&l&;l&M@|}82c6XVG_bOUac+__;vAr[gkDDm"~n"~{m"~?#~#67gįFߞζ]cvGv)i*|q>`;#'Ӯ٢F|F|FFyF}q_x!Ԡ8gТ22,7*-5}1! )QH 턴铀 TU)<4}֗s '6f% mE* kTXim  e,ߡS J]UzOzK:$p @NfOkksZZĹc-qZ|Z.)ge{lĊic2]OaPkDvw>x|߇ƛO%|g7pj wr9Dm+p_5Wr k\}+p_3NjƋ^E& kw-P CφptWejҴ쵚쾚ZE\&.+&.W_7rEO\瑷?lArԄ<ˑ†ϱ畵5N`Sn Ƃ}6PSGE Z'tOW| MOT]*siA_|Ϛ0bk@3]EohߤdE0hAk~JΕ?J_Jߒ_W~ZI|%oJߘs@cϋd2s'̧}f}&dGs'n!=@Y /Hp\@AWv.OIt!OJLO۪W& ~/Hb!}!tk0 ψJwĥxd7"7W}%VH tp]LʵI.p&L0#,L):-|ك]Fu1(n&p%גI埵ZFk6\}Lҷ]-_/-#(2t2n2jr eY+H +HEW. ]Ʈ ][A $vx\2-%يo),-; Ʒ\l[~|KA5[0#Geз{|4e*ߢ^FG5b 1op4qӴzF:إEqq^f+tpo A_[J:t{ %KH_.!f_[v=oCY۟K擳5Y.8c۟I}t@q2Xfy>uH\G !}kh~ ;ooio-&ޮ/&]LXLmYLYLoARW46Ӷ} 8hy|T9lT혏ZFy9idiBC78n|a{t"Ա88ݰ88=8ݻ85/"N-"N@xB4yVB&O]oR2Y+5S~?n17Q<"l: 7 ۫)ߝ>ť@KoiϖlBU?`+)-!U\=ƈEm=f-$UkC/t_-vjr]0K.T@v \Hza!魨vn!魊VAzۺB? Io;rvL.kg]|7j2P˙YW]X\k3Y: U(]( -eFq'k]GN7y|#[@z6 H/Kk H/xZjƑ1#x8P7W7kY"_bj:.$UE"AWW+W8[L {yv;N|o>]N|5uj-GeY$W $fld;S-?YdN͑4fNC ^R{LY^ -[wLvG%z3 zM|D'[aS*OgI)umbo.<6a\foNf%>t\sI'_%lK:i:t\ɤsI'@LRS R1#h|[ҞvFmWU]룫n;Q,&Ua?s.uT =qZ5HC  !+[bkHWM$!Λ'灉yDS"q<$868m_hS&g:~gHg!Jq6X .CbOlA6`Op{`gѤ0#i(uW,jsrQ :%7 lӇ6GVTa 8 7,O?EZl=+"OG֚I:);trs$j& ҉y&L3H'fN$A:t]t#i0l\VM[sFt0I,y W4=6C5w 0ף7D:|R \;o}]lӧﳧ裏L'M'N'WN'N''3ޭ`rGRP{^}{Db4Y LͶ'nG}GYdqzAox{8 =Laox{8 =L4?? J aǿebBԈX5aߙ)39V>M?Y ue*TT5*zx*:x FJJJJs~dƲɔﮰz wL#v^V t0}AoH\KA.O]fQӑM|)OznK*{ݏs=.m S dR{^y` ǓS }铉o&ޓٓ~ӏGk ^H {ε%{ܼQV]F1duh=^W;.Pxg_Ry=x}scZo$$u$u$5txi$+H+o AS3|_K\.*fs$%8K<?ޡ0a JO|Nz[cjۍڣms`lDD6u"q.qDԻmwu?&'>I}0mZn‘u1Ǡ먶9<3яM=vJma+7J@+J}l%޴G6AOq$ENb ?bV~ZIDF#UŜŸF\S#U)ⸯ8nh!-qS qLk-i,]f\8n45y;r^]yߤSW(䓍A'#/##ߙ{Y4/w$ݗQݥ}۲yϏ=syG|ZZf#>ACF|=~mozkкU;h=u醴xo{s`ֽG%=Kz Kz01XS߱'y )r,1oOeͺ($5G~]@ Jq)8[Jryz4C1*<]Y#C.<3BO6yL@mhScmJzKNNo:tob(QH"]6t1vb(ŶQH]AeY)9뻺i79)RA\Gɦ"(ԥ̭eq7f8V#YF`{5%TYEH#gowI.IInIƏ$N$N$N;$N-##.#C |f=q$=^`I3u oOAJ-Z#~iittpp M;temgvVcrK8nQd?8c`Vsb8;>/@KawÈÉeכ9ÈÈzqqX]$n7l XAV H_ʩ,G\&[vF\,`Td?@ i3O }i ͆4 QH R@zqd}t`^GҠX$m'w2طD$<"ц#G8^w/(Pziz>z]CH7놐n!B=tsz!~CH7snV! 
BGG/_l@6tIRlN꾐By. hFk56,F8矆 gm8 [E[[ `O;o~ &vMI_O.x>YgA6:a0.:@|H|3n6n4>1:H| UƖ>f@Ukfk\"bkh_|1 &O@~ZQچ8c3>rHHscs_VrKq&9q(C_%R? C}=>HTX5M4`,R*b-'!?#''#/###)Z7?`F`RpÕZˠ!?<\ zC=x'G=P=`,| FHCZ=Hs!Mtcbo`›X:uc^'73u dھEoO b"/qC҇8-ݗ8qB_}?_!Nؼy1= Wla.(hP1#iL+H sU8ֵºA[jO;x^ Oeq886;͊#nUq8G܆gx#fKFςzڸ'ب%kH TIԵwz{zz".E\>׋ًދ|qYU x#C!'5^Z,>àz^S@fIГ8,ԓ8|'qX'qا'qس'qg0'qX'qxq$3ZZ#ǶR6}UeTBd>婄Wtv=7)-#ܑtP#`@GAqIc:t H:x# #QGC+Qd%YV>fZd/oHC@wM'ZA*)͂uȚLR<{|n{w+hXiɲ/Z# %ׯeʛW%ֽdyH;!m !-diVPA.,C[xu}Z b!ah ϶cPm}Fl&v{M.5xUGሯ30#]ٯ:b&gϘmf⿢֖---͌n1?,6A?IՌ|ow@>!m%!e! )IB{k|3{f!;Jnڢ<;OԽu=s '^.O?<}}>|>#> ֓Ç\~\Nk \vmτT|>EK?4}f=",-miii/M+RiN*ȷ{WHo􆿻7'zÿ7y?GoxÿkKeh[i\'2Cb=ya:P_2~QˑsveCrr#_N4ҽNc=ޢ%/h7<0Dly M`kn^8Oؚ{ 'l/yrp]WLͻMy)+=y P7i.h4kIiY"ڋF|g3XCUez~H5TW) ݆^o\&j7_<0YCU>0dDPוr[Cq])HnjRuu<ו5^!>twxvwwx]W|ו{!HF`:he vCU|¸N] ualU¸w]'ׁq0^u`\a,1y*Wo\ܡ19a ~TT% =i.P_>u:b'2W>5h7xwwOۨ6j{ϹWy}Q]%3l3[J?2/Hz":]+KAϖi}Z\^ #krw-X˳`y,jrb-X.KZ'$3lvIj*.'w}Ǹn f&Q\WQcgiWxtg%WxpgUWxNvWx³+<=薜#ϳra_{F雰k*kUJ~9:ڟ8sU17\T5CfhV?RK,QZG}Kb.nkXtu_Xր ]\`Q֏^9hYLHT&ɽ&9 u:+W19\iO *AHUONϊE"Ms?n'rͫ wRu*ﺇbd Y /iy#_(BrrQV]9S__SQYXYWSɬK?FʵLeU0G}rtc*?*eGa ?")jh 0}P\)4\gnALʔALFLALK#&- &e'2UyiDI6Đڂ*yu+B42lrq-Lw0$x/.iw;ot;l;߱ ;w|;;YISw7Su&(^VA选bRoL²-,]la9nc ,b Kw[XW+BgzrKDһ{\:(Q䮃+>9"e(|RO%$Op?SƥnP + %$הƁ"wr86m@?Vw/PA~4F -ݤ9gH} ml6pof6p/k.ΰB=.{ kk*WkC˫T`-jYXn0@gӭ _jVnV—wN +2Tt g/+HTpZ!)oĂO}#j Y `|AEEB;b4-+'rFS,%`V Y0+Vf=JK Y0Tfh9k*-tkm=鍲"5VJnk[zfss.~Qf.-KtU/XQ7:weD|]-]I/Yڻf;b? ~ ~ ~ ~{7 ~V抨APp#޵ je܀0@>~R=/T پTuMKhɮ#d/]EJeלVr[zbQ1C ! JbPN1pC ~ -[_^y\rqrð{in?-(sk^ApC[E )֛kɭquTn9#Rn"[YA"ǠH!AAl*`}6bp<7z~֒[[ȓ[r+˜sH[mGs2T-ȭ44YhxNi/X'wރ~OxOx ?' 3w:SdP/5yrhCȋ|(\-LBNh&Ӛ+օX?`úXWOa2'Ej W5Wr*G9j%11q҅ɢ1d{}ҌCz7 %ƐyV"_R}$*mTV#YXB,Vf!Y},bob1: ^DTig-zS>'\C9OEE*$M[&2 ˡɄLXe’̈́LX˄&,f!Ŗp} {c9 a}czO6n(z}hoy֒T O53 <|Wx_Y+<}g"Or8.xI=VzF"=ֲޯ*_HKϰ7CVrR=B>ɀ ò ̀qz3`<+{3`|>2`](oX7tgL+WVvsM k(Y±7P~ [PT3+dh/U3743a-a+{a#a/a[/aEΰQֈkF-rkFdq 3N rL 4X, iHe4XLeb*,m`TX^4YvSj9w,Og]w;&UU5[ ?0TyoX)X׆ R)pMKU/+Rz.R* 5G]mD_Mfˠg#gq_{)We+NiP2L'tc2L'H2L$ôQ2L'tV2LazHpxfe󘡈2& ى)HkZ&A! IpIpDzIpD81$M$0G9!uq-i`F#VsS]|=};u '8_gD8gD8'y'8G|s'8k}Aom:޶ ۬җuG#{mЗ\rq79 !~^?P{}FrY O3\kmzr!YՃ` ;xuo-ū{{c1q7 7n'Z2%}! yMS}2ܒB^ɼ?? qǽXڎ \!!b9C,,H E=feyi O{U x;&b!އc=)F择S2`+ݤ&rn1$-}B} :]KUk^>Jp:;-[{ oa-W[~ oa\po{n]uRWTgnQrs]!}v&ʺe]muu{ZWש&G߉굇Ruu`9j6} o_;뽆w im޷^7Q]%3re]ըZW+gimbTj$O]_8%Ɨ0Nx /a[_KO{ FYu-uJey\ƗsgDuðWh1ʺe]UNuWQ]5~#%KS娫ûsx{DNcؘ3KgjK/Ka~i3Z+=_$ O; g?U!eFd?D"N"N!N"N!NV۩j9Tv)#\e TAr,cjȒΰSIsQ~-%,=d/KY*%eb,_FT,3"`y6,X^XD%9a[_s@?bI̜q'\N,gKRmdW*.n@}I2B&|܄o[}~o|܄M 7k}en}$0Ѓv׫.T lA登sdaNJTaxYvd 9[!7?߀?unހ ݀GuufqDnψ\e˗3;4M4FqSmMҤՒR6s2.; ~o Rn>6O|Z]1s8Qє6  %vlh.ϗ[UUUUfz qpquq{uqrq3`&9a;=6]8Ґ"d.|Ec| \UW=7 # !㬕/{F2zڄl;)Մ,-:)ZyWWJWWWW W/+W+W+FZyR 9ZDr-'\'ҤkEFD=^^s5ϋ!t> _AݿFu2ܩuÝo❨/xq.k3/;"\wEx߸]7]~s._Hњbrvڥo ڲhzNS1"ІWsǤH 2LW_ 0=p`zL_i 0vӄΰ;qC# cH{.coE#R g|ióyx?}UóyxꟇg2_NnXQsvez/-{Y}Fd8GRMdQs0M? 
{gaZLis0-u{Y&遳B(9m9x2G#20Y_<."GKPMCR?e#g G(9aͶ掫h %Ru >lo:b:گWzpd9)M\>νqp_gp89/+y$\wjr݌9NF@$ס)s|&s`6¤w#ۇ`EpBO= r':OO)8Sp> ΅Nùi8>˧CO l$_F;18>Kթ]BT>k(zOJR y>}IN> 'a$l-XM'a[$l{혓u^^r8h{m=)" B[=rr=zn'`hv'`hq}O & :aq.2$3xuVK/ #G}(uG}YZ*JFZ,/{ t~RɰT"s:Fd>cOo~$pTqORӜ}M7iʟBO%G-"iy)CH"N{/p_hCu!O咕>FaaltXFaaltHFaxWJ{_7+LtU"),1?:)ԑO/~Ь9ZDMS|IrF* %_K#{{A8ەߊT*Oc g@g*-d"R൚;%>hJvi6+ˏ悹>vާ {x;ꀰ7Jt]%3lUݏV몚 8F6_haBM[ǚn;%J].~~P\yw]&Ä` aB_0L }0/&Ä`;M'6q&}BWh m>MșvCL{e̒~C0B;/g>eTr߶М3兜)u9C'qΈx's7s Ixۡʇ@V?~A`.iSo^k aUdoµU~bqIڍ8A\v#.{z{[/=Kn^>.^=r~ϛ pB2MD0_D M\r?xq$}<l}i($/ɬK[ ,Cz%s?{wp 9F1h/"g0e_TlNw0 h C!ՖZ7Ȇ?Ltmo 9#ҌkYQ""߭Wzwm{ծ{mu}V7 \:{w N;ӑӴSӬSB[}&^R LQx_K!=%)FO0l2TtyP2zC)-v+fZhvrd2&vĥv%vRn;qqqَ\܆$nC\~oC\nC\E䎿72}X3dEU|9u[4&g2l@pTn \[FCQ{:~䎦:zw_-Usd׷{[`z m-0[`~` o;Q\'P PǗPaXE:>w5C_t[K?]\oRu|pS*[\v89aWiBu6:x/+W1_])}?mUd+dJur 9Z=6Vw6}}7}+pڢ G l7(-wR&= [/<=T"ĠyХ`B!M&gەd[h1VKcohjoƐ{Z.>}VwXYGR!nuzĭz:-kVd=qcr#[ 1y{dcT^Mm17p=iMsӭIMin:<{C ˡpP? {P3p p۹wLsr/49am17})^yn:fhnVtޫl]MN[YoCjX}Q+vk!Okp-m»Zxۭеk]n-;kl_Jn{ :0r\g *t?ݜ|sEKi:սҖ.}$ |j8_[ g5pΖkl[V9b5cVyja+AK.GOe*WS?ۜ~*@K"$U(?e-U?CwJ,Jȷw P1"#Yc1"Ma˹XKn}=t}\Z~9aOT[?PV V[Scż{᫑bQ4z^#$گ몞JyFqKsk%b([XD,zD,D,>@,,V"W"+ ߑ oL(1iZGߡywcrree˚ee˛e˘e˺ee˄e7ͻn2=]썶s>}9˗r^= G}K^InJ鼋]#ʻ5:ɳyݭy~T͈#,E\<".K.KKK!KĥRĥRGoYݴ2\+|N= 6ᏼ XNrJwVI]p-ːp7bT'S1yɍ`d[0b&1|FLRɾ`B0b2?1yۏ=[K?~܈~OoSsO@-D >O@-?b]k,=ʰ/_A)ruK NO9sS?&K=_kp6|Οzg>\{̇p]7} W{N ~OE* >gljmwkսi k>TsAj[}_bCҿDct2/[hiRhkiTk:B}_]k3Ws~\Sg¶aѳp:(H6>3bW~9$Ҵ+~OsqRo3-gs xu3f<+̀g2Ug!'9a_+=5G=|3 "yO힭z/>|Q2݋>e4Xיin1־adj:Ouoj;GL*_A9"rÆc\aY}(Yr=hFIqU<{o9Sv ?r N)_4a=m.xZӪ(0Q3 m~et#,^rdXv Kɰ;a8s&l2,Neɰ,<۾_dIg뽨'e\؟#hns~3@æ)@ .<O & }O4I}O4IMD>$xn'$ݡԳSeen@n֛c+7Uu.$?m;xf"L㣉p\7~"'L㦉p$c8y"%+ߐs֩r ;u͔sxɽ,/PMumw[!8R 'd[e4lKQu `;ޣaplGh>%[mcC[+P;bWa<~ NRLIzqoK8E￟S=6R#H?R#H?R#av;yȜ:P2X9|5]K͇_Q=#I9`FX6#`m=FX Xa=a 0vʶ8cPc* vL-uOv$a ۯ`{~l m]= 0؆Hrr>v<&0[ǕhǏR8E_Q(<.K*o#gn9 ºPX 5Ca]i(; :uJðZ+THEWGߩCtQYՇ~ :C`=-wL Z »X7ip iESwTI8\KZ1$r$6\<5d#2A<4`yl,A S`yw,֩oY۟ZQZ#oE[Daz::X2 &lEB5pCwhuUy29k]p?}@h ܇ps˽JJC7S/(q=cy樳yj5o#y%I+}e^> WG|<9~bg;Q1;$L:g2lF :=>gBx8rɥ7p@EiE˗5FIa򌿻m~}R?N1`Uֳl :Q[Dgh[rxr/{BE*\`R>kJWp}j鞧}ຯo(u/u(N]]i{?>3mG'3@|íT7|c{lo _z׿7|'[r8fPWfʼ]@swZ~j@;?{6 ٥<^ =Y<珞VIgذ&si1 IPD{y;(c׈_}-S!B!B!B)B!BekDp{U(|_]o5 (doQfmW%cMpLwz;\{tp]} V&Lkb*Ïd"4 OHbX؟PM^a)Թҫ,+ 3hTl%%׷kߟGwM y]aܻ+C¸W ]a\+w0nWҲ/y{z-Wf~s6m\ޚ s㟞)?-t;2T&*3~ݶ3m:{`gx N o$`ΗK&hs;;9o:F89kejRۢjc%/)mke'v-aWvaWv;}U? Η/'w(;7ns^=#G}RmiVytSR^RVjOnOǖ,[U x𬷄Y'דðCj31hͥsZ7V`[(e-Hr*HsnmY^snڻ9\~\9 k"+l%d}+:nE?\mPJV'? O~B꿟P'?bR^Guu\ɠI!K#-9Vy8ܝհ5PeǷ5o ?te//oޢ͚~//¾"o8/O$~hDɰ r$6.6҂Bma\ ҴaSzRWhsƌ~MMὶ)٦vh M=)+73%s *vͤ=bjIclo1 T\U%Gt2]JrneBrNy,w^>0Omsgg4ng5yG6_@q_BB:soEq2ΫB9nk^{Z^ȏfToT)K6/Hf{Y臐~;e4GȭK}7zý7{?oy}7y}7ܧzs)ianw;ײS wjm{×j[1|7//k _m7 F[1|_y M tWo} WlC~봥kn^lTvY%Zpkeo:+"q͂d\N~=WK~6Q ZR ["˾H='b7D{"v!'bDy |iΡ"v*b7S;&cG~>tbWZ:+bWZB<~4'_HV/qz ~=Ec/x ~=ߜ9j)KگWO{B]ƕKss/4r/X:=.) 
^҈{X^qg崏 jK*je%iD&T&_5m}Co\6lx97@ 6@| ^E ^] ^ ^/Cc}k,\ !/\\\$*TNf y)_>e ;ic} 7TfT4ס4mCBbD9V0ҘF -OxX{\E%zpqt_#אatPXo2i6@6'yl^R7M貋 ǡoJ* +8oG|~_s&z~ Ԟߺp;r;r909999P999P90uFn~G>!i횯=W(;+lg߫\8$BS >O{(bSyO) 90vY}Hҿ!U7dj㇩UwD\H@DIB:_ z { y"cos?؇+]cSE=6!ֲs{EuN\>Ηi755Qz}RW9!CnH7b~ 1qCǺ!n&t(T(sL}#vj*͢}#'1R}#qhhy]:XTו~2/GV&kYm#~WR#dzW_)WĨ+b&bd%D"F嚈QrMH1^1 L1Y[1C߅Vemc+VjVV&c[Mc,*i(M)V{9Q"-UT)%Eqko WG=u_{JW":#>;藍(g٪D9 ;rhdE,|U:@gHh'յ%DɳL8tL/9tL``[`:Aə=sֺt#LT+"ҟB #`ڹL]+tt֩y`:LVi 0m]+tQ='3U9@9pj Np\a npa -pi ^pg p/ۓ6EuK9_DKA" I./< ݭ`haöV0@+a% `b K6'2$󃦟pMaFzEy=.YRu\h"5.r'Ǹl-`{k[KZXvl#-`k³}E>&R.}eU}u4C_e\=u:.JVշV"#.ǚ1YνEZQ]r;z218b28b[1W1iQ1Y]1YY1/./T,xunklu?}&\jJʫ _iуor,hjEniܝn=z HK4h0HA(K~[-YgI֚hw$'ɚ4ofg]3C[f9bdnm1C"3hbTZHj iT {?'Du&}sX_βuEi 5+_%!j\CJ~#vWYC=%tK kH|M+VeO ڪ*kH#%UאחZCڨ붂 "Fc "F "F "F "Fg "F- "F "FK "Fm "F}:E9 {O) )BØ"Ms7$/z5hhy#3icc6i7c7(c30u4i=c* ̿vs`jN.LMm t^G)[H<] i{YF0M6m#({F0-UV``{e: 36Y&YiHda& ~rMj+zcpn9p=k@CF!\{u!\µ0?A}?{B.vuy)omE]lR嘃5҇=8>Ѓ>X^h=8&Mډ=9a|M)/6"F9X/R[4{5tg =a3Lki0>t&_L.vȔð3`z4vd4hR$FOitHw:L ôs:Lۧ]L>`:G'3goW ,w8ilDD <'!ǜ}J*ߤL*3Sx!h5p ǧp\ >|!9modqöz*~yy!#6jiTU%@b2ޚߕ)߽)) Oo78S[}\E`Q4Um^qڰ&N v^}yD'õ^2\-.%:(ᚖp /IpmXݕS׀bWJ~Ryj4Rd'ǜhZ"cx.x)Ip4IpD8nHc=/Q;Jg6mmJ[:"ITv7 GgK+G;|Op< ?'8^68}cOpAr=Rh;:9Ec#ɢ .p{':\߻'E<+&<$3 ੟ Ngx6%9aOEҼo2/ÒI^&yJIL2͕x%UԖiԌ|6,~3,O}eGXf}ُtYv}jjrtLx~* Eņ/ Kdx7ga6a>aÉ0c0C"Cr8VG MBNi?Z^W͂tZsqվJW6))m -v4$pN\|p5>Z ~^H]McOo}ջOs?O<٤/gf7 ]sGaظ4i5^x 0&E/a(G/8mhy.k^O-3{&5WE}K"&G{ģ|,Gi 铒se'"?TTAntt8xs^+s2Dm3z55\ZFv~hf@~=iq],1F D#}}ۙt?DڷV)Zj o[K[J-VkKbO-#1A,Aľ!IAJJTr=9dw}${;s4ha4 FAfiРV:?8+pr:Q Q q"96/wҿT^mш|'yjϗA}f:x{%l1D;U]#1.Q9$5l(Cl{VUxC_/ Tp} R;.Héz.\kkz^0[:% WY\Uk$hq\IygqYpp#΂gqYp|[ǟςijW`nڍfˣ=bZml^D>'(,/SO:3-]g8πg3 x:gs> O΀Nθ;̖9)uUqw5ȾG@St9 wR19=OcJ 8V< >qm 81)mU\f˷8ĖFۼBڒ&cZJyܫՊo3ձ PIa1f 0jRVpaTyզS`;NS`~'x N7M0tRd'g LO~I`z/LJӆI`(Q*re&AsޮjVS])河vXO˂`y*,+$eDJ˳ `y#,\v5Kl??f;vVԫi}~L>-K3x?Sc9x;ggc9x<}ƪtטc:tA=gƘ lUJI|wG`Qzl۟xylۗ`[*Vres4XVex#vkRLi89EpoŃ#x8{Ǫi{j :c Xo[~(Ky&? 8d`~1Wܥ5(uޮ Cc]3Ԗvy9908O< αyapN9 ɇap~sSIodxQex#dwE{LK K/nӯ/+,~(Y: K Nx?W7WZ<+ZecqР}4 AZq? ;q8h'o*oQyl Rb~HgiKXU,]eiAO&I WcXc+j- =xV#UTPs˵jG?lժ c ,k?xމx'o]xG}m?x*Tl:aN-K:/mKҘ]ثZrKNzS57ĂX0 ;b8(bh,wI`i|>뷲O|Jr<_d˧[>J,ϧG%ߵU}!^MGN ;{N;Ng/v{J1[ ,EԲ1Õ ؛?Jh,[tAjnxVϚ;s<_nϷwg )Be;*0c;(E#gmn_gMvGDZEc}T4XGuz փ{`]7c׃փuzgnJ{3*eC>q^J*_n8UeZHGzu`qXցu`za[߯ӹt:0ihG^21[*b^ݠYFRo\Zi.C /CRic_L׊l=4hxBBkA(hPt-4\CkUB- gi!QBB%otʐ&oHmw-S5`րu5`=z XGkzܔj}7{̖zfۼ5`h=gx}~bZb=EZLՇ`j0LΫtj0LӮtj0L27=TL`rgn]+bz`=a ڑ` V{`dӖ` LViH0M]%ognJ=f6F5{Q#g+ bb?LmetANr/>noJ_ +7}%F;+J|׮ؕ;q%vn9mfKٴ&_3 "}zZdwz!+`l؎\-V`;uؖYWm`[~X͖i9m>3YIGpw2,R|{poFq0.`|k97"r0 Dqr24d=8e~IǃC{pt2n=tZ8où2Еr`>\LYzRpMZ 땥n)>\ w뤥a)Z KWWU͖Sxj˓z xvKȵ2>}V2oZ$I uD8~3pp]pp?kp] &tْ1ss"E]5>wney¼e98 B K0-`x ,ۋm ',coJ1[L{Tf+N-Xu;eV_%EK=kerH{rZ%ţzM{"w- "NX=qx-oe]YA-E&r7;ʊt屙u䷬Bf ޖ לm-+/v Bpv!\#됅2 \^-Bp=;rY$KݸD+\:.^qY*+(op]LZ1Nw ,e0`yXZRO9",T]_%*^yUū_:8uu>xE|2M|62OTg*^yݞ^Nגy5jxm^W<:2^恗ex^Rv;ŝ5y|=2.nF^V#ٿ"|$XZ’.,q?[;Y[,w_/;„x1YSw| *gp?Fbh 4V@,4%ڛ pυ?υ)s\?o.>w ̅mMϟ%}###########mN˟St} CP?T* C:k)S翑#?KS$}7 ,7 ,7 3D1O$WB??&Io&Io&IoNʟG7W{b"?D"C!?'%?R}BOtttttttttttuo*l.m1oMQ{Jeoɷ^PTKA5zT|T;d1zCMH]VIg۷۵G!ݮ0yt |ԎdzgO ҧ٧iC/%8)[ɕl=a+T^37whsi?-e[s3? fqחj0Vߘ370%s鯣h,U&3psd.ryTGc4Jc_7##W##w####A)p%h4X@/~jlkLkq!n5募Z1 )ҝ^d'p#_b4(plzYFO宧,dq¿-R')զO#6O0{dPT=Y22?8a?hO*'ni:7"7?Q+',"',"',2׺I)H%ߐH%Gi1&r8 [e9@~'t0II"oLycEޘ$$7&1QI"oLgg$re >'}.Y=PWf(%P/x~ijـ@%hWzMeb~cUe뱿^[ Ы?z}4zMW Ыʪҋ.7[r6z^5CdzUZ"#Ɵ"uLȏ^%^֫-\I}A+:zM ^G)z^gW 5;z ^^J/l)7Mgp4YMe^zeKzzE'_E;k4zYj ӜJIq8Ur<8z^Aqk8q.u^tc?Wz6VLX˒=$ #7zW"#z>z>N:z]&j2hNz}3zu j^BwBQcX6z ڌ^B%͖=}q]]^&UfԅN"J~Gz6EFiE*zk|M%T yދ M&FCmɸ$a4492|1L M,IФ sJ/ &RL_? 
j$皉WqXc&}֊G^WFQЧ(sg$; < }Z>GAH5 cW >q273V)~#Qɘ5NAj92I;>eI{}咾[$%#$-5$G3dq B<uxxEco U$>m@h[*v >v@ m@ۊ@h @̯]QtٲYV>H^) r~l,K/o; Hѿ2Uw c_2Tb5믃:_KE_E_E_E_E_;*rWMg ]ݦ?#Z3N@YU=IZ^Ū^d^^W%zu Ϗ^}/: )׫@-&aaЫ0f*< z^àWaP5y*ͮʅJoi+kRɨ\I2Bw&E &%G*M>55ﯥɿ{u$j4 M@MCɨ!hrp4f3h4=|1DHչ(7"QQ(aDyf/JM2aHnQ&^ƛR^n-k]Ft"~&t! SK/}78 BnCJUN~gP1PF*w ŗ{El"d*;R.h* "5Skn6aߓuWzݍC'kߟSeͧƦ]MJ׌ hG智e\iY7}v{Q?JhgXZz6lzU7um*d'31L~!M#ߍ%ev6b݈[$^D}T`ƘOgO 0|ǯl| 0 1'5+z uo2%ƿw9LHwBj,ٝFxlpxPCMI #◬!}xP+`'2 ߛ&zOw^L-=k]8|[s2ziأ;]+OxkYlc(G]' A{JiBlo),w_*iK ̋Y/"#i[?M+hgC 5?lMH]꧆H~;Q<Ğ 5ei6\^^ vU%UUU_&)W*OWjbPuRW-TJWm* *¯ڮ\]%W-6REjm+loqe5+FZN# 4lLmOq?Fn.\nk1iفkX0ҟ&o?;OnP0"ݳYly-fS͌sٿG&~SiMWU77}C7Uk*f^-o^? {Kc)cz0Tl}lrvᶅWs&gYn;؜{#3/s5V6XWlo*,h J =# ]R⿬VZ}fVMX[P֒?*v]l)Ұi'9r&r|97JuӍ_n) de [Tz6bM;4aRl؈r[bs+c v^(md 6b;^VLlŶͿVOf)mTlJmM7m>㶯',[5aL&(e{3[,[f)Ep"vJXISmbVʦܶMNm۸R6.q[bSƥ+6l\P")ZlC:=Pl|6ev$IHn+IVY@\֕) >ү}m(%mEq^+w`L n[ؼKXsFvZnSlKz&SlJ؞9۬m3lf,a&V:C *aϊVGuRArFbsQVTlR"}͓n䶁bq6]p:Ŷ->{r 6\?ƮSUxv@aIQɤiR VMS}E,faPl9m)cl8n}bUVxъmD1-Tu/f0RlM,[v嶓]bm{n{~=sy%6z۞ٟj)6bm}ؾ@~_[Uo[!x .>nۡ)j˹b;)X<-Cmm. n{67*Vm-lEVۚ*ְy1u-*EmEnZ8ron+AU6MG嶕m'lx۶)Ű_n;~=_ebkWϿ%]v[ ^2הo[ =/[ Fqw휗-MfVl{l18+e^ۮ(ɰAlBdm[Gh3?)C-[=hHIm_)^8K@-΢mbPgb;PgT,lVlZgq[Sl%V۾SlVlnVl7hѲG َsߊY!n+KM~C ,VQFqm l4!}m *n؆Vlոmbd+mۇly%%)򲍕Y-M|Vfmۊ/?n+ )l+?n˶ʬ㶷,6DmaS+U*m[3*MfMYZ[o|*)n .n[خ{ʐ%ܶF)Fs(av K ްU+i+JpYV=<[ O[s^*7mQAq[b *h!MPlW̊ͷͿg4iY'UlE zmbm|la Sl6l3wŶlؖz Ϲba+x>,6V6dۧe^*w=Tu]p0zNuV UtA6w.nzԖn-GPͷ{6{=k:l+̾rm)d64̠h}ї5?(9(=tՋP. 5zh|L[>*Ϻ`\W ]0_]+櫫v|. u|.9o&tyV"|`E,j_5(9gX,ei+KYZb.4נ/sOR=hW {`d  ` ս[~~>~W3+~ܱ@2Y|[QZ !,%t=,maiK+XZ,1 䟝 r-WaѪ: Vu&S\'!Tt«d6zGntL_oϞaos!Lwir8ݔ;--[r9.˖t[BueK-Yj{,f2K>,dm| HA|'h@_4H Ju:CN 4 ";A&ti\$-E_<UcЖ?ZJaZJLx~^m;Ďա#x^rGv wR !8?*yyY4pqn|ouY@uf˹³$:kձ\+6'6zuvv/w:_A+6zkzĆ^MLDlufz<6v=wlX9Ďx*ʹ"c\{:oOv`:bW;$_+jbk3ẔF_Pkݑtz<96]"Nrƒ+I?S!v:eY];g9;sY$;J&Zq>{[{RڀW[Km~ۂ_Ŷڀ6෹ *WX3MrvL6Xej=[~˷bGtm},WV2ue>fI)o*Q웆IBZGb$ᓁr4h'G~~x])Y;g46;/h"IKz霛,yD>9o txoZVཫxV=xkާZx¸Xyha25>!q&4Ytζl?A90s}2 8^>+ήϷQ?t7ޓ J5ƛ|SYYa7uxOIAA[P+drB62Q![ *뼈P0y\~~Z{yXf-/ m~Z߮׾i-S ;:z2[eyݺuh>(>K7XyN@e=ώCug^9h͞|&]a%oFU|!oK tňM`Z=Sν&+B-2b3.bІmm"BiL2Zc7o0~c2~l?m5wNs lÛ9ؾl[5j2`)(Mjݡ`U l~4&sEhX~|@_F"  Diڛ,gR0KA,Q MGN;eۏ7bUMoc4#%XH<}l,h \{,yNj .Û_Sp i .k .KˢR)tk .?6wEt5uHnS!) I*$ÁD,!b53z;_y i6fMcdhї691EоM@T|M 4H3~E ֨Klu)O\'èw;fǬliC!NkƆ`[!~lhemC!]#iw3*$^&%Ӥ&gM1xUg}Idx:Tԃ~և~uC=zo=qfeN< ۍT1[lgدE*^/K4D'KX*l' z)gL|M0e baU4Y䰍#e:KtbKvgG槵$_tْK6_|6e|u/RL˻=: eq+q-=}\u.]gvˡjCNT./V!v:eK%I)vsXʫcgE!iO)W\(*eZ\?( ʀz4ʀb^c)pit}KJ]g'?NLjR.y\qp]( ˔qk|ܾ0su=oip%Ur/v7rz07P _!-My'8䛜DZA3I,Yt},mc)U,]ei}|VctR e惧W aWJ(j}}2E|: {}aEEߢТyQh1(hYZ, ->EEۢТgQhQ(ExJu- ܏d) g,p}Ur߷{}Ts\xKDadL3w>ߩR7+tR*omt fU)ȝ-yG oxġp;T>h)R~.j j3 =^j3|VUg` <SF o|O~dzN gm)q zi}Vv7R'#8fпIٟZѣ@Y54Xww4x>O@'РShT *?@'h 44mණAMY:fljk:ҀMҀU4 _@'* d9* 4 14| | "CРch1414x14HeJ4l5)kO n(Ҁ 4 lA,hki`3X4䅮_Chx>)AG4X>=1ATfMtҠy kH2B;VҀU4ϠAWhcSYۗU>]~~]:<.S@eRt x]j?.ӕg0z3@q"#0np@9=}:f8:|(F21—n^׊9-􉑞=~/ ~.=w=s~z_#_n1򎈑DEwY" |ͩV2@J+'61=< uf4'Q'o;eI58>q|n_;G_;?;so_\eIͲ F;%z8>ufˬsq|$}\>>ݖRۇhd]2U>ط}EڭY81: ˄N3L5:gBЩv&t2: fB_cU,5"EyE˻ .%Tn|E%oz"d|Nי-VN~-9>S LkLC6IW+|^ eZ,L U/NOyYl[B٥cd!03J8%/'JӬE +5.SʩIJeAv.˵rkRnQl- gy]M_&Qס$:4y|ܿMסɦuh2:4!iw&XNCkFdVrmMSv\f^[=g~IfeQ D ¶.%k-We]j4=a[k'6 6wLJ :\GsT !c𥓦iZTyy"Ǵ3H**ܖqU0^z\W8*']WU0t_㯮qBbk΅~0z.d1 ߑ#= =5ufKxx^Ϡ;aﺺuNN+MtE6&"DWDh]m+MtEjW-o]ѧmNJvK1rvn=G;<#8D:#2վ ~._72\?epj{[:@yW.Ҟ7"W=}<1 uf<ܞB#䎩՚<摿~s})}L+,~SX3bvTnjc,t?aG7ߎo7x*-o"}krO'QNz<4QvNpӇ?0p,Bh}k =v el.* 8GG4ae yLHB8[n2<):,biKQ,-g2Kanzri>̖x0htro? 
P\ h5Jw^,:پ; ƃ`7LŃx0-tL=x0=vtq<c5{|f6`,Xcp;N4`=}|ڏʶ֕ eɃ GaB!6x4f[1rX:^,-f몧H|]jt>GR^6xeBc(%Ba1r^h}AV ;dwj5q^^Mz#e:KDGA-;C`!_s~!owG!\ $j} f:mYi+F~3[03"c:쵟yoVl)Ak{&oA =~_ U?(抓qq,v'%q9:j' Ü۝] Cv{I G |+~|DZ[m?86cJ]ͷ 4긮Nfy:n˷J}X]d31 &F8{2qy௬\.9~|ԇR:[9*4S3TgY造XũM*_jg8Y`7=}XE>tҥfˆUmTa:M V8LI7^C:GwƮЈcOq'}QAʕ T:h)༿h%_z:5繰_l: @ǧjt<: -kby{2%ݝVuX˹b5pŻVm(N8YK8wye:Sj]B7C}(X&pN48qUqڔ,a KX*YjSW=yn5E7KX,ei?K;X-V]wcT\8~j95+_u,`iKs){LbKc|_WC`X dԛn,dfCOt2m9o^Ϯah_%JKAtvUZxNeEy]V48/glu۱`h҇,){2,e3&cФVhRu+4v+4 Ml&/@}u+40YuY90Hj "4/J4/9IMjx=V(d-W]'g'^:(X45~k L f\f1/R 3=am_zk~91,aiK X:,7=J/diשꮯٯ+YZ\f4,`'ֳ]O:TD-?zf&˜CXoYg,cK4zeVOICg4ꮵ+Ʈu_[x5u뙨Ck"wK߳-KYv,`CFTCOtٲ}!*Ѩ8|αznFud.ךc^jVE%Fqt4Gh0 ƝxT4XK4<ǹDhͱ(K|9++HW&&ZÿUm]>.f.3:Dez0+.x;oc2m|~NzvTԒ/xL1n󍛞ߵ~/'fa.i,ң773D1!zË%1Z6y<+.ϊr~Hԍhyn2aSW4 \=TuI%1~A% p}ގo:dz?8~wcT bU.?Z KY*~e8. 9}YҼS4(h^- wy(h> ˓,΢-_nP1,}NiYw0}=kEQЙ ]Xj',5f+]fsZR#Nf̨,QdܨgK|)m)]ja˗곤ؠ hT`Ü :x-r?td &wm"rn֌R>f, WU_2/Z_-,*}ruWOѯzAO?;#Eamr^uE檧9]g8Yս4ck^Wh3%m\Mm}"Q3ڲTpэ#r`~0 +<,̏3!GqRڰɔhCKSg|qЪ^9Sh]ܙU" L3}h*-<gNqyS3Qy-*>IQim2Hy -ڬڵng k28.?۲<}mw]I+d^!ܸ\-քմl"z>_66/G.sXMHva4i,M`iKCY>,-g>f>J˻`I.eO_}ND*&+TBr^F-eegkix7˨`W&-z`h ص`wr =[vE+AeOEEg"pY̼ㅕ^SaW*B-R6:.P/?:{?2mEœ%\jԊuWE~ a< FW0^' چW0^u/4sP\3[N"x5-,+AZMH9%'^3dq"}(y_˶|5,a`5,ke8ˀnu38Fq#U=ou9 X:⹽lů\rEcO2םÖѩ}_{3j_7zÿ>ezygY q$'ǩNO<\f_g3{7dX=Fs{r&sHݬ.yr¦^|]!|q?wcT'Jg.'GA$t>ߤk3d{h4.XfG-^7?>"V-Ki׊ڵ"mN׊"mqH_+vǵxh}ĉ#^ g04T~SM?ҽnp>qn>t`qm3ϲ~u}X0 .-tXOGGVq5̙ɰ{eì~5}m&6?%n,VT8/\QG~:9t85tFB>ޭ"n*oVnwVOn"Vm"n*oV|ic=~_AC߾$7lbF}6[8sqd[[&VK۶Tп>')_662Ot(q/3YEK:24"}9La 3ԼKޣd\LǍ-o&ssغ+Gyf>nqp;8E>s}= :&~ ,ckMqƛkMj&ɳ=^I{}Ͱ{P69?$EYgnyֱE[Dm"lglyYE[_=^k>]dlOxkݕ9ngc͚ɸPֿ, V,˸vkWwv{UY __bޘWlN>N^d8Xk~3-,^Z\ kD;m>&oy}QCFmvȇE> dȇE>AîazƆۆ6Lxmyp2ָ^E>-ˢy :cO]tDǮ3jWwh5Q:ctV:vұo&Ww9X9*Eb>nq%rq&>8 .?h9*61#1x6F%ܜ'_۶'s*6-0_/ٻ 3?ٜNnm8J_gs"?oD~_^玜r"?sNy17ۚe[ޙ{;{l^4ֽ~=g`сִњvdkVޚsWk'њ}xe~:oKoyzct;Evwu%>^ef*~[w{]^{5~[2yccKRY21O}zػ\NǼؾګX5?j|IH8,).)$)%)bmRKm"/&E^,Hl÷+hFV{}]~>;Ӟڇzߩ"g>}ޙ3_}T }ys_Ǎ;>ʤfWQ y)OW>,b+~L诟`۶-{}}Ԡ_Kq;W G G$Dy4ytHȣ[Dݒy4ytBѭ 1t=e ~}~~{:K?sq<}pj?*f3vvV/;gdk}޸~!N}%`l ó_ʾ]z8n=q{x}b7R^ym_& Y.|rbd[7eX" C8F+|WbpYq.}4f{ ؾ|/]#kDFKnȗ/׮5"_X#}kD4r/?Tڻ`9&=7x7tw9t\2]Zg e[Y̟ΚɸqY樋Zi녓d_#:{ߤtD>{&f>wf}zGWGY0qh?[/&-:֬}ZAHՋyct ?tLYV0< yn?|5v1f9pCC؆;}1?Eڼ,;"mfE<,樸H"mhif6/4YͣսoaivamWe9wWJo8wŜwd7\۶_;#5WƟ6_m|ණB|`7왐>̖ _lfB̄Ke6ym}&e`%Xpv<(4w ^fCXPԘRSk NKW4 O-'fgU+ a+^gVū+ 8K7CsiErC9Wur\Q-H8/]) X3/0ɲЙK&әD:.(֕jf:.($iv.ui:KglvMKp\:SHYO.hϺEG (S @qJ 䱃[fܶ|On.ݓS{p֝Ym}=wl]o{{*hKg;FT2lOӬR)dLcQY8H?fCfWJT2ݓX Mb,nJ \%G|Jm(\'a *NVafB^ĎU2-tZ!kB?ӕ,{V򩼕~.!Ӛ]NlskcI/RVny+4&+pyk[EUOLnН[HlXbv @UHYvҦf ox,yFJe6KZ'SMp]>w^0o]r9ZM;pY9jZZa^{š*KÖlg5mKtjaj{H՚5QiHN\[']iV0xPt{(mUq21dՠdδxw6Xy,""]=m a'+[W!ȋ" Ck4 Q'⦛FZ40d!֤uȘ͕ΤR֥ yG$^P2 ]2,y7s#]L(V׬[R6\[+u䐮PmՀ[Ay^ٍL:ߙJ"#ޒ\,iѺX ҭ],iQ̭zk X Tf|ek:-!%n/AvmۨBk`P.ۓYryNfE@VP?@۲qތJsSEBf'E\[KI;%m2Օ*\<^vu=.rʤΠQ4VlNRv;QHg$R)dOaHD* ӫ-xtw{V:K]&@m 9+i.E-\)۰F,Da#GR2Y&^Ɵ]yapv^=6+K*c jڨvt]n|ȃ@,OyRJ2F.W}j3!-[v6v\k{AmQV{."}RV4vgIe^rmH:֡mmKlŚ]^H[1lV4o) RWǀI>O}Yo*Ӗ2P8ؤTuJdR]L`hby룲Hw0FuU夃9 M^ǃET $JejlX-{ mYM ٴfe]M0`I+ jZeH9I³;5{t;5VWv Yβ&+ӔXU/ĹTh}K7٥2܂v4=“?KbE'"k;gpM ک%n+l!-qͭP8;頊#uik&!6fh6]O7 |dSj(.8烲J2'mI+ibYؓKPIaSQ.&ZD,ն4(ߘȥ:A yӥFTVɓ˜Fgc/qWy"jE}^N,2RyW:͋S:Yb]4/OC3} +d:藣Tk1t6)t"VsFkeo'>Np=EOoGZiBwݑyt~^ vA,2 \"a M lagumtd;8qѸ9ޛJ%SioMwɮYTe=W,?#]*Ρ5{z\^ԚĚLL7RNmRȦ5"jyyt>BhP Js)srI)l>F0U(M34DICT 6\a9| YP\fq FvFv3L$I}xw'9B^TT2=֑L'P*+"U_,6eUzF<*jQ1F#nu J@; Mv/uIFSQ۔JT7XJ4'1g0j=]0k<4ojԳs9,¿3'!R#:P ^yo7r٦衏s6/r?ۚcww9)Մӿ&~ {3a ζ:};Uz zޢ)5@,C&M\"A_%˸2ēI.*oc6{~9mL4<gzC.=D+u; c`V78u;J;>|X=ܺn݋:uE0_9Q~_J#ək5tkp,q7]CIoJ2/EKr.0Ru#t|thur8Ӽ}`{ѯ*Xfӭ6Xg>=X"󸈘1vm\~^ԫUV')/VcnʣOW YF4k=]-W6І>g saC;sï#==F } _+ 
+~:2A)-Hv[/Vx)hZjnE"TСgkW~kuv.fjsoX8׹;$TM,6.TE@x߫ϳx5HC2]^\o݇BN5^_5T!VmN69f1e@?UNs!-[YUMUU5@g'٣*,z7Nrac5OrFԻ "\nN]c$Ճt~߯{Ou z$bHfA1a@bh?Bsc_d{o}REܡ;t!5Էg=LG>o뽹+,*Zo9{]7 C;SPqukLbzJpxP0V,+Z7a3Ǎ=˛Vp|ۿGS!bNs=΂q_O?:>b¦Ft:|/x|is(r%eߩ.tku%o"\^Ӹ^?Z>bmL-"ڷO[!2ĩ+h?-9ٝ{Nw{oR*nTf@S-ksg-´aV5/?DmA9|(}x>\ŎO&%l}wkMYgC Ovn?oexA ~ rm!!9Wyڮ#rITMt_sC_IiGWunw[ =s^%5ٿ &>8fQeOtkB};P q%o׵E]Kwokk8b߯1[ l_Q1,93R#o|tP.fy J~dTp+m ٿt MdZڱ[JCT;^~Nn=Zx.}pI֞C)}ϩKu 5-ȿߙ}ַ5Bo*] ^OԶQDjԷH\T'KoE~ :2L j{~^zv cC:I\yyc3sP⬐rW,M1KO0M(bo|beO^T40-ëpm`Jv4CZƯg7ZS E*h = {w⸸6!ҭ;k}îCq n2:ط bCC @߷;< ϾVMd̤r:'z3s5'ko.^EuEB.b^zI3Zk'=BJ2X^v7mLȦa4jXkguy賿t=è94:ZS}]=Ky٥+e(5Zj8 pPG +Rv ln{:eשge9E>̖̭˷)=a%RX֡bk؇EVM.>DT-RVDPgU]}ʔm{!Zw9A]YUbJ460Z'6l.abM;R\.a,hˤ-d]TCj⭫в [Yi|r(-)hᰬC6~BanL0k2Wʯf$BgZbš27w;{zgnAw<[Q ޺1@cw|]"q++h$ۚv;گnn϶zҥ/I:vԞt5tv:K5]=-0suP mX)HGok5SHˡ``r\s S'Vƥ't B]oJY-,w=2B]W]]=SX5:a:c۫m-V*3~6ut O&Qf(əV:ֹƴ; @߆\6t)I |1Q{ .?iLhxyJ1.! ީ a~`U[|~C:}2RWzv6rON Wޛi )4dp׈~mr ށ*ks} p&<= 5zMo)2vt[+ x\WiO&Em,=96MT>O9x蒟V6?7WLu'($ 8z)nowsZw[Rk;[ijoLw$\4b$mBs"þv" BMygX{ AX@CZVN?"K"ͩlLxlu9mX(6֭XjY֮`1B'uS\Aײ͉XU7X: 56!Eq ^[hi[cQe}auv+܋UuD]0{L=i[ZP}Ayb<ѳ^gkh(HEaI]hiۑhoMwf]+<6V_ݴq:5v JF\EI ċ6CWV&jAuF8ԬZhٳ6'SφQ{& ٺiG \φt-e:Z3mZ֮UtA1׋ 5ަcGzCJjanwSOXLRHNjXI?%ŌW5*cT1We\ghU|US,@y/D=eYFW- pAl=.ɨWDJ5ՋVԸ\kh4h8U{{$G՟& zym%U&^.^W*c uzY,xReɆU\-cC>նbM4ވ$U/Y[>l^:5pŪy+_HarsS좆 *ukSQɈjьhm@n^Uc#8|S1yVCW𧄠]X]kf!gA x\#U>?x֤}nz|*Y{=5kB<#׮eps_A)c)|W;S݉TO9ag)>>5˞05Ef_ _ \O\H땴 gdŭGuUәE#9cs$7Wb$ 0[_^b[]YT~ͼ|zq._3h/Nd׭O'W>ԩ5TYf5N{B{Q}]3]J3jYzFGA <[DQ>U`GWxxňԭJ kBHeܷ;zs7!)ГN՗շ6Zq k͝˙/.%<oǯk\`w]W-a-icNx1G/T҄O%W!ǐ `w|.T@rN=Gbw<AQ,E@_ia3>)2ϰ d#ۏGa=w_w ϣxt@HJDzNƳ?(?gMQs7(wlSBw=M>omu^n @kc99ʻ[~-sVc2o\>"-Z} \ TH?Zگߗß֗>E]^~="?Ii~lA᾵,W); Q֣}} L(Һ9P2wOqe:Оԕ_'f-;^Z/_G1>a<{0ܫOj򧻸;Zz{5ݽBb~iڰHPкi?Lӥk[ֱ%e.V=;\};߯\_<⣐uwN׀Xϟg 7SƟ{il^^//Fzcաkf5Sg|S/%w[7HwrB$D8΄0 {]bĥyg., b&M.}$S])>m˂թJ Zqzk+Nxա+X}&@sΊa66'z]bo&/t)W(Pp:*4mw:|B:guL::ϗ /]!H:U&:tpVpVppMGp(r\׻K #l~֥ٚ _{-yn;c$Io[{3mmMpRlr^G#us4Y't<#T NG3gW{],lVx#o}܂2rBͱw_7Yӛ[E=ga<"O^&_}GݽV?6>*x??9tT11CǴD^d>EbZzOi,7e=)v>H ]Bv >t,"fv_^ޯn^_י:{`6VijO߰{HչBZ'(?C]doB3+eڴ.!y8_wWJz^Op6L\^\з*¥B_X>Pwm8]4ô: Jcwj} A.U˫W ^)*3-ޮsMωB/פfamcB=8sy0گ.5=<Hx |Tue˿_"oݐۺfjRRAWFk{ޱQ 趇G  WQUC/~E(F}Bр_et̥àco1HtdXMG%8WO :踅>::hc :cO1B=t б:j(c.{t\OGtT1x5Bקգs\:j9:kNUcWcU{Bc+~ٽm0ٽ[c_FviW5]dIc5ݤKvkKv?"s1R+ؿiȮYc ]>$}Nc7=&}WcC~^ؿAvmٕj]ƾ>o#K5Y۠Bv7h?Ivi@vjJv~+ٽ?*?h[_NvwF 1 <@Ÿ'~M/l$~qI;<)ݟ'^%?vIG^J|%/"~)_K9bK@<\x%>IO9wߪ}+ }!yJ$%^? #/w-㨜?. G$oJ>xV%q/CrGBӇ%爯<$7|U~n0qA)q/?^_$䷾{oW"ݒk_8_" Y[ROz/_/+⋕.qk_$ I~xǕR~͗Gwv_Iĵw)'Vܣ{e_WPOJ.ߩ- _O|7Ⳓo#I/)U~H%R/QSi'KonğQ1V)|a OOI>Z&R8F'%_H5/!)&>M^7K0C *o)cĿ=M/H_7^9#m_'U9]% )ˉ_>❟\Gx5W)^ NWwH(័|3-?C| _'tws?z'v!nݦ' ys$A\wK^%y9WJ^EJ{KܿYeĕ$x%%~etA$*9'JX|vH}#ĉ2!> /]qKVoQB|tҿ?--cdҒO$~]N#tt?[*e|/!ZrxuRv6Jx{/_PƓD7o]+{g*cJwwVҗCJ}x" s%IQ?|.q]~^ y5VH^GS+˕'c15?A՘̏_^.ħ/)K$U/__(RWIK|ŒO ɋVr ;exZOKHOJğ`ɝď(y+oQ%x%_yxht-YL7fYw^q׿%̧S^|ut_C|+ۈwNI.0O$oyS[׉?$IoW()JO|$K4'H\=!3IKB տ|+$&5.Q⛟c럕Gg"^IOc|i^J{$/#'/!>ǒ/#~)7_}%?{RC7W?*{_$r*Oߑw[roI^M|cJ!>Q woAɿ$mHI⎯H6%kw++ !_kt}~[Qҟ_wImwJ~~?|^ħ\#~wHW*剸:)_Mۤ{3q;i}ėߠď8vtOJ;ߢSJ|~BSH>Y^?&O#6= Q%~ħ]/R\+rK(%}*ħ\)yO[$Bm2:c$CܾY?6J>^+Gٞ%&xNAo%~};OI,ە"9'>+ ]s,I1ķy">nKzf/ Kn'C~2&~r@aogCIoRt^ OEx4<2Vw, >_/9O\Tm +ĭK~wV?kT~, K + įQJJHܪ軋x$!P"^ve{6ݿKK%'>YW/ ~˟p_*O F%}%.) 
&W ެ7qo[s#R?s|?B⶿J"[d|V]o#GNJۈƔUwMOQEě~ğxv|U߉k~$s~I%~ۿ<#ˈiDeg,%C$_O$ 1s習D!گH~Hᒟ!}R>VVL8:?i _J/RN!7~\zFYȰ:裣J:"ttב;tBG%: :/ w:裣J:"ttOt GG tD0_At GG tD0%w:裣J:"tbw7?2}u{65~e- p p:}2/Eܗ7w>Dxlo~{el pgk~skܛ7;"Zstf}Dj rT>>S#LКiKE=LA|l-]raĮ:a?U:IT13yøC(57ݗ'B~dœs20<f%0`6Œ\ s-I0`fa`ny>0̝0oy+A`s7!001{`> 9ll2'` 5l%3K`·f%aYy=0aG`s/̗`u̜s.`Y f $M09s'{`9s09 s &[C`F`\\00;av9 O9UdxÜsJ50W\ 3 za!^/dxÜ 8e0+`¬3yhw(x <OII#Rp\ 7p}~x'x <Gc=qx<6!p 8.+p#8n'YpwCax<O'S`c>?#Rp\ 7p}~x'x <Gc=qx<6NFOF%\Wk8gp< #Qxx<OSSp 8.+p#8n'YpwCax<O'S`TT\Ke p%[Ip\A.x<IX/@%\Wk8gp< #Qxx<Op 8.+p#8n'YpwCax<O'S`tt\Ke p%[Ip\A.x<I8?#Rp\ 7p}~x'x <Gc=qx<62\Ke p%[Ip\A.x<IX/D%\Wk8gp< #Qxx<OrĿ#Rp\ 7p}~x'x <Gc=qx<6!p 8.+p#8n'YpwCax<O'S`@%\Wk8gp< #Qxx<Oňb\Ke p%[Ip\A.x<I8?#Rp\ 7p}~x'x <Gc=qx<6BB%\Wk8gp< #Qxx<O%\Ke p%[Ip\A.x<I8?#Rp\ 7p}~x'x <Gc=qx<6#Rp\ 7p}~x'x <Gc=qx<6AA%\Wk8gp< #Qxx<Ossp 8.+p#8n'YpwCax<O'S`<<\Ke p%[Ip\A.x<Ix>\Ke p%[Ip\A.x<Ix?~\Ke p%[Ip\A.x<IبD+p 8.+p#8n'YpwCax<O'S` B%\Wk8gp< #Qxx<OR\Ke p%[Ip\A.x<IبFp 8.+p#8n'YpwCax<O'S`A%\Wk8gp< #Qxx<OC%\Wk8gp< #Qxx<Oe2\Ke p%[Ip\ww`Q?ρaN%$)ؿ>l[XcZu0aaf $N]00 07f~sN ]0a|QO| =0q0| $)| &Ï0[`9s&#Y fA#0a=0[`9s&Y fA#0a=0[`9s&ۣY fA#0aSsC0D}Y x-I]0a (msQi?s/̗`NB~΅^( #O+vqͣOo0è0JqK Q~^?K37FȳsS~PT'n?/%s&# jȳsS*&!"""7S* y M##y/3PkEq7)E>ȳ}Qgo(|"; (Պ|"ɜ 75E~Qܔ?H?Ho%AU/(E$eGƇ!M~E_>lnE[?IȳsSxExE~5a"""|ׅ*_U?J_9ٹ)>Lt~?\?I2C>B"  W3<;7+o`S$s*U:/ȳsS~"[?8Jwȳ _jB[[] yvn߮߮nøܔ?Z?Z_H2_??ȿN2!|a<;7?L8bC^O | -!$9y9ETETE6y򷅐dR?ȟȟȿL2G@eEyxyvnL2e?XJJd*!dVC$C^yvE$sd% N$ׇL2B:SPP'o MgKywHLȳsSޫ=d y$3*!"5>9$?ٹ)MߦE2@.E5E5E 4ȳsS~E~EnBnEEE>}sd."""j> M*EJ$\O*^H:N_/SO2yn_@M|"k ȿG":| @)ȳsS"E"ENT?DG!M+WY`<; HߐЂdي"""?4_g/(w<߭??t8H_^YGCoYqq^A_^epަ,'Ol?\xJ8{W*z}a?8^w(߫=GU9]U?_9+UӇ{:s!}a#N96pҟ :^9G_"e_Ř#}aPu~8_ߑFw~1uɞƜJ_CUCUu:S{'v}/ [߫~ne>}a{KT_|}pfпO+xg?~ g%uvw)~^8/߫|~/ mep_|ܾXO.sY&8o E_<8}B_g 9p/l}j8W+߫< /(.˘ [+8?}4}~d8ҟo9V>>sPRr񅭿Hy?}a{W4?Z_OpѯPWs9/WW};N61R{0?@?5* eOpS_c̫og/m9۳ Vș&S9~?{](3~ _wLU?l}aR 叅s/0g+G)K82v}a t*zB+ϭp:ѿUYR8 #F3Ώ΃SD<{<8se_AW)߫= *kl|d.v*8p_| pFߐSDsk"}a?8?A^o3-y,?| _ =p6ߓA{GGޤ/pWo 94[9~KQ~wӁ~ ~9pQ#DQ [(0K(8oଢ]/4KFi}p>_8;QW{]8]{}2wiJU=ʯ?+}aSN% ̡01+xc(̭߫p^} 3_g;*\_pN/Ӆ~i~ k2?-g :)+g(N?/lʟLq埢S8G߿sIȘ?`Y7Kpܢ/l}v8Yg gKZ)/lځo9V_M@;IݔZCds'}ag5iL* G.+k RQA_o ^N[mu6NOg{ևpӟ|&85oRWqQ٘R?j}xyP$_1Z ߲;7c= =0_m79+Wc<P}/l?}Ϙ~3Q_C?\?\K_3?C'l3+֟gZ@g+ʘz;+}?}awUUw/l~*~*];E_%/Qo/ MR~Θ [c1~5?}a_BJ_)"b~7Q/l9jN_}W4f,}Q>q?>gUϫj}V+̓}aj̇P~f7}aV߷ p|a9{(0~OTf}a{0@Tuf}xySۧ>:~g~J_͕\ =u|㨀?X-ߧaF_S<.`zX}ƪ /oz2?N [(S)A ~~V';A}Z~xFB_o0 gL?_WO]> [BU_]<4/}n0wH_د0~??o߆;_TTw&տ.ә=wh܌/lY*Y*;Q8 [߫5*+ [*L7@_^1hLK7++νcћP /U~[P [-g/S0 [N*:__~q/l}KYV0w/u,`Y2߃#1ހD_E/R~ފɡ/lb+}/l}!:*`/l}UE.<g/W~֠}K_yj}M/l*z} [[+uӍ{2` [7o!`/j} G YԷ/0_*Ug0s|aWxbA_د<60Vg0Qlϼ'}a{3`TOUu)7w)[&u=aӐi9v}aQ~8`j}Uss }a*Tmho/~ʯyju|B_^#񋾰/VsZwa;3; 6՞]/>f :o>`f?R:^O0O񈾰'0/l}7 gǘW~ c mQ/>[K_/B}K_^ҿQ+ yUO]~>ycA,}a{/ZA 4?u~AsKTH㿗7u}a{^>gM }aW+Zg_tNA_UUysf5}aSS?~Ml/l}kQ_W}5p^/W#/l}HG4}aiM_AӓW Z4 /74ϿJ~v?WaYN_^ ~_4O컂U;hWA_4 [ZNj>4 [Le&hj}Uem4ќǂf+}aU\_4; ϴA}a{O~"h{&OM~Ef}a{]K_د4epsAs)}aMh[^{ [~1z)h^/l}=6T4?4g_oYw^񑾰ߨy#h[ 74%ɵ[o<[ wb뿓.{7=^֠YfZ뿟[wcYMͯ?]t>>Ay^~Ol¾4mW+mA/eL/w:h4+ w mS R~^@(.?zq>H_^_9}a߬/A/l ߲ + [߫Gߺ!s+}aC?~O_zdl/lTǩn2;6d>/lDž !|5F!s4}awVwVpJȴ/lmߦ7LO֯X{촐N_u_G?2ZKg^]{ k?^j.~ r}@\H_O))|$d./_>y{gx={*d/l>2[ [Zj}N{>d/l}S/FQ?J2 2 [{(!SE_-ݐpVv/0dۿ8d>/l**BgRW~:dOT߆q?TC}4/_YjW2 [*q~D_9EρL)}a(?GcL-3Ƨ 2 [L埩e/>e>/l ߲Ce~/lʟ/&|kzukL慨pzjf'T 'oO'iTF>y r%l^ >܎<\D6'6Ln$yx y |Þwx#p;nNmOۃ/!_K.A^C^~2xyx//p7m!BHCGɳWo""o?K lNm~$ En6qn6uz6ar$]\E> :'lm`Gp]6YdC> <<<|y:x)y)??#_l?ؼ g7#mjW"^eo߲ofk'ۑǁsK7mlm^a&mƓ_Faa<]6O͠EFam$l^ux#>yڦ1y)<ڦu.9M2roɃ+9G#?eDnOLE6jB5J|3 בo/$/E^^E^~ 
&Gu`9YI^ZC#qG>cɟ<7TzB~Cv\ߘɭߓC"?>n$˳<9HkOgg$s1~'3/?ayE9ׄ,ϡhMgLt##eζ1dy<"2dӸY+'\S2-dCQY,VpJAxe|̣[C9red1[:Y,AdwX=,s&2wz=Y\,>OR%<;2d#7`edS,@@אe2dࣸ? S2f;̙ٟ,aɡq3,X.!Oe-d[r;Y捬a4؇<`"db,+>H_&˼e{!G8`e 2`,^Fo#˼k2f׷,s>ag!Y;,u#wd.L9.#|t2&#,sCք,$˜lc2Z YROy%he~W2w'dl/Ykڐ-Y ey4`n"]27קdw̩8e6d kY湺,sX T/edWܗ 2di(Yq*#M2Cd7iYD,u29d'Y Gfe;2/Гd}3Ys"2yd?',sL ˼7eNdfYy,|A9d~%0_e^2gK&\+Weed#Y6,aYADFeR2:G"0Yx,sn|@4ve s2FAS,s\ 9d,N!˜K2_d 9YpA" ̫pY.MeAd`Y(Jn +2o uzWd+Yn̲/h"fd;ߋ, 2bgG21,%2w>T9,ceugH%d<,o"˘2 5LqğeOd[2>YA12vYF2N,[eKd1YƜ&xiX#2)Yxv"͡dGqQ,%e,*sH1e|N=, 9a3x(J?WX~7.[PY~ ly;({kV'gǶ%xfdߦԦ$d[ YMl&˘dP,e,dВ,cze|XJq7 ,}_#KOg~Yc{\q6B4QI9d[^H~㗑O~,}'K?/7>qd}6Y=s YvN!KEdY[DZZL>,WL]lI~簌, g\K>~/ާd+Yޒeu)~kMpa,6v+;g @^㺜,}&Kҷj9o5m!sY;ILOi0Y d;,֒dY%K?:mZ˜L3m70d }jYL!Kds'Y%d}'oirI^k;ـ'N$7K>dc&@z;rF~ܟ|0Ms~%ȭ zOɛOQ8!kvYx䎧`'7<+-c/9@#$_4r-O%//"rF|qh;xy }8H^:O8pg~Cމp|&~Ӆu#7C.yZCOO;۟YSk _C&ѹPeNdt!C7|V%y(M's%!הH?H)spPw;ɍO\E{/'w?$1r 7w!s=ڽ >ܩ4%7lbLrNu^g:Mڬ,p&#R86"/'/&oEx=>}?#K{[T;sXǾrL~up#g*yJsYñrx5 !AyL^Ob:"W -$C\Jn '7þSCn>ryi4{.r)V?g'/ _>՗|҇#l8ՖQgc?nc&E< &s#俑/y[ñ5lS+M%6̘_$E>ߐ߁?<܎%?3\N#/"F b~#rW# m>\le|9ݣfQ~|֑IgJ>|@LށМ|t|V.Y>Byn _ry=Ƣ"w$<_}G r&ó'Xُ1#Wɯs1o[  ~{˱S`;ǑsCnGG~(!^Bހ$E>/wC>~#HoCnuC><N%/&G}.yȍ_"2T쟝*8[qDW+]񃊟SUN*>8')na+T{b3,)>RIQIq9sW\x9oRX ūoP-?Wb3<+>MqY*.R^qwT|k_x?}ŻF&X-wRV;*8Wq♊oUJ3(R:THoPHo۩~*}JP|߯iϛ*S+O~P$(nъ+#O(~Fk?V]MI*n⑊OV|k߭/*yCPqsT\2ŷ)^_)NHOSEh+[&(ޣ^,]TRq(W\2ŷ(SjO+~MG:ſ+>pbORL(W|EWUW%T]S\w*ޠC{MpcŭQQ<]|KQIPuR|(xyP$XSQ銛OM[{]V`c*.WB|)3m/_=_׈o;;" !sN?!~Fw?=?{I:{D!G8q`!Ce :#u9Q#Dh8q XDCq'=2IF #NE4A8qs&8q9%5 ; :J8q>s=Yy;)R/ F!.DFA"!a-@DB!JrDU(Bq1ĥ|4 1q9J LUY_jqbb>z 7!nF܂XXqvQrb b)N]Ľ+#V!@B|)Bd @| 5b;Nķ!{v?"vs/ WoO_ EE'_DP8GFq@A \# @2G<q XDC~q{<ĉ2)ƈSM=oS3eEas9%#8AECGt@tCt߮nҶ@B\9}}}B "1YE11F\̷Gňg!*RYۘJ:) WZZNJ-`MS+(rB}]]\_AV:55\+R,(-w*gͅ*OJz{4Z~{yQIyX^rE)SJUTFʑ{ҢǻJhAIbryRb^xbMIޗzUeE $(L.7+DW1Ҕy 9::N(Hʠ2VR:)ZVRNsLiӼ\iIZ.p~Anu$WEJՑJ&+29˕ZXQU\E6 ';cR_G{ER,"Ux⛴ģ7P%E[JEp鬋KUت;\7!"UbxBnaUE6z5pNp~_T^%N-ITxRhqҹĊ*VE&&` VWMM(91\U*&^WF'-׍&l&1]Zˤ4TUpkOblpZ\NN++HNu6ms+ iIjѡer^NKj>%/$%%IIy!IUnۺ}3/ZFNIHIP_3UxWuDZw3-9)h?ž+))h&.wR*Z$ ^R)+hѶy|(k(mAٝkTE) hH;'2,ADʒxkgi>6vLLtNݝxT =,CU 9+Q㉗ 2tcVZ$2>8*i"bD p0)RUS Hhw/ΫrDikV-ON)uאܢEnZw8I’Rw?bԩ!xWJ] 4Q jJ6d@%%Hŕ5%\TiVL,_D1/ U8tWV '/ȋF'WTESqn[Q_#W-:k# G'-r'˩F7%ՉtYQ6+()6b:윩y[2mzLDL3yquKRցAM\nA3U2[ex>eAd>ՙv*pEL5XMӘȨb;O0^kʼVVTcaMj6Tv"Y9fD /&%Kadw[;z}#m4e,LYV,ZÓk.e/qWWn4?D͟cRZPT; ƻץcU RauRQ%=FҾ)"M3ɹS+&Cȩu-I~쁲PVNNK,g ykj9w(y%B޹c ~>kpJ;&"zv3ƶ:zUZbL.mߺpj hf6kX#;7*5rwmlD}Rɩw+jdQDjd?vE6{{,'_Rvd_p$ẠHxa#A1?MT*Ԣ"^oPt8$ZW`b{q0 v9]x4V)9VYE˲KS~S0]'tVlG]Uu.y$^,KK]/Q6ՕCEy%)/FUMW"wtЮbcvsw--V;W޵WNR/_Qn1pQ=ie΅:JUk2'~.(Nbo֏ 6WlSvaM4\9rNz4>iitr[Y\'oKb$UpEMu}/v\zDٽ$钣< &+fc;[(9;AU$]$-t5%xF^JM'uZ+I"RTQ]箛 *p//MpH볢5Y|CIRsW;eS_T&˄FB] r!*o\x(UItKJ;c>J];8%7m)*%PRQFu 믚|}, )+%t4Sݍ??cIilH8t|!d0iJQdWWM^`l{)JNPb0򔴚䤢䄉)or''o+d׺T:* UiKSe\ONKLtBQQlJ^fJp-N+)-eOJUImw)N*ũ88'߾8$յBVm^,ISGsGpc]K,WI+vpt5x )uEΪk4YIb -tkJ=byQUXRRmXrII,14ƝZXQ*qi[KqK<4r[;X;XK*%Eh;3_KnW~1O"WKL}{5s9.!mruTW:,+qݜM*t)Ņ))yEQXXJڣ*\Q nDRIM4)K]ILZ+4ܧ)uoWZ_IZyNP'ĸnEGݷII[Q׭Vtu+:oEG][Q׭VtͺdɲX'X(Qo-V$iϩ[I:Y]8W˒ܮWc+#{u~i4E;+|׶(sȽ=n;))!AƮ}8LpN'&SpTJTS˼TXꈻWne4R(uV-QL#&X3=cVO((l9tG>^rxOjHRjjZ%iJj^orWvENגtavlةbkUX.4/vNm!-1R7~C{kyr.;wmXV$1ꈓgGUNIyze;m#>R4MX^YǮy,sm)i~WRj WԽQIT:RI6OMjNrvشޖObBv[7}; iH _1*6<9%Jmnh Ӯm)}x*gKVcmIHZ5*b :>:*`FϞu@ܞY9CFvj;[ߞs\)\s=gD%)CzNIKfE)=G30+eI!ӆJv>zC'ftϕ?7gvJrw#5ScI}9$)U{~=][Ŏ1sN'5N=kOצ9lrr N'59rԯ3W,קpg`T^YCX=&f +%9u%b Mb*JL9"|!pRCx_Cثz'U!Y=$-IIMTk\=$ĒDOsUT:z'WC<9ͻ]C"I'6ULT!z'W!z'U4Cx5e9m^#SHUi.\zDF:#_[tJ`Xܲ[b}wݝC(V%nz$]Jڑ!y^~`+` %]KR20DW~}2X$WpERqDDz+$>cJroݏ%Mϩԛ0K~d7bJ{+ 
t=RPZI]Tg`Vcٟ+M0ݥՙ&ͰUFkQF>z^5wK5F5=/fQʸd=M(4=/iWLxtp32a3iGLxn>FSdgaҎi:`!e{\瀠i별GӍJܐM-$a:U2?GHvI*Rn6'_pmrb/;y&Gn4i T&ɳ%^˥Xq7݄qjZtƹ{_&L37mmfӕ( 洃S %8_d_7^So܄u߃.WUT}Oך<"5B=ҵȽMè]pSEGY&ɣK!̉YE58_%1 ׳5yo^\kD^ ;3jRxiM'.RR38e}}:~S_cJ"sRRu$%)=C&֓JShiSgڴD#rnc26k_9 ;+_ؖL%i]tke s"[ͳ3XU9b_CNJB|e|a{q g_ d:5}kBvl2$㷤..ɪLpKUpN-Q?9%V9Hbڮr\ɩt;{iRrl=U 6 6AI@K5JBDH HސriAὋ@_Є$6(ЂM4`P *9{$_~sffq/ 9`4e"BR\']R+ʥO|06lمGˠ%[5': 6@ )d](HEGb"@*)L!5EQLR~tC73=%> +W˒_ x͓^orAzK4#|fB"M@Jhס|:G+py4C#l!>&31}_XcP!!|)2B&/QQ nf`1i LLiT׳vtrεHК#K<&92|icGk smaG9p;aFKxiQMHR= x2PsFcRC#4pޕ>"5;NNpHl髱tŀ>ҵ>R3ӳge,DBƒ1mZA3MMc7d?3nGS f 0д%]XHr|Kɇ.F^~äe1c_1#s$ gA_!!5rw+O`r dx| v>Ll m#``X+8<,`:& T\ '>J&ДyTUk)#Zf0Ah Aϛ:m, q%0.-s 0T`cf=^]b$Y$ȏfLK"0F½}L.7>{Ak.R͜T @th_p$Qgot83dy0lKE1D#P#prGN>$erXk}8]ω0 ecU26@yK`z`@z|q9='5Q\>ԀA]js}Iҫ@T>ARwΈP_2fQ1]b27)#=S)y^~l\hfzթD~_Q&b#qc~n%͠S>رjcfN*+ƙ#`s#zt H$:@.}8j@y6DQ&&(Y5bB{V!\aCE1cX@",Jj5 1~E@(D. ksP3v9GFIDQ oQ\:ӌHQ"PKIGvzqLbA-!6v= ;~=|4'b>20s㡱(M;1Iݦ\1Oe°!a' KW6䙻S5[~FM,`C<Ř}!4 ^ {EXƼd`Lt>.TŎ )0uuڲ#)ȟ4#{3@Y1`# j#|$D5%B%'dCP?˟ ~,ozp_"D\ kȫ2  /bn`eb7Ou 2+!&}O8c&Q(mJ*~ǽCFK&9sPQJf.YRj ic`cQEE l6͔C4S.`sG dTOȠ5IoԑȉFt~HZ̅w Z{'E)`JdUCdS8YoM2ɷ%}!K/G9Maf!"0;B, Hm8hޤI@݀J 3XEX` -1[$"[(!ɀIU,8Qa|JcSC!KPVs%he?6#L]>cZ,#Reh#55jӝ@^%OgPuDf@ۣ\F3>~†ڣda&+C4ߑ* ೴Y6 Ab~ƫ$B CPdj܋M.z[KmnnX3%ফxAG? q=#^:6ps$q?No d`2mAAyS NΙVT2 !~] s9Av%N`5c ]:-s-S@_] *x Z'WhmppGbuWpxe`ΩWt6z%kkv일ܹo[':c%YE3ˢ٭el,{1N^6wvu3yY$o@p/X> <'9y&g={8S_osݹo-9ysm,;~z^r&8?I'BM><}}!.[2ͮ6&gĶ= jtt{zэ{;o-n;.;'Xvl1xHNӫMO@,98@Iyٯ-' NmxuJ~R)>vy}$7XEѵVvl糣1~q l4j8sq$9]lNfVw7[4[A;6;C_8h%tv@esx;'b]+tc`.4)Cl tHwq=)b ۘ+,SLQp)ΑQƥx+ACX:1{O9WAڈ:F")Nxw'+Ư f%]vmRMW8"z; Ž$Xn')A-PQ̈K":n䙡>&pZM*Br9n,Ǜ5{ce%ehn~9cPREAm@%reJ_ nBkPYyL%UQ{Y{ Uq.(2{)ҫ0.T\רI8H7<>YwQnʁd.}2&td :P;YO4<:FaڹUv(3\_pƕ`띒8jK| p9G./z&WIf JٖH1hÁv;M0mB3)Rd$UլPŸkhíQ؀&A퐃 OKc2|6)e鎥2H(Kt94Le)r,bT|ꮡ蒣39XLd1X)|e1ئܕ& ʗ'7B%D(b,Awl=epR(^6Ym颷ɦ&M.z,]6Ymxd颷EoțM5UpL\ IOp͗B<1GƔ"9a1$? i$=f#\_Z"\4 3C $ӶCJeDY,#06p ~<+)E "dl MOԁɑ;,iv f ,;EXvX*"ZuTr̩ΉզN5Tڎ>d"QՎ&۬4%KPS6dYNnB16XWBue jźw.)Ŭ)fMG6IN]D&D#ˍ6|҃> 3f[I԰`!<%+โfؖllڒ$VOFeц@rIi::=nYl3c71"~-;pN9GQ9[xJî/1%Z 2 )6΍DQN4)Z,M:9 -> :"3%+YhbЖ]fXG3aNy.cikߔH+NՍ/̇w1.?OLPt;rmgl!kYl@$Vl=$|c4ml6K?Li>uOc eٻ٘6˘-ɂ]wXZ$_!`YE9IμR"Ir@0s1 -MY9[Sukl 6TTdwϗΗ%w73\tz -%6uGq&-0o&%<̠dx-YɃ!Y.L)uJ4m;dPfH1n`dy=Tc5ʚO@ʁ=4Bt݈ 1!h8$2('JѢxjπRn)(ada-)6vǎμT8&#yd#b`&m$uLhFƬ4waNVx @Yf>EDHxQ*Exѡn 0Z!߉g+-$I2}~b0cc/`b9&rCN0rL!rك)kffhf<&*C=سl8H':T|!rlC=C {1`$ @ {0HP=!Gb9Aʯ{q`n &5[g {p {ȑ؃j; ;>a0e̓sZ@mpf"f4P3{g!__O-$%G(f({3!^g608ȠCJ!e 6D|S8K;IG|wk {LOE)EP]S%ZSxuL.6)kuW^ʝCRʜA/ byvPz@w%HjEc$_DD3sfT e*SuU UbP[U"dUDMr]w]w߿˧||~7/UW߹s L>\(~S%Z_ 1U0꥟o sfM4xKuEJeDǸLס?P\]?'2̂| 42:O@$3̔d[$#љ"gB}?pXYX֟u( ;"ܭ`) џʀ}AA[诰$ש+L˘TT$i)c2\g-33mRF:"~vkϰ'n|g%]p6QQp1}3ͥ!~t]w]w]w{<a/^:/ODJ9%(7/X'w._0q^5?}lAdi~ײua#G}lY@o}$_RY%ѣU nho^-J@a[E&U+f+N$%UTX({vyE\}g%JsF}Sh?ʎ|QR1\Q%A,NT_E xERI=߉:U} FH{JüZ6.] 
/uSetH )OI'lFz~^%:(߃0O-ݣD'=Du֨y~hGJtY%m_ż}Xq {RQ˞mLJ|hPyhpyG0J %Ha/ -yh[tаuϬ섴QλQQPqepOrOiߨߕNQ~lZV 4lP°_rz7D4:^V$%WC۞kIY.{CS0Wy3qIhm<)e@>/)c[g1׹o]s[̃St˫7 hڅ,K<Q"">mP\_(PDhي](٨@9c QV=DPH#*|QRz_(ф)4);ƛmU\~ xbET2fLp=D(- p!$5VOȨHtxF~Ġ׊9> 9x_7dTG}_*.mk@h?Pnoig{ ǚ[*5q@ǯn  OrX??Y螆lj7iɉaL(r~<얁>PT{ˠ?sg9"q臲Xvx3FV.0lêي5VX/x yB(+Z׶38./P''WC8Na=S(}q~`_|w-{p^/4a6?/x,o G>^ᅾ.GXLk_o{e[mGy hΓ>7okϪo}|![f_E} c2\)8EFވ;T\c M$J@{z"c33'o&(d}6c}x(1֥g9MjY{C{==ClO:+М)eqK,dtIH{׫+g rkrY^R_&[BG{IWb3'NQCXSu.=bۍ1e0|yM &>^mn`w OWym-kؠ (e̥rEhO@yΖ+8^,*YuEʋOi²Z{7B{`{\3#|)l];`Vc[-ƛ%=,kNKѴ.uZh7n~ݍv]8}ՈĺÇq-C9fS}]y~-dV` uF}}"^,7A1mOh{"N %߄]DnA>Xc2տ}k^Zja??&hGoKj+eT.p.brj:nE%{eF![h(':q{,1<#<[l#oCo/m칔+/io/vomWqڮKmQǵx?OI“^~I^{cLҹ?I%gq mE/\dzy*7gE2EX?kT4<s!|#Mc[-P-7C1~Iq hs17ٷݷW%w!nyrۿژ`m|/͐6=z5ooMp$maۇmȧG!J x8K/r[ԏ3/!}`*2޶o(in(wO= [#ԋk{gm% p/Wz;ozxx~ kHJ~/gܠϗz_}C$yAmNdG/a]YR &п yE;hW.d3/cI6C>,`y)1yNX se.q޽u/+QJpA|(/=Tu޻eoH=)W#ڢ(,ʲ^x_,pV?"/1uL% .(;E !ֲ.~}ijkoD}'^}kF=@~A`չ_i#_q2c-G=οpox1{m{Q>3߇*S蜻)<3.f9ߋ}Ӈ=4ke|Ci]#~Av1'X|VOC~&:o'JKc̮݉֗%_MME~^?\8/;&)pdj ٚ8\[q<|4Pl=}VIJЖyƺG E9#jG=s -p2FFXN_LtqW~Cwh8<2LuY`+ $q"m~0Zغg["+Zvt]k_x?;S]q̊h~Niɒ,ioS6!8%Ǡl-i KS!k}'=(OxeHݮ~tv! uqLև סэVGxa.|7d+#W\d\E~d\8a|_^ 5p% e/70X9b;O^{L3ЏIdEkZZ|Shgl]xMa~uwI4l@ƽ?.ڏQF}\f}Fַd?`컺tgDƫ5$4ҽ0I(c476{/[{lFg)1]_k,7! ߫hUV5[EwgŕGpr=95(CY0TONX| X):T1f4H{߉^Ҋ:h|N '1~ - vgv 'ۯOE + j>sXT4v]ק`EIsO31Y]D[T_?~7qZ3L:#L=UNqvg6"p~<qS{wudjC{ +[Dgŝzި^Jηs^T[uC\x7^@fHy\ƱeӿSV"ԝwѶoj֖h-gR!PisO:Ou3m:TtȂ"& ZvֽcG5߀tJ۝mAƹ.N50VXԮS]V?XOvdX)] n8361AFݰ̓E.ڴQ_!;r>ZwNht Sr5 mF=d2]GqGV`{L\fh`zj0+/Mg1_Nt%MDq~%;Q~e{YP7c3ߺO|n*ө+ި#3@Fxat\5W0DIM3/[g7]ۤO_}i0_F[&&бsauDgΕl㙀>ԦMڄ2d]Gd8O<բ-7mb}!,i|xM͍[_;h˂1Zdeb;Aח~tׇ4őQ2v {3Ϻk&w_U3])`kdF0y>Vs0i{r~F}Ṅ{Y3ApsS`r>;ApD 8| {K!qqp#p˂0@ە>r&{n_ '纱.;۠ugGoQ"(9׀v-U+}d_`{/& };;;#LNo@_muuuW^z}j7K=}04~(y4_R!/pG,ȿr~;|aECڼé7w{S{o'm}{V]ѐUV?m^םr{v[koS{M{#5_ {n}{Ohly=s;oYWoMۻq,76x{~޾*~ a<.x{Oށc^Awaq6.emg߼3}/{|3;n XEDbsgqd{;g^0ڳ6h1șK 6M{aQLg9OB5OKsv4}W?[Am}\0c98=S?]0ar\x:ݰl?tfކm4>sC vEOۇmM 43j<>sb{i LOɟ@}R md?ݙhtLCNETc:]WcݜtLmЙn4CZK[Ԥ-J;ۈn9*% ؅Q4Od $W;| )m櫺 296Bp拢>lؔ<컒D[B]hKOr(g~&yn^ߦfc_+c_YiIz;pOKRoO|\fF@5ѕ(&6 |ɅZ}C} M'Cζ-۴S :o}Blg.X7G[ț,wɭ~p̲f羨P|x7% ^aP^ϕ׽^qFuW@P3[9zr&7S<&4;ϵcsxwGF.=sd~4[ݖV_Ѿ#8O<ŇkNy_9 ѥlB\y q.Q=7q X%¯Ii'q >]Ovyh ˅x,m͆|Ct[[|qQ"ž$=lޥB}w"wh_:퇔qnJ~ӥ=]+5_A{`o6c[@ma>7އ4it(zp ḿ13uV_o$_ =F@2X/Ix3JXl rmK >aweE]8Der `펥o=4w4cߵ٩?]s8ڭJ< {S< ğ)Q/k$_Ca`ħ&>() CaF~X[۬j/K>^otN w]%ئL(@due;MYԓ7ncx. 
09m﮳q >"iǜt]KϦ~|*|Ot/JߢYƯVɾh 0k\6:K<5DhaW| .Jx:?χ0Ik:&?2@Z,[6]J|,HӦޚxy&kFvoL}5y١?ֻՍߎ)/W?UG秅O?3Md箝\Ni峤DKJgKoY2jpmu~{ݯ>5-^1Y;.#[߁_B=r6\a%~^wR?v|"LF|}'kx_/tu|Ơkw4v?u};ei|R/QX#X-aK8bMqKS|KVO=i>d65:bsq$kބm]1sfMt=NwEe[y|Bj%Dغnuߚ'OTLxp`?)?9KqE-Pj^Q`jM{zjhG\|)ovx5μ#nel̚wh.t,m9c@%Bu[goK_m>*d!tB#뾁Aow2B ܾшS6/>sp;J(Ϭ_ag~{d`]$}٦`;av._Cn(p'oV]m-g[5R߉6w(s.zV߸Szf7gUH1{|pJׯku=^Lկnij,{ȶI*qպnsJ Vfomb/WwVJ}a_[reY=~aMeMrDC?=?[Hmլ;5ſx cO}]z`,jx?'g 5L6$|՝MG0,;\9=_5 K!|Gh%W]':J|5&B^)#6\,45`X>_~5lIlםq9cG618ژmڶ8džKV_οr[lpo 8g|M0ӂشԠI^ƻ0..&WVx{*=w/_9-cg'rDVv ^ؔ*5߬ߋz(j)/܀Ez>tDwu3ہbL9[p"  y=6b{YKlthDq6vFm K^$^lmM䅒~bv^nz_^ϭ@coh>5Yy-Dc/e)Q{C?4ϡ耋݃D_s^:a,֣07z)߇."p cl;w?Ru1$XB* bƙU{J~MƸVHbycn3dUwqrk_{}_$/wn ;Q MV(i/*HFLnZ"qGSx {ra658/P>enG6g>K +u~- Ygyw eeV"yzn<;oS\r9As^uZ-FM6w*}xF:@$r&r.eyn.l?Q^n봏Yx[Po : =-RhgF$eZv*{3 NF5*ʝ@N+嘟;};ӞVw]ĺ:shv_`OAguؖ~wfQky~?QQq|7=!=X<"CX]Y]ڇ<Ie(8mbS׽t'umeB^0x}b<~h|)2ƎJW=o}K?x]xc7WǶ۾t/x%%L>|Sq :{ޯ_y;Ƶx''G_ 7cyGFF﷍|HPx|ޱR97ҽ?%U{c|r<Ƚ1JĠJ?/k\3g#gI \ZJ׫6qDuFbalݎy1uV~~[L:Q9J;ZOXx6:`wq@_gaC`Wmށmv]DE8|w SaUkXU;)*П$2MY[}n6軁EYȷDؿoI,;[ă32w,AQ{p{+(_¥(aIs\IecлQ0GpF{[QD{A>E99>t^AYc/ uw'[_eOo;gtzod=Gcfc*󼏼,Nt-ѱ; ?V`-6G3 FtNA=h,x9'{6pEu~Z7QP\{.5/4\\99mp2\ WlU>˟gť^`2(u!"s\37ZpE0\):߭E6뫃!BYqupE1\!zs\Emp.RBul[Wj+f+ru4J濆[ϥߚu_~e6Ba!F>Kh/8?\'-ؐA:Ù",omp7D\рk.ǵbrC&՝B\I W% !תl+~k õ D\z:hk gBղƭ>uZϊr+5=vW06mp./ 3q,Շ*7k ZqfץM&\!8v2\i&\jh>lv6 nj=*:W"._qW1՟".jḞWÕEq}54\\\oU՗_uqˆpM~"4\GZ/ur^&񆈧T^jmӗX4+:D\ߟ݊+:[k"jp%1\!ܪOXg͵)י澀KX ? q p /Jq}~[Y-V\o>P?f CV\hq-09_2߄+06ȸNZ6&Cn%Oϧp QnElp-0 Q_ oq2!߁ka:+D;~6n:"w:,:m_%v]Uh7D\ ḦN2CõpMpZq҄+=D\Y+\ekGB<"\Xj9m>m2+p W WLh@Gۊs28jIS1c']9'g,~+jxHEy xx-xW2x!U9V$x(mWql7P8(gAyw ˛F ți#o_;VȫҼ7w7~w]yWAU,%F4țo#ofyy?:9v=wず|J.rĭxah濫`UqGlEs&Ś%*wHyȭf-l'R'\,> oK34rcUz+C%Rva%h/נjTS㮴)F|a?}>XC:r$G%FL/&|3 4Ics|bk^/`[Pl=b|cd7'gtgZ֞bO{kx?g%ޙ-GNEh}zgE {f`ИGriްzX(/,yy*} A { :Syxy:y,i=/{XQ[-Vvz[jཱ+ 0 `$vEb#g{r(6ES&*q`}ÒG5Ħt]ϭaԂf(m`G~:h?Y폅o̾1|2x ށn}3Nff|i!qd|:WMeN0{6R[XJò=]0FA?D7 :o9RO.a|Eݎ cU-g{7;][Ms|~%2_9ny&:EO/| mwAS{c"9#\n⻟K|a4{U-DL\{S[H9Jdėe] P~ H'S?C+<) q?(s`ԟư8͆I?*[X~(shh]kzoPn8$[i}l]5&H |Ƣ$.VM/5-!LGQu}d}B?m3brDž m4/mVuM}vi#T"a{3G̣(wT(7/é?@jQ ~?t(N3mĻN+Ob8Y,;G9=c}M81>9wez5W?懆w&CsjN=q5 s:5PtE׆Yw6(58|#sc:o0/;B&w4x"){rwEp.a!xyɌd`Z\\ñ6swDYsa,>Bx23߫?H/n|/&CyYlo)ﴲ5%_~ ׶UZb\Rdy\w'4@ ,W~bc鬭^ok+̀9u\=F%'iY,wng6-;e3q6%xES\j<{sU|μϙy{3o9>g|μs|μ̦U@H/ 2Y.6Blַ:0#n5NK6`y)ꁆUMmǾB"4xg~hڴӴi+iS iS3i*NӦ3M8MibhpC[R KiEB;i7iv7pm?i}niin{%|N~oj`Rv4CncնOguQ'$_VǰcrS1<&m1e;74HK0neXvcLJc4i-lNm9Ͷ4ۖi-lk f"8Ͷ4[ ^]&.X\i/i8Y09^!-|-ݎvqMki,Ƣ^=Ʊ RX2qKN-*$N-[28s:6t%ӱNG{+Xo@z 7!l8"o#s.~_LeW d! +k@Ch'gIëK8 Q/ӰiX4O44_iX߇Ӱ>Ӱ>ӰL0%gzxA/C찛)R?.'6xXOw }4 it_l{\cJdlߏwϵߤ|8//2`|x?X@}}vߤ?R~ʤ?oRG6?:S[h$ x3klnSƑ;h1$g>aw9ф?LsW;M6\sC?|,N9p>>Ő@蟒k?%ckpypֵ 2"8\? mω=϶}Ǫ#e wUh>Akj/%kA[#^zkA{Iz/g/}S>e>uӗZL5SG[txfxW*Rnj']\9]^sty1*Ns9]^\N9]^b ˋI.ϝJ <\[M./t.vzgߛLOxkiTwӨhiTi:hu4Q*NFuG98?|.5 h+ O=SnNO/?'j1P:sݳnK86Gyd# |sdfCfsDsv]sdLxG |H]'>j$mP7y1p0O[T ,k=O On4n4^i^}Ӽzyu+yNy8ͫwpW/iț2ӽyW(}L${rwnk)Ͻz|[a0o,v|*Ӳ*SU}9-;iYiYqZ>i9N*&~k74{\Y׎ εg3:>:;A>(Frv2:'Mo7OksN,8ω|N'cewω|rA7ik6J&nN諠\*wKn Z ݯׂ|T RA[Yqtkei2\L y"CȠk >Vx.5w3o8=鵧OPznYq蹤hN8=sz.$sI⽜[9=Drz.93=|=/2zL"3όt}&Ngp.LNgp.jtU *m=Xu[õõ` &D0\ k}pmB0ִrBcvbin4~JFw$7߾qyLIt`2L&с$:0D&dLIt`2c{LZo֜d29k> bԏ /Woӱۚ"1~Yo;^kjRp|_+_*jp|-Wnq|F250PWYWx'b89?$ZZ .}yJ~Nuڵ f^tKQJعυ;W^mW p} \_=' jZ ւZe?W^ㄅGouNfqݫUv_-2<,糅6)8VFJ[~+coE#෢I/+g|isUY$^|H1-Zǚelysmޮo[MB~s&˙|YuvxfgZz[,׫hb؎Wsӌ{Ő 6Bů_6Br P\!*!*}mȅJ?cȅJ߯_K+u?5=ADӽ"TE'݉97ŸQH!3{eڵ! 5w:ߢkw˟wCf|q[G%Ԯ GUQq8Mȟc><%ฤǗqI%8/y|p\RKqIr.Nu N.RXI^W}eM<.zWZ1_"AGLOŶKh-xݷ E!cpGj䳗slVG6DVM냤RqRd)̯1Vs Cǘpj. 
y6ڿ=b5`/.`0_E` JD[?D1x1ccGs ]i]x]Tl"0C510yl_ Ts N!C=js~Oڇڜc PqM ɛj؆]UF1Og <|E`h/EU`(Ճ:_ r~Q!/JQ}=GzI?#^iV.,[[~࿢[~/4~LT 4̶LΗkT9arl6[cU.#`ɜ'Mim j磙qqrl%9[I΅$'V`+Ʌd[IN$!>^l>>N8aZcSX|dq[RXus2XbM+jʝcldY=`qM%>Y%>Y%>Y%>Y%>Y2)K_%⿿Dqna6*QNdvp+6tu 6ꪸ>b߮kՈ񥾯W^s`9zZ޿klnX= FO73`t= F`t3=FOoB.fV]9{߮Otܿ}5ozS>-޽x1#)c,ӱ%ʺ<o1;?y"]NG~B3.l_d}zd;O^847FT~Sc/zT_m?zܼ?mG<CG춑S;OS+}דp 4mZ-o_u\\6EnSRmJ4M)6%ܦ۔@pnSmJi_d&Lx&kuS5AiAG`1`SOAL5<OxAL!ԀAL#H^w`̀i{퍘Z;cϚ*rXF8y:MǙ>%@<ϢXohzD`Nhր_'E_tE{zXJTI޿`TϷow>jyAb i}U"oȳZD9m{;Y#|{[搓{T{Ʒ?{9<}m|^I_Og_Fٗj!X<^<3u><%οƓ'O?o,s ØΧnCW3NuPOwK]U}7E6yx q5 ƅ8W ^+ƹ8o/uy^ux*+Iuʖ$Io_ZN-W6/;gWx2xGWx&xGWxxWxxǂWx"x*_2y]PuG  G3v;2 )f7Sd" ߾ qƒ,Kjǒ,KjǒQD?^B&sB\oA؝؝Xhvr=hdE}+rhFT,g8Nb+XeX X˰Zc|2lXՁeX3XoFu4J}n6`CtϡⳋժS̆5 *44 T[Ԁe(}Sem&֛(gq&MP0%MG(%MP1N~ {dmgoO ȳG|a^I596V;L8Rȑ:@r#uH G9Rȑ:@OyDJA?9gbF7x)L̺Dd&r&C 1/}HOÎ^slOn goo:O8!$!1pCH Bb8!Sc1$ǽ[Pz,{ w1^.lԌQGmT Q6*F5ۨzpnmTJ1湍Jᄁ{oս:XvXՁ]g:YA_] oF:PL*~p`5FՈVV#jDXV#jD5Xns:P]a ‚y]ޭ`guN^,U')Bݪq`}yGUj.6Hۧء'o8 Nj&h8 Nj&h8 Nj):^{?޼8[;ʺ<6s&a<Oٶ..տuQ ⟧H RϞbD1>erwdR>|B8 oozW/CF1b9w?F]#> SO;.x P`@"?`+?&+Xq-Ll0Cym~#zʷm6tD/Coh6 ME74Vr~Coh& V1!ם%~=]f]Dߕ~ }S}8zz3Gv,R}[?%o7{ ^p/yK%o7{SF1MO9$ ~I;_aNCK{XKU1~;jwj ?ϯϯJoD"a/=]i] x _ 'c~R97/6C^&9/ !C`y$݇vC}H~o!>$݇D$5k2\+U*;Ւ7,Pޛh?&OSoԛh?&OSoԛh?&OӔ1&^l|TJ.L8{S ߰yiq6c ^~\zK/wp .g=^>ҳ=uOƥO=rGs5J[.xhxxGxxH#QY sy]AL'簺EmX'>FC6Y]=[&=GLzԃ`ңLz4IV0Q]>ɼ207Y+ݙ`?uQ-ie&aɨcҝz\owNrI;v'$۝z\ow\j^LnR^˜N?:)yң'#mfuAw{;Aw{Gx݋I1q7k&!6ʽv w#:݈w7ݍ|w#:݈w7ݍ|w#:݈w7#b̼X?O22܁]f]"y]-M1?"kHwݫD|gnHqz-a8"_E*rWU"|s*cd1*Nd.}\}>p|s-8Ws!87s8Vqy8WsrKu -<`7{..E.Yf~^PwEc(Y?/TSZ0sg=n=IP ґ>(ґ>(ґ>(ґ>(ґ>(]M0sxǞ*ח-lzO4Mc{bAO'+?aOd]b{Uy+ 0MxFϳr?.i;q< ߈oD7рh@F4~#P ߈oFPq ye%==r?=6٣ˬK5Om`HL\&/Ĺz;*2ܾka=v8[} ۷mVρ[G[ܾzWpo=}[ ǝ7~g7sƵk@:[%1f6Q{Hf4{TX~^*8`5nBIW.l?_yV ܷ!suWW~uWwj..꒰7suic{4u9#7Um:{q{'0̾o<0̾Z0 ̾ޣsuwYs_q‘o 2q?f{n.Vy9jMΝixlG}6Y/p< gl"8Mp<gU`Me..n 7O>6|:V%cvC4ms;Ҍ7 v  SO|`y]V,pr}U_ժK8N( cVOOC;K5g=${Iw;) N mKM3Q3a3>3q33ɰ3I7`3g`3ga3۔6^Ij95opBXYu7Y17aĜ6ץZ~ WyY/Ni޼MNy)wnS~] ާ8> GO9wuOq.!FW2E6W]r}oOd*s2|N|9>'d2 VF[hp2i:~+3>8m} kKh<'_\AUVekU%:@DS):ڨ܏y݈a78OuuS{M=c~>jOMf??.3~S'fW> OpD x'InO?p= ?}y?uvO=S TtOu3G8G{C$>DC$>DC$>DC$>DC$>DCa fMmSh?U0^(?9$v-w1c?zѷF:|CC)̮/ u?:X~`,;X~+``?¼?: g;G]|h_n9_ /aQy7[ypjp;nkʋ|" ,3M珺̺VM˷WM˷W>}|9?S/OX|9?ˣҙ{4~&1' ,"'g:nO5}Y-ؗ}Y+ؗ}YؗU}Y5ؗ*SX~C]e]yb{bk=ylO`{B=I'bO{=1i>c>s}5^e2y]C5`/`0߯E` 8rNȗܐjo=WJnwXQA&qn<[I }T[I+s*p)3ݝ 0ݝw]sgc;Y#dlb|vr¦ 6Ƿ8/j#:N,-f8?v"]EkEjvڮ]v"]HmWQmש$^}ֲ*儷>o;P60sDxSs'lCsڛ 59jǓ"hžqI-%-V^P4ZdT^w;I;I;I;I;I;LŻ]n?*W\XqmnA8y]!A̺D~R7[FaS6ʱRC&vVW]Nma8q2^y4}ޡ?X Ĩ;çvͭ="ӜHV?g٦cl ?+$ B2?+$ B2dY!VH?;)6=_͜yg62CL=̷|{oO`= ̷vW0|;.ޑ={\I Q-Rzp~9k޻V5{` VUU7X{{`^5XWV*ڧ, 8aMtb6@zЎOGmZ%OwIӻ]R.z?m&OwIfRtB&1럺1ZIs^{1[OHmO#=y'ڞ',sӶ/`0d!?Xq\.+4Sc+|N6{=u:y@^'a D 9Wl:W+Qkrz!r* '9gX%.򂯻[ n2SS]쯼eT=*?]zWլ'z5^zWլ'z5'z5^zW6,絿^| NHg>rfߏ~;|i=5$m|ޗ{o9:&F=?B Fѯ|'\zmպXnj֒9GkɜdZ2h-s9ZK%s֒9Gkɜ`/cm}%v]b x5CTmJ;9!5V Fm}Dpu]>8]] v+nu>ح`V7 []vKnu_8kkYl~Wgpb_ߍj]گŒfhѲQ'\t]nKJ3GcaӫkMش,~Xz5}EܿJ?׹9ZMz'vXvN?4`s04W8 5Y`)sM%k^Y bSzo4?f{633h8c,Ϡ)SAf_f>֓wI5>BBM\ 0\0 ՃaF fR0\ZpY9~k(Vħ2 ;O&3_V >GEb&|>}Os2jO+X8WO׃_}? rOoNR٧i[mOq6nͧq޼FTiwPu^e{iv&>--Ӗr`4 H0\Kpi,K=p/3xwn-Osѧ-䄂RӺ I1ḩR/5rO{D{2zD~ Ԍֽ}J'*Қfxu,ә\eȫnoGOȞ餟 c vZBMfC-p%6ZJ Rx†xR`CZPьۂ|^[Qm oy0˳KK͕j-̇h*'좳*.mq:Y\E(p\ā"iq'8. 
EQȨB^e=߷ܕ֥Jz*N>&o`uZߏ6]jogM1ve_~/a%s\>υsY\&> Esx^UOr>swuLpv?5yO)ڻd1fuU1s;_ธ5)8)4`0N$_0Nx0N"b,uMƬonp\%'|̚ O1]3eqycS'U%w۶\eO cl~Oj/( svΏ `;?lsEvjF~XF?ߏR'Dl?ζ~me۟l?lnM۹Q`;7lF< svn,M۹`f@m?F-O{~X ?7D0f-nپXrOΤOD?7k9.֟):;\ggXpuv ΎI:;HӾQk?,ς03yq~WYO^{,gr¤Xրӂ8!<>hgZ, _vұti 嚠}L j-|nk.6iu;0N_gc h8|x>Fj<'5I1R|x>Fj<_k2Ztjo-x ` `W_v|<`vB(\v 岻$e"\ڥy+lMlŖrc%J_UUѦcZ^*u x S-<ՙ੮Ou5xScs;ư: " k=`Va7*,J*Xe)}c29=[Lχ-*и|f0nL#c7߁OIh0M&OIh0z?&OWb/[kKOdF>\ ~rv S?;ҿ*I}-2SMZe(V9v20Yu 1 00 80 d0 MSih0rmJKxmMLp5bL+/W}xwXL[L?uccLmzH=C!Rg~3?D"zH=CJ*o> YmǞ.Nf>SI#a8]H9؅]H؅T]H6؅ԁ]H-؅$]H؅]Hr?k,0C8aP6%ǣӱka?̾i:ۏGbS/\ĘβULG5Z0U ZtT9i+Q`:LG*b{-Q90q}oۏ5EfLe Jve~`_p q7FHF:(7Fz߈:/3Q >L^y|-`#-"<翜s1nO]u DzڂHO[i "=mA- DzڂmB<H 9$gl|0VIdnYejtf,xf8GLpIGpIGRp)GHpIG 8gEP=F{wY,^zynlN}=.E}0Nu}hQb'C1,Lp,dZPr@6XI8w=x}>D ,e]j }#¶ce0mznmf|OrI?'$w۟nmOrIoM/+O o8I~_9F)*n[`s9NduevwaY8vΫ켪yՀ]?_v^E`U v^M`G..jvYfU N۝.2;mewiF-tG?ױo$ ^7z_D~w__~w>)$lsamWƀO9SFSx]Xw!}HsR܇?!}HsR܇?!}Hs0YAZ\^g= ^A`zE]//vg3^`׳EF^.Gk"Dv+M 7Cz9Cz>_%S{/4ޑCΎA=v ڎDуh;zmGA=G_L2M&}y~_Y1MH33:{]6^jJ 4Þ0ā     V^'gC[%)JV{ݥ8"%X,T){6rCNÝ;u'NɼSw2ԝ;Nv'NɼSw2ԝ;uQr~,צ9l&4=~a3O*޲Y!ʴƍ30#V9}tFt :@݈Dԍu#:@݈P ~tn - ^q۸+yE>Wa}6?q<;7 F4܈&r#@nDȍhM 7 F4<{O)85Ԧ',k`tM9y8_5SDݬnr,4B* H,"B* H,"B* 'Zޏ*1"@؏7;hv؏sڛ-G9.S =&s]s4Kгv/ r/:r./rn/ r./Rc[)vBzюܜ8ax4ӳ6'ZLzqs'\.0Y]6 zvU e_ռ(s_ &yߖ=L3m`\f8+`{`ٰ*C{졽^{hO= a)EJ{X=Wq°pfg='s¥J{( {{nljV{#k̿sg߹ `픪9(ǽskhihihihihigtd ]vrΕkMsg=޹ʺLIkniߚ ~ goH\{or}:yl:#5OV_F齡M`{w^mlGa:ɏ_'$?~ǯuN%$?~kD'N.m>F}xNww.mv>4kܡoԇ7hmRYp&lJ\hg+(N`K&wz-U+!|o2}7x~gH>0╣~9o/苶 /z E&0_@H=n1^Kc}!}>_H/ B!}>_yc_[vuQڙcLǾuF]ވײwE?e웱 {Bs}ot< ZgCcہp?\ p?4 Dh$A56rIY!yFã^l8ƨW1>D0>OK=`}8} Fg`4<<9Dc@0Zߡ kR2JYnQc cq|z0j2̷̷̷߮k |2߮!gԐ s&Ѻx4w\d?6S'ÁDz\Oq=?zm2kyBҷ78ḬK׾jUjŃY>dMՔ>dpev 򝷻/{;Oɝ<'w{yϓ;yr=Oɝ|޼1ٝユ>݌Ij3v دvthss}8XW~uGZ4WW~uW'774i!c6EnqWYYݞ71_r^\Ss฼7d7dd7dd7d7ddd7dUk'wq=]f]Rxi3M叴>Kʼ+ȿ.`{veIm[=s鍖{V\l|9T_j Jz*U/-|U gŋxvYI:ĸ>~Q"qu*v3$wΐ;Cr~gH &93$wN+ڈsj8BEor>%_|1$tvMۯ8=Gr5D^Ctkx !:5D!~儃r#Wq[0ӼLt& u/%Ex}Q ^_E=x}^_EE*x N(eF g;kٛvu NWu#x&: ^ +o#MWJYy^J6k _`n:N&X: ^'S$^':u2 NFI/Se^'=9cnEv4O??A{N9A{N9A{ND|NH?FvmUf]wU*9{"}*ZE^?'}IB>/}rCh|9+' mkǓxu|{Q,FiY*F==j=={: {:{:T {:{:{:T^;boBFuu)嵁,FiQQ]f]Zx鍚|ʴ9 ?0T`Co㶝7 3OKx?ӽQ3Eb(ZֿX|o>s9QT@M=r :~ :~ :ԈzڡRu(~ KV@Dr?uM2}"L[ՉNճN?;(hkީ>&>&>&>&>&>&>&>&>&>&>放g3/Dm`N&]xn&ˏèf]=2lƮqؕU]Y9ؕ]Y-ؕ]Y3ؕ5]Yؕ]Y ؕe(y{?24NؚOMwj;0̞HfjO}_)G$WJ_)G$}D5{ ?|Ï;{32_?ḷ$A<~O Id?Ƀ'y$Anay}x};qJ $JJ2JWI,xh$J2XNx7vW7ΈUE3N8/~x>l+i+8}XN{i8}XNVӇa!8u+0?cѼ*O+hr3r V2z:oScT Fő`TF `TFũ`TFN`TFa`T FnNcsg2ң;:S9no&fno&fnoN#UyF9a FjxZgbmhO5cTkkvѮ:07?hW)j]`\3U o3F6Dm9b=~X9( T* L* (rP o1^w]Ȑd+TQiF2ӡ@}@}@}@}@}@}@}@}@}F$)EThNA"'nvlZd.1SCmsW2}h7睅9DhB_H4wDhB_~s.e}xsV~s9 Gn]tf?rj|Z總rm'gvr}r}ٷ}ٷ\Wrx흒9-wSߪɐ2Bl!~s?[ȹ-rg 9Bl!~s?W{ȥޒ%3??{)*^ʖ ?`a;oYğ]m&_9I$L$+'_9IWä/;_9/*Ba?}Z"#O_9:u7Hs q$y9o8 N7Hs}"ˢaU wq\ -:֕8ۼFUy:^HM&RuDn"_7HM&RJrϼ6ʼnkG_R/]^>paHHHHHFR5R5R5R5R_mݏi"S<Fnckǜ;}e-Pe{^wwwwwwwHݻWIݻWIݻ 7CslCh+54{q<{H˪5gmvEK55'"e$/#H_F2e$/#yٷ>K4Bk*ח%;7碪vʰ$gXp:'9pIz'9pIz'9pms[+yޛ dž8O%wg%O?vuAh2*օ!R!uȨ:dTz ZDs|Kw"avZ{/crFj-wNsukk jm6Z֖"jmZ[ֆ"jmZf9]dѳsor<댵\K$?\=z<3zUZO^q`/Dp_fu?!۞ -?q<:\w—\ _BrKH.| Ʌ/!%$—\e|X%e^<g%Cs;e9*xeso7h܋\9gZ=?hCuFoM~/󋵎w$>\M$>\M$>\M$>\M$>\MժS's0^4uFoߜqFm'fLg*Yqo}%N9#=_匞*Cy/ODwtǫؙVǻ*+?7s{"%;m;+>c9'Ԅ\EjB"5!_$5!W//HMU&*Rr&0IR }`ߝsnc|Inc/Wm}E{żO$sLKjE"^gSn1o3?G7x?8t}eg{qsJ?*ݏV@CZc{ъ r r r rYz{$ iF= um%822};dx?~3LM忐mgVkle'V=w=ܞ"皐s2r{=׌NuWWa\hH}ו|ޯUUKZ)սكS^*rʋFNyz䔧CNye)9 8䔗H뿚V+D^Zn@F{d>u }2MjyZ^FV˫6dZ ujY,O- 8&#/'P>]aSn Tu}jYrZ惜rڐӲ`ӎiӲHs 9\GN˼SNrjufVasL4<jc~&)#T>GJ'MYa09pdq/C+>:8Z<Ӑgv"gv6̎D3ۀ<}gyf#lrӝzv&ighx\ X>I6ZUω}O3x⤎\T!%ejI6ZY-EVK"% jIS68zͯGn!%m~#r_FnO!im~r_g 4ѐPʹ5Ǎm}ّld}9W ~) 9"@v 9)= 9DNim)]@NiG%?YN;(]ܚvy~Gs͉-\=cG 
9:qB_{kϒqY2=KƵgɸ,מ%<2=KƵgɸ,}k`1'@i]}=pز}9sgYZ2-3w`bd67 ͭBfs3:d6͍Ffs\=2w+$D+ssEe i}=y{B?W,%.gϫAqps9cq689فqu89ΩEs"9%Ko+}5u;%M?21 d7="=yq4]S<߼_s{_'N)-7"ߔ6R|g{"M)E)7M)A$[ճWGNncYfuudq Aݟ-i <7h-Ky$J5^G|.ƚu :\g#Yב7r} ڈ\gDr⻏fm 8#9J|ݣsgk?짞8 ~*9%#do䖬Cn>-9%'"_"d_~M|FSݜYu~]Çl>Dz? :œO]`eS;a.+O%E#פ0\krMJEI)5 &#פ`؎~VSO% ?q &8w.~?rYf#oF73܆f "򛙊ff#O^aHq%Z?h_EWQBDH%#jp^\UWb鯾DURG@׭$!2MHE 4! &"ӄ4AL2iB>2MdZW! Q:>k5h5 4_ si<~[;7g~36r~ ~ӚߴoZvMA~ڑߴ67MmC~4%C%y_QECd o=p8K~_Vo) 7MD~-ojoE~SÐP߀/!x䷻^/A4<{?7s~_uZΙϕAo4?1p$ )qJ r!)qJr" )qJ$riɱ6Axb4%6 Qz`zٍg4gpgQNltٯLwAڟm y}l>.UrF\m,aKm,aKm,`(ɻ,Rs?gG;qU};S6B8PBWz݃?Y>p3&$2D?gL"Id3&$$v< ?c{~nWO~NseAMsR{R1U=c!ǘzScN!ǘ s 9ƴ"ǘ\ScjcmzI6 :h;M8?5il=؉͜BG.E nYd& Pd&# nud7Ǿ%N !P ~]>v#||O69nM۴gf8O"'}">|GO!ߨ /:k7I3=بӢ!ؓ3޾S~7x?m:<;{4]N;;ђ3>ZrGKh-9%g|䌏ђ3>Z?Kkmu=Ki}E(;kN8:zKv{f٥;Z oCvˑ|d7ٍ#ǐFd7>ٍ7 en|%a~I=(pv#9Ֆqhgݳ].$#Hr?G##Hr?dW$5{fY.gŜ.??i}?%}ؾzLW 6 #a$5䷆0:䷎%a$5䷆0rPXh+2_GF[]9pʶK|b2:^Q2?JG(%cd %cdI 2 g~ce߲W|p´ak[56G}z{vשHd<ͷ1L҅1d;ǐ2Ccw !1d;YS!ɛMOשw2-.􍾸6;ߠw&9}ӟ$? :~1us4MF=d`4MF=d`4MFGct{:Z+)o AAMoTz:%5&v:WBBƿ22=Bƿ2/!eblI޴Na!,b[j;UW9УT-ЯyZ!*6Gd`/umP.ZChۇѶD>}H}mP"$Rd7N|]s&BGHK;>}0m?Dh3жmG\G>}0 m;BFۮ#lZKNq3lHɺ~ Y$d]YAɺ~ Y$hVI.=b۶#ζ&Z\g$Nb$Nb$Nb$Nb$Nbh۵6ӶmR{g@Ҷq>@bHL 1${tn 1$FbXmgl;Ҷ*6l 5H͆R!l 5H͆R!l 5JжkR%dmێm#]ghжǣm/Aޟm{9`hжmK$Im;ZV'v(e>2Bj%̇ZCI-󡤖R|e>2Ҍ$a۶ζXvN#ֿmmߌ/Gm[жh[ .fI.maoZvN`;&{ɞ`;&{ɞ`;GWIK1mֿҶ{\gH^}$s뼏u"yH^ 9u"yж/eK m;\KNp/g֏5?GtȚY#k:~dMǏ5?N klvHa-zش\Ӛb_Vg45'B*[{Rǀ˅-4D }Xnq*LM=C|7قy (5BTE={2P#ġF)F5rF=A{P#F F%qYƶF lk z@kUyR#$Bx΢FbGc'h'5CF|RQ#>\ԈO6j5ѢF|0Ȓ?R#5GHGX}(k$o1jz=׭5/ 5R5R#>aV#_td?/*u̱pB$nu7Ɂ@Mr&9Pw$nu7Ɂbʡ0Xch@QU 1ej`[[>eԋsm-i.ף|uGjM1zR[϶˹~:uںjF]5ZP[wmCmuuW;j.jP[w^[wDm\[gmu&;Z[7vlܝ[ Gmݝk-OVU{ke}k[˵Ui_0j+Ͽ{m  Ѡ?-oqw7S%P[7yFk~8nZ3`VK:8zugjNr;xw;Ԣ$wA܁w'&ru1Ц܁Ug[[{lkAkmg\[YVRx=j_ /[j뽶rT~ug1qӖAԖg-j˳ ل܈lAmyEmyfx|'oڼ,n}Ң`K̲? j4u]Av:jAv:jAvp[xס O}d$)ƶ/1ֶ/zWڼݨ5!^Z}wїe;nxZ_X#A[RG?t-cj˨Cm -cj˸e,CmQ[xԖ1e Bme`  㸶u`,7n[[[kXk+5 _)ڣP[kc,eLZmkxkki?j7j%֍ 6֍H uuuujkA@ں(ƵZZvҶ)-cN/rm@mK-cGm\V[9*zgmm0a m}׀njRwP[ߕ;4j wQ[ՠ==@G ᑡ`ԍr8ʶvnZk[~43P;7RJ)VjYUk皎kghn@gvP;DN{jiOFgvP;|7%DO]_ڹdsMb YP;Kkڹ)^lF;{u3J>>?"tW}ï΍~|d]|4~+?GnҐgOs su΋Z!E ?^{+7Mrw~?<>ծ0͹"=0vdp~Wdž{Ax+#lTGA~4o Wۉֿ ΍-x⟾,hYxLJ3u<36ƽ {{es ?l✹?:akxޗrw ^^v_u#-u >l޷rU=Ӗ,dpw W}t#E{mz_>]p3^ZNS%hX;1+ `פOOF@Ha5cT?*y  e@kzZq}{,V}bnA#A*6_|~WE`>_2?]#>Rn|,}>gA& g>ZޥL~t"ɉ蟆}^ySzü}w^^Њ}җ~D+od0ͳݢa~3lve:Я`v` ]9Ne[ IVL~|`S_\q}[>|fNSjOоTOlg@+3~gD>Ggll\˙w&|<7 ek+/> ^m}',|Y!7 FL>pFϦP83&_|a^'}63mPI ,V ^s@y Ιlykk5#U W z]N|N3.rgGg;-Vߣ^,ɍIVxxعqGv$ϼ'm8Sς>g=߫jx&ϡW\̙RfUmzf8|͂!gQA?6PV*OG k{̆V=Y0>l |&oWA?{שBlښB[ <5Ry8Up~?@0U,3X>dzsf?I>cmI=ޓ Wt\N m˪0hK6z.S1DhXUbF_AIN`g(i:Tmvs:hɟ/ 6۪j; myT/ |l, y*,{*mX<#;Mu<_({'@?-_z rUO[O3vzKJvU?b]rUO6]s*C_yӡMm3ڠM; m/ڠMgX~\}jfhgXӲm>VT|B{_ Mw,U[@[EڠMg5U>pzzy?Up9˪6Խ3POZ?snUs*eTm-sYeNLѰ2[gH,Fm#S@j[~)v+ UbqP5-m}Z?݅HS_TK/GgƿZhi2URnM5-OcLlcLl\2Юz0W{Ta7Sh dl;'*/]M:l P290zn mW2T2Tmk O}P6hxR_3TD*v,P.6-go6o60gQlOG[aL؏b.u'|s @UO/11r`)>h*3'⟘1#-o:FNXsV<TO2x}x8̱jϩ^nbQoN>]sEX~sk |jρ"z Qjρ?߄\GaΗA4U_s \Gi /seЖu/!e  M6B@n4/OPާ~Zh;#Jٞsl?SJ=$xI!N({s-M:?cVB;ޜW!{KW~dL}Z'?'qd),}O~R'qiߏ$z\~L\{iOSav+{՗Sba_@<3~zmW3u9{v֥T'L'L'L'L'L'L'L'L'L'L'LS-B],<6lظ7%(Qca˧"}0W4y•z(ϙ1)N42էiRW${p x\f`d:22ԓ3_m6"&dl|mS mJF_BM~ȶ)~܆yJ3 ^X{c> tSD|ssWpGh?B$?2?ң?A(m/OE厩h=JEʝB/LwgצK}ryG4ԉ,~nZC]PbzvnZ~k4%U8 u{P}r3 xa_ӽ ׇZa0y}>B^f daPD^ IQaR;Hg |'}{t$9YR9ArY>:;Pxg;y8a6:h|p]}&U&9> ]N.>۟FV {VWݷ&v:365&Ϫ:;6{hd ~_b9^呝B/}aF5ζ,aۛjH.TsEm=jGySQmŤ4STRJ>[iOcZH2ǨB|@;!wC}-[r;nwݢE!wC}'?vUЃ W~결_cw<);RV:$hTgk7[A~S='E£qdDXl{OTI4! א{kȽ5^rp W+\C! 
א{kJXx=jRHO8:#s {\ML1li |(Aqy0v?Kul.Ұ}(o@6뷇~ cxk5Ţa>}_g^d1|Ռm+toj' 7(c ~AW>v4 UǐcU=r*GUcUrZcU.rBUȱ*9$M{Еl#u[أFĮh s$^MK.A_oFP+~S.0QvcdmFBtQϧE5]X}[|t s ¬0 * ¬0 * ¬0 F|5>R`yL+O%ٯ:;wt~[+yI%ij~6C׽W@__v|aٱ~p~:I,vSނ|")oD>}fS^|ʏ!ȧ {w߯}׾7-_N-u_4Fޯg_]sjRw5hM?[{BtUrltUNvo"9HF㻉n$9H&㻑n$9HFrF{XGuӚ>W_)2zϓhUlwr{l$Nkn0gXeokvF4w7ew8yqwPYC55>/n9@m 6sȹ ro9@`5Ll\tTr)̏&+ I8ucwу8X!K4|Cc/X4h#7Ip0,=dk=ZO֓}dk=ZO֓}dk=ZOb2|97Ii֙=`) yɃH+hGi^!4t5}L;9sz亼Dr]^"./\HK$%uy亼Dr]^".Ur)t=T+J:!l2sWOMgzLH BMMl+u~Kβzдv֣MycV){ݯ͝qN mϓx~w'$dVyJ2[Iy+~ 4'4JgyF6JX>_bpWzCEP64ws,ƶ=ǶۏmWvIŶ' g:3gL|l>~6`?tg:3g?6c[=Ƕj]d^cc_hH]>'HO ұ5Hq;Z0wzOvʯB>e'?@>' o@>'? Kt([A-q&= ZӞ\Y$]XL_>ꮐ8۳.d ?>>?^4śoaMϾkdolUb옷 #|W/kD~y/πʐ_^2U 3hd^ flVﷻɳKƯnz={Gu1O yr$vEOn33yfF"\䙩E%3yf!h䙙<3-w}$jϛoIazi^,]XčvΟ}gZ>/l&jkͺ6i l2&sֱc++*3Ա yEOVUpԢFa[#fXjΝFri/Fri/Fr?jDsL26@ k[# |52NR5rbjdw)5Z#3Q#bKnF:[P#'mk1<jdێ{5)˨s5vZ#\#FQ#iy&5ɖxk5/j۝5rFh$55ӎI E̹IFjQ#sP#sdH7jdN3j{>>wmk,zk8ȼzȼ55r}FRJ#vlYg;n{َS!ΐ< ~E氳v6ag9Wd;ag9l2MrKDbϏ@I 8ٶ6:!v mY!S<>6LMOOm_)t^uD&#g3:d2y&m'rFNI$ $IR2I*F&I$ $ dTLR:_O׬}r~A Z-v;%.o@ ' ʺY>WuLS$v>ohru>In"ݓJ''to]Tb؝P.eZI=7?V|lG>cv!Q;OiQ;OvJF<y* Tjgy*yOm]~ƏY䍑uqs9nb+}m,^/cCu/ ew4t\?Nn@6ӳd3LOD6ӫ d3=LOF630dv:1d3#s/}_}Kxv-PƍTE˦+<~$6vXL az ދ|oDr6V0rCr?h-L=9zC|6rOE9^u9 9Ǘ"㓑s|^7:X_'x6jw30N?sK?^>Ar#%Z4Ns'퐌{N?D:> A;铪/ķ?,saC#{qd/ő8G_#{q}^u6v N3J.p] '~?nͷSs%rmgs% Ql*2@Fi(@FZdbQl4/e\fuui2Mb7\na&yKV4<Ƶe:Zk[''ql)(x1d2L,G&O!d%d2LE&Zd2Ռ-uoyߙX318!Ns -5;1W4*v^_ɶ5fxkvi6D{! $:LLEdL&D&ڐIL&4"k\{uu ,5;h5UXR4{Ѷf?`_7:HW& >wfRIT2 C&QY$JD&QdG&Q>$*D!(RY}=!{~ױtݾ; \Mׯܶ~ߨ4AxrVy<9|Z^mAzιNnaVK AYv?3I߿+ێ5y\т<"GD-hCu#yD #yD#c#r>}F/2h0Rmo4i8@M3q-ܩmYuN86;&fR0k:EE5@?)kur <'Ao=p'|>|>|p'|>$OEd ]qV*׍;ڼӸw^جX27e]gZ1߿*NhTaseYwB}AT~/ (o٪Orlzsg \z#09FNcSY469 BNau)9#0$cV~!~Mw%Kt~C'k\6eqKu C~%95J/SjS}uc|Odڂ Vd5& Y EV * UAKVͭ|;|A_n]r|Znw> cа}{]{ҝl"_CD}gOٞiѧ{YcB(0 ! (0 & `( 1^:z̩B _ypcV]._  ߫h1CN8 թj'ӺqЦq={6(zf9@88UѨd4* DFRѨ d4*BFd4*DF<-}ǩqyЧlPbU/}6szzj]%cqK:ڕQl-13mKZXM /ǐ1H3ySڏۤu6c7jr#d؆F#vd5BFD"c*:ᅬ,Yt AU??wp1%K2~[W2_Ɓ7LuxNfWԺvH0^Df/ڕrE;w5wZʼm -?/$K]7/o%p BMw?|jo@3 hA0iyC ¼ sμ`FUA@hAP7y0LչPr(9?J%g3Cə!LPr(9??TgW{ph .:,XwHhRۯ`{miarɳ9gjc=y`r09?L&gl`r09?8ro/ym9ؠLǮ;Ir矜0u\nUv.,1lg~ٳ){Vqq ՞ۏ)bAj2f}o#Jƕ{=2~$*u3Vn`ܟůXu_N9Էv{.L}OZd sE^/_82>%¿dR&L~:~Ml# ?GN8/|/YOK'%{Ľd}^>q/YOܫZ~N؅-XwQ%?spOs6Kdo }"*v4q0LGuЖ]6bl!olkKu O=3|س!L23_m̠? 
4P0~Y5œISֳ]]%cT?CN3u}n[m{ܡ}q[2G&o_4LȀX`a#Gs c>-EnK_ X94wfFY mĿ]1HEW b l91H* q,b?!H#d?&#U?~SKu qgٿy?Vc]nNX#vEKƅvlpW<3ę[=Dÿuӄ{;~03}n):L֫.:Ji^|(7+9Ql-z0!it,' hNYYyZ.x `bW71_4CS>`Ğg+{-weo=dQ{-eh,jKGmuPԖGmu::,qdAي_}ly&A7E9]>}rB66;-gAN3'}1}zp:/|My_z'ܧpx_w{>O:y0u)Ҹ YKYK44B4di,C*dḭdNd|6yʥ9S.9O66yJo=q9O6sYɌi|) t;'9Esw‡IEVX_/8D~jߵ2w ]ۈ kgߵ,w]ۆecU cZl.L+?0fZna+=ĔJEsO3)m RC&Vnw]=uhON/L<̭6}/Wr7*sYM(hr4y[;h1b=*-r$?uhBWdp\ن \GWڑ2\i@W-巚$Šc]w6u30?fe{RWp /Uqб#8خ*@2&R~`e uª!,;ěΎJ_ ExX8VYѕ/_VQ8:vb _l/یs&*}lWP>t%P$Ҹ7ŨorQ$)C|:uM07)oP߈: :orW|9p=; "}^2&vR,){'E.uw]$"]H|E.uYkuNe糧~['~"ٓ}*5xRR+Vmpݺԅ@r# B4rE.V\ w`6[^1Rӆ1b>n{@Բwjwfꄼ .?p5,o#I髬&usmjw5z@y\G(2o$C_H ÄpNVi~rr_\Ggnx'=~E_"ۯ}-ck_du :ٞlC_!m^oSƍ{ wlSrul~ ds%7ıNㄵGwR-wR9KqN_hY~Ss%9yiJ>csY)AgN!3L*>ST 3iȺL'cq#9/|mQSHoS$=N6J̑l:#Qd7OGb؉N#1Go$s8av 8s8a/uC)8'$:q9uѧXc sl^rF6ʂ8:R7q#0Qm>5kr^_k\uݺgzdY2,}V>K@FU ʑgDdY2,h]fJlk^jl5na+UMςEÁu5YgMr9C'^ǔyO# ݼy7 ,lGhD;ZwB[<{<bfR{!"&bC(ky5>d}>^dzFاγ:<[02yZSg2@OȄ8ϛGvZy2:{^uՋMp4E-iWZWV$9R$K$'NX9-s|zY-۲-ۉ%94)$\pi` {S{ȅp  3Y3g_}ўko~̞=#_=#=#Sn/;9=>ϰix"{mw}yГ_/g/g/g/g/g/g/g/g/g/g/=M}}8g\sv]՚_ͅ]})n?}3^A.{ߴ^s [?{J?w~^sxnk0n.dSK}Knڭxu'/n5dTY?{T=|lX5j1p?t/*ψ4;q|}pE={/_d1c|"{E=ou=sDŽ<|ٚkw2?k~O,i :׃lnnb_\=`}qo2?|7`}l^`r[m^唭]=[W:eh7VxǍ yL\bX75B./ͻqo~oǍ 3ĸwAcMh69ǎCʱ#;7WF[~r׻5ڭg7 diji Ckl[m?soٺ~޶u_ol?m?_]?k=d\s͐/ߍ.QQs Yb=??._V;(g3 K/'k3 y\$EBvv՟4$߉OT]-w"jgŸ.+Ͻ~򉷍~役*/8Mށ3u3at+5+Vp{މD?د,~czbgՋՋՋՋՋՋՋՋՋՋYzX7uԯ_{s՟Cؠկ|S;7\r]ȥVWݯ~W yxOSyJ}Mנ_.dsY+v?r1~r$ϔHWUZ9Lj@i"35W}M^ 5KX>e|~k<̠/qum}n.[l̝ޟ[zf3z3Ͷޟ lϻ!|-x yUs5m$?ZxouGZw@ ;Er]&*xl-'n-lȿ(׹ϖ"=]ŧoSbں{N[wOMۺ{jS7ۺ{jl=u;} 5WoG;^7߫+!5z坚.'1W⻽׋\kosk??grsV~S?CWp 5.䱵9gA#+aGgߙ}4Z? wYs'Oḏ>3ݣg5<8wy[ǘUT+7Gv-vˣڪk_[l={Wl=/xU[%oxI[^xM[瞶vcKp7 Ҽk;/$B6b kސEĘ/}/z5\sy|z[֙?C}-ƙߛ#zݞJͿ7osO*vCso]*.O1^rУL>ȇ1A?ܮD*;Ûr2Aow=2::-z&̝f՟f՟f՟f՟f՟f՟~iv^iv^iv^.+͚7<~wB[uQzx褭l%[?qEC?g(.ӆ#TpTkOF[*lZn[m|Öl*M[=QsƸL;+`'ʎh}$rV_2?ugG5UwP{@\ u"{ɚV0=,'~^3 ^$D2-G c/o >yjM*#9V;mcE+6Jo#_h'6紑9'8`slz]Oq6w=eF붍T?%S62m/Ԯ2lDm#.R~FN]Gm䴑o/F %\a^yU`'˭y1~!{\ƿ`&ޱmb &.6qLmb&lılmb%&61?Ļ? 8+>+t<+Wb~N\4wR\Y\2}p%ĸFOߜ.>^Y/d^/ <7Tszз[-7I k`-ȍkkl[WGVں:rl]~m]hɚ{_Dhn{ s<>biQe~͞+ϽDseo|Ke~GٹT7)G%ׅ%?z .Ms kyf-4^*9fs7|!|` -hn7kO91E-O6~ NC_PO߳rh!+zq0} W%>V;`Ķ`X~~RAAA*tFUo[)EZLTǤ+7* _2~ä+s|6ȫN6wC)7>S~~դ_B.}(?J* UW7ާT_J~ɴ??d/(}Hqaa-?jP׿>_zEy`z_doV4Əf4*<LUoHѿ)o+3) *GlP)߀bo/9_=Հ>Kۊgt{U(Oުz+h أ?3~ )+?f(tIW/iEV7_~C7}}+*o~n%R Tϒ% '?'SI_?%E_n&[a =?^zR)EB~ PN&P ҭ$Tt%ߩVS~צGBB;㫔WY^R 'A.t).~vSO?vO蟺Ia_NU/S1?T S¾vfءv(G*nWb~WȧC>+O+Ɨ*F'_Uզ/mKTסqETOSVZb|ءXPoӢvEVU*U!VEB1I}]1~)G*#DWy5})צ6EѪ_+> -{RR(Ӣ=x>ѢXئ_<_lSo}lS6EM1>?lˈb}'- ~YVVVE|HE[ |HZ!ˊU)V]1~(UQ/+/+Ê }XVU{X烊Ê-EEEP)o)(-}mQ tbUa?[w+*|~~YQ^~{Ps)_?T7+͊ff|P ?ߚ|_(ýsW~TިkTG7*gWܿY1W?߫5+߽{^E\EnPO&^AoRMhPWB>W_i?6*N?N?){uWUSNaAAߤhߛ{b{?ܣ?V+T+ 毰{F^X_QEVXQk5[(s=gbJ?ݣ(:NjV/3u'TK1R? 
[binary contents of the bundled M2Crypto-0.20.2-py2.5-macosx-10.5-ppc.egg omitted]
sbindir should be # cleaned out from. sed -i '' "s|{BINDIR}|${BINDIR}|g" "${PKGTMP}/scripts/${PREFLIGHT}" sed -i '' "s|{BINDIR}|${BINDIR}|g" "${PKGTMP}/scripts/${POSTFLIGHT}" sed -i '' "s|{LAUNCHD}|${LAUNCHD}|g" "${PKGTMP}/scripts/${POSTFLIGHT}" # {pre,post}flight scripts must be 770 to execute chmod 0770 "${PKGTMP}/scripts/${PREFLIGHT}" chmod 0770 "${PKGTMP}/scripts/${POSTFLIGHT}" # add in M2Crypto if python version is less than 2.6 if [[ ${PYMAJORVERSION} == 2 ]]; then if [[ ${PYMINORVERSION} -lt 6 ]]; then cp "M2Crypto-0.20.2-py2.5-macosx-10.5-ppc.egg" "${PKGROOT}/${SITELIBDIR}" && cp "easy-install.pth" "${PKGROOT}/${SITELIBDIR}"; fi ; fi # add default bcfg2.conf mkdir -p "${PKGROOT}/etc" cp "${CONF}" "${PKGROOT}/etc/${CONF}" # add default launchd cron job mkdir -p "${PKGROOT}/Library/LaunchDaemons" cp "${LAUNCHD}" "${PKGROOT}/Library/LaunchDaemons/${LAUNCHD}" client: prepare rm -rf `pwd`/bcfg2-${BCFGVER}.pkg echo "Building package" echo "Note that packagemaker is reknowned for spurious errors. Don't panic." "${PACKAGEMAKER}" --root "${PKGROOT}" \ --info "${PKGTMP}/${PROTO_PLIST}" \ --scripts "${PKGTMP}/scripts" \ ${FILTERS} \ --verbose \ --title "bcfg2" \ --out `pwd`/bcfg2-${BCFGVER}.pkg server: prepare rm -rf `pwd`/bcfg2-${BCFGVER}.pkg echo "Building package" echo "Note that packagemaker is reknowned for spurious errors. Don't panic." 
"${PACKAGEMAKER}" --root "${PKGROOT}" \ --info "${PKGTMP}/${PROTO_PLIST}" \ --scripts "${PKGTMP}/scripts" \ --verbose \ --title "bcfg2" \ --out `pwd`/bcfg2-${BCFGVER}.pkg clean: rm -rf bcfg2tmp bcfg2pkg osx/PackageInfo.plist000066400000000000000000000023031303523157100151110ustar00rootroot00000000000000 CFBundleIdentifier gov.anl.mcs.bcfg2 CFBundleShortVersionString {SHORTVERSION} IFMajorVersion {MAJORVERSION} IFMinorVersion {MINORVERSION} IFPkgFlagAllowBackRev IFPkgFlagAuthorizationAction RootAuthorization IFPkgFlagDefaultLocation / IFPkgFlagFollowLinks IFPkgFlagInstallFat IFPkgFlagIsRequired IFPkgFlagOverwritePermissions IFPkgFlagRelocatable IFPkgFlagRestartAction None IFPkgFlagRootVolumeOnly IFPkgFlagUpdateInstalledLanguages osx/bcfg2.conf000066400000000000000000000002051303523157100135160ustar00rootroot00000000000000[communication] password = foobat # certificate = /etc/bcfg2.key # key = /etc/bcfg2.key [components] bcfg2 = https://localhost:6789 osx/easy-install.pth000066400000000000000000000003451303523157100150130ustar00rootroot00000000000000import sys; sys.__plen = len(sys.path) ./M2Crypto-0.20.2-py2.5-macosx-10.5-ppc.egg import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new) osx/gov.anl.mcs.bcfg2-daily.plist000066400000000000000000000011011303523157100171440ustar00rootroot00000000000000 Label gov.anl.mcs.bcfg2-daily ProgramArguments /usr/local/bin/bcfg2 -n LowPriorityIO Nice 1 StartCalendarInterval Hour 3 Minute 15 osx/macports/000077500000000000000000000000001303523157100135175ustar00rootroot00000000000000osx/macports/Portfile000066400000000000000000000024311303523157100152260ustar00rootroot00000000000000# -*- coding: utf-8; mode: tcl; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:ft=tcl:et:sw=4:ts=4:sts=4 # $Id$ PortSystem 1.0 PortGroup python26 1.0 name bcfg2 version 1.4.0pre2 categories sysutils python maintainers gmail.com:sol.jerome license BSD supported_archs noarch description Bcfg2 configuration management system long_description Bcfg2 helps system administrators deploy complex \ changes across large numbers of systems in a \ coherent and transparent fashion. 
homepage http://www.bcfg2.org/ platforms darwin master_sites ftp://ftp.mcs.anl.gov/pub/bcfg checksums rmd160 db89ee0b8975bf50ad68bfac122e50253ded1906 \ sha256 138d792423475ae6516a95578a3df191504afaff31007877c7c2b36830d1a260 patchfiles patch-setup.py.diff post-destroot { ln -s ${python.prefix}/bin/bcfg2 ${destroot}${prefix}/bin/bcfg2 set pyman ${python.prefix}/share/man/ set manroot ${destroot}${prefix}/share/man/ xinstall -d ${manroot}/man5 xinstall -d ${manroot}/man1 ln -s ${pyman}man5/bcfg2.conf.5 ${manroot}/man5/ ln -s ${pyman}/man1/bcfg2.1 ${manroot}/man1/ } osx/macports/files/000077500000000000000000000000001303523157100146215ustar00rootroot00000000000000osx/macports/files/patch-setup.py.diff000066400000000000000000000046141303523157100203440ustar00rootroot00000000000000--- setup.py 2010-11-15 15:30:28.000000000 -0600 +++ setup.py.macports 2010-11-18 19:06:49.155292524 -0600 @@ -11,38 +11,21 @@ setup(cmdclass=cmdclass, name="Bcfg2", version="1.1.1", - description="Bcfg2 Server", + description="Bcfg2 Client", author="Narayan Desai", author_email="desai@mcs.anl.gov", - packages=["Bcfg2", - "Bcfg2.Client", + packages=["Bcfg2.Client", "Bcfg2.Client.Tools", - 'Bcfg2.Server', - "Bcfg2.Server.Admin", - "Bcfg2.Server.Plugins", - "Bcfg2.Server.Reports", - "Bcfg2.Server.Reports.reports", - "Bcfg2.Server.Reports.reports.templatetags", ], + py_modules = ["Bcfg2.Options", + "Bcfg2.Logger", + ], package_dir = {'Bcfg2':'src/lib'}, - package_data = {'Bcfg2.Server.Reports.reports':['fixtures/*.xml']}, - scripts = glob('src/sbin/*'), - data_files = [('share/bcfg2/schemas', - glob('schemas/*.xsd')), - ('share/bcfg2/xsl-transforms', - glob('reports/xsl-transforms/*.xsl')), - ('share/bcfg2/xsl-transforms/xsl-transform-includes', - glob('reports/xsl-transforms/xsl-transform-includes/*.xsl')), - ('share/man/man1', glob("man/bcfg2.1")), + package_data = {'Bcfg2.Server.Reports.reports':['fixtures/*.xml', + 'templates/*.html', 'templates/*/*.html', + 'templates/*/*.inc' ] }, + scripts = glob('src/sbin/bcfg2'), + data_files = [('share/man/man1', glob("man/bcfg2.1")), ('share/man/man5', glob("man/*.5")), - ('share/man/man8', glob("man/*.8")), - ('share/bcfg2/Reports/templates', - glob('src/lib/Server/Reports/reports/templates/*.html')), - ('share/bcfg2/Reports/templates/displays', - glob('src/lib/Server/Reports/reports/templates/displays/*')), - ('share/bcfg2/Reports/templates/clients', - glob('src/lib/Server/Reports/reports/templates/clients/*')), - ('share/bcfg2/Reports/templates/config_items', - glob('src/lib/Server/Reports/reports/templates/config_items/*')), ] ) osx/postflight000066400000000000000000000007751303523157100140060ustar00rootroot00000000000000#!/bin/bash # # ${3} is the destination volume so that this works correctly # when being installed to volumes other than the current OS. # set proper perms /usr/bin/find "${3}"{SITELIBDIR}/Bcfg2* -type f -exec chmod 0644 {} \; chmod 0644 "${3}"{DATADIR}/share/man/man1/bcfg2.1 chmod 0644 "${3}"{DATADIR}/share/man/man5/bcfg2.conf.5 chmod 0644 "${3}"/Library/LaunchDaemons/{LAUNCHD} chmod 0755 "${3}"/usr/local/bin/bcfg2 # add the launchd script /bin/launchctl load -w "${3}"/Library/LaunchDaemons/{LAUNCHD} osx/preflight000066400000000000000000000006671303523157100136070ustar00rootroot00000000000000#!/bin/bash # # Remove old bcfg2 cruft # # ${3} is the destination volume so that this works correctly # when being installed to volumes other than the current OS. 
/bin/rm -Rvf "${3}"{SITELIBDIR}/Bcfg2* /bin/rm -Rvf "${3}"/usr/local/bin/bcfg2* /bin/rm -Rvf "${3}{DATADIR}/share/bcfg2" /bin/rm -Rvf "${3}{DATADIR}/share/man/man1/bcfg2*" /bin/rm -Rvf "${3}{DATADIR}/share/man/man5/bcfg2*" /bin/rm -Rvf "${3}{DATADIR}/share/man/man8/bcfg2*" redhat/000077500000000000000000000000001303523157100123255ustar00rootroot00000000000000redhat/scripts/000077500000000000000000000000001303523157100140145ustar00rootroot00000000000000redhat/scripts/bcfg2-report-collector.init000077500000000000000000000045671303523157100212000ustar00rootroot00000000000000#!/bin/sh # # bcfg-report-collector - Bcfg2 reporting collector daemon # # chkconfig: 2345 19 81 # description: bcfg2 server for reporting data # ### BEGIN INIT INFO # Provides: bcfg2-report-collector # Required-Start: $network $remote_fs $named # Required-Stop: $network $remote_fs $named # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Configuration management Server # Description: Bcfg2 is a configuration management system that builds # installs configuration files served by bcfg2-server ### END INIT INFO # Include lsb functions . /etc/init.d/functions # Commonly used stuff DAEMON=/usr/sbin/bcfg2-report-collector PIDFILE=/var/run/bcfg2-server/bcfg2-report-collector.pid PARAMS="-D $PIDFILE" # Include default startup configuration if exists test -f "/etc/sysconfig/bcfg2-report-collector" && . /etc/sysconfig/bcfg2-report-collector # Exit if $DAEMON doesn't exist and is not executable test -x $DAEMON || exit 5 # Internal variables BINARY=$(basename $DAEMON) RETVAL=0 start () { echo -n "Starting Configuration Report Collector: " daemon ${DAEMON} ${PARAMS} ${BCFG2_REPORT_OPTIONS} && success || failure STATUS=$? if [ "$STATUS" = 0 ] then test -d /var/lock/subsys && touch /var/lock/subsys/$BINARY else log_failure_msg "bcfg2-report-collector" fi return $STATUS } stop () { echo -n "Stopping Configuration Report Collector: " if [ -f $PIDFILE ]; then killproc -p $PIDFILE ${BINARY} else killproc ${BINARY} fi STATUS=$? test -e /var/lock/subsys/bcfg2-report-collector && rm /var/lock/subsys/$BINARY return $STATUS } status () { PID=$(pidofproc -p "$PIDFILE" $BINARY) if [ -n "$PID" ]; then echo "$BINARY (pid $PID) is running..." return 0 fi if [ -f $PIDFILE ]; then if [ -n "$PID" ]; then echo "$BINARY dead but pid file exists..." return 1 fi fi echo "$BINARY is not running" return 3 } case "$1" in start) start RETVAL=$? ;; stop) stop RETVAL=$? ;; status) status RETVAL=$? ;; restart|reload|force-reload) stop sleep 5 start RETVAL=$? ;; *) echo "Usage: $0 {start|stop|status|reload|restart|force-reload}" RETVAL=1 ;; esac exit $RETVAL redhat/scripts/bcfg2-server.init000077500000000000000000000040761303523157100172020ustar00rootroot00000000000000#!/bin/sh # # bcfg2-server - bcfg2 configuration daemon # # chkconfig: 345 80 20 # description: bcfg2 is a configuration management system that builds \ # and installs configuration files. \ # This is the server that provides the configurations \ # to clients. DAEMON=/usr/sbin/bcfg2-server PIDFILE=/var/run/bcfg2-server/bcfg2-server.pid PARAMS="-D $PIDFILE" prog=$(basename $DAEMON) conf="/etc/bcfg2.conf" # Disabled per default BCFG2_SERVER_OPTIONS="" BCFG2_SERVER_ENABLED=0 PATH=/sbin:/bin:/usr/bin:/usr/sbin # Source function library . /etc/init.d/functions # Include default startup configuration if exists test -f /etc/sysconfig/$prog && . 
/etc/sysconfig/$prog if [ "$BCFG2_SERVER_ENABLED" -eq 0 ] ; then failure $"bcfg2-server is disabled - see /etc/sysconfig/$prog" echo exit 0 fi RETVAL=0 start () { test -x $DAEMON || exit 5 test -f $conf || exit 6 echo -n $"Starting $prog: " daemon $DAEMON ${PARAMS} ${BCFG2_SERVER_OPTIONS} && success || failure RETVAL=$? echo if test $RETVAL = 0 ; then test -d /var/lock/subsys && touch /var/lock/subsys/$prog fi return $RETVAL } stop () { echo -n $"Stopping $prog: " # we do NOT want to specify the pidfile to killproc; if we do, and # it has to kill -9 the server, it only kills the master and the # child processes stay running (if the multiprocessing core is in # use). By not specifying a pidfile, it looks in the process # table for all bcfg2-server processes, and kill -9's them all if # necessary. killproc -d 30 ${prog} && success || failure RETVAL=$? echo rm -f /var/lock/subsys/$prog return $RETVAL } case "$1" in start) start RETVAL=$? ;; stop) stop RETVAL=$? ;; status) status $prog RETVAL=$? ;; restart|reload|force-reload) stop sleep 5 start RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|status|restart|reload|force-reload}" RETVAL=3 ;; esac exit $RETVAL redhat/scripts/bcfg2.init000077500000000000000000000027311303523157100156720ustar00rootroot00000000000000#!/bin/sh # # bcfg2 - bcfg2 configuration client # # chkconfig: 345 80 20 # description: bcfg2 is a configuration management system that builds \ # and installs configuration files served by bcfg2-server. \ # This is a client that installs the server-provided \ # configuration. # DAEMON=/usr/sbin/bcfg2 PIDFILE=/var/run/bcfg2-agent.pid prog=$(basename $DAEMON) # Set default options # You can set script specific options with BCFG2_OPTIONS_INIT PARAMS="-q" # Disabled per default BCFG2_ENABLED=0 BCFG2_INIT=0 BCFG2_AGENT=0 PATH=/sbin:/bin:/usr/bin:/usr/sbin # Source function library . /etc/init.d/functions # Include default startup configuration if exists test -f /etc/sysconfig/$prog && . /etc/sysconfig/$prog [ "$BCFG2_ENABLED" -eq 0 ] && exit 0 [ "$BCFG2_INIT" -eq 0 ] && exit 0 if [ "$BCFG2_AGENT" != 0 ]; then echo "Bcfg2 no longer supports agent mode, please update your configuration!" exit 1 fi RETVAL=0 start () { test -x $DAEMON || exit 5 echo -n $"Starting $prog: " if test $BCFG2_INIT = 1 ; then $DAEMON $PARAMS ${BCFG2_OPTIONS_INIT} && success || failure RETVAL=$? echo fi return $RETVAL } case "$1" in start) start RETVAL=$? ;; stop|status) RETVAL=0 ;; restart|force-reload) start RETVAL=$? ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload}" RETVAL=3 esac exit $RETVAL redhat/selinux/000077500000000000000000000000001303523157100140145ustar00rootroot00000000000000redhat/selinux/bcfg2.fc000066400000000000000000000022411303523157100153100ustar00rootroot00000000000000/etc/rc\.d/init\.d/bcfg2-server -- gen_context(system_u:object_r:bcfg2_server_initrc_exec_t,s0) /etc/rc\.d/init\.d/bcfg2 -- gen_context(system_u:object_r:bcfg2_initrc_exec_t,s0) /usr/sbin/bcfg2 -- gen_context(system_u:object_r:bcfg2_exec_t,s0) /usr/sbin/bcfg2-server -- gen_context(system_u:object_r:bcfg2_server_exec_t,s0) /usr/sbin/bcfg2-yum-helper -- gen_context(system_u:object_r:bcfg2_yum_helper_exec_t,s0) /usr/lib/bcfg2/bcfg2-cron -- gen_context(system_u:object_r:bcfg2_exec_t,s0) /var/lib/bcfg2(/.*)? 
gen_context(system_u:object_r:bcfg2_var_lib_t,s0) /var/lib/bcfg2/Trigger/.* -- gen_context(system_u:object_r:bcfg2_server_script_exec_t,s0) /var/lib/bcfg2/PuppetENC/.* -- gen_context(system_u:object_r:bcfg2_server_script_exec_t,s0) /var/lib/bcfg2/Cfg/.*/:test -- gen_context(system_u:object_r:bcfg2_server_script_exec_t,s0) /var/run/bcfg2-server.* -- gen_context(system_u:object_r:bcfg2_var_run_t,s0) /var/lock/bcfg2\.run -- gen_context(system_u:object_r:bcfg2_lock_t,s0) /etc/bcfg2.*\.conf -- gen_context(system_u:object_r:bcfg2_conf_t,s0) redhat/selinux/bcfg2.if000066400000000000000000000106011303523157100153150ustar00rootroot00000000000000##

    bcfg2-server daemon which serves configurations to clients based on the data in its repository ######################################## ## ## Execute bcfg2-server in the bcfg2 server domain. ## ## ## ## Domain allowed to transition. ## ## # interface(`bcfg2_server_domtrans',` gen_require(` type bcfg2_server_t, bcfg2_server_exec_t; ') corecmd_search_bin($1) domtrans_pattern($1, bcfg2_server_exec_t, bcfg2_server_t) ') ######################################## ## ## Execute bcfg2-server server in the bcfg2-server domain. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_server_initrc_domtrans',` gen_require(` type bcfg2_server_initrc_exec_t; ') init_labeled_script_domtrans($1, bcfg2_server_initrc_exec_t) ') ######################################## ## ## Search bcfg2 lib directories. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_search_lib',` gen_require(` type bcfg2_var_lib_t; ') allow $1 bcfg2_var_lib_t:dir search_dir_perms; files_search_var_lib($1) ') ######################################## ## ## Read bcfg2 lib files. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_read_lib_files',` gen_require(` type bcfg2_var_lib_t; ') files_search_var_lib($1) read_files_pattern($1, bcfg2_var_lib_t, bcfg2_var_lib_t) ') ######################################## ## ## Manage bcfg2 lib files. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_manage_lib_files',` gen_require(` type bcfg2_var_lib_t; ') files_search_var_lib($1) manage_files_pattern($1, bcfg2_var_lib_t, bcfg2_var_lib_t) ') ######################################## ## ## Manage bcfg2 lib directories. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_manage_lib_dirs',` gen_require(` type bcfg2_var_lib_t; ') files_search_var_lib($1) manage_dirs_pattern($1, bcfg2_var_lib_t, bcfg2_var_lib_t) ') ######################################## ## ## All of the rules required to administer ## a bcfg2-server environment ## ## ## ## Domain allowed access. ## ## ## ## ## Role allowed access. ## ## ## # interface(`bcfg2_server_admin',` gen_require(` type bcfg2_server_t; type bcfg2_server_initrc_exec_t; type bcfg2_server_var_lib_t; ') allow $1 bcfg2_server_t:process { ptrace signal_perms }; ps_process_pattern($1, bcfg2_server_t) bcfg2_server_initrc_domtrans($1) domain_system_change_exemption($1) role_transition $2 bcfg2_server_initrc_exec_t system_r; allow $2 system_r; files_search_var_lib($1) admin_pattern($1, bcfg2_server_var_lib_t) ') ######################################## ## ## Execute bcfg2 in the bcfg2 domain. ## ## ## ## Domain allowed to transition. ## ## # interface(`bcfg2_domtrans',` gen_require(` type bcfg2_t, bcfg2_exec_t; ') corecmd_search_bin($1) domtrans_pattern($1, bcfg2_exec_t, bcfg2_t) ') ######################################## ## ## Execute bcfg2 in the bcfg2 domain. ## ## ## ## Domain allowed access. ## ## # interface(`bcfg2_initrc_domtrans',` gen_require(` type bcfg2_initrc_exec_t; ') init_labeled_script_domtrans($1, bcfg2_initrc_exec_t) ') ######################################## ## ## All of the rules required to administer ## a bcfg2 client ## ## ## ## Domain allowed access. ## ## ## ## ## Role allowed access. 
## ## ## # interface(`bcfg2_client_admin',` gen_require(` type bcfg2_t; type bcfg2_initrc_exec_t; type bcfg2_var_lib_t; ') allow $1 bcfg2_t:process { ptrace signal_perms }; ps_process_pattern($1, bcfg2_t) bcfg2_initrc_domtrans($1) domain_system_change_exemption($1) role_transition $2 bcfg2_initrc_exec_t system_r; allow $2 system_r; ') redhat/selinux/bcfg2.te000066400000000000000000000145441303523157100153410ustar00rootroot00000000000000policy_module(bcfg2, 1.1.0) ######################################## # # Declarations # gen_tunable(bcfg2_server_exec_scripts, false) gen_tunable(bcfg2_server_can_network_connect_db, false) type bcfg2_t; type bcfg2_exec_t; init_daemon_domain(bcfg2_t, bcfg2_exec_t) type bcfg2_server_t; type bcfg2_server_exec_t; init_daemon_domain(bcfg2_server_t, bcfg2_server_exec_t) type bcfg2_initrc_exec_t; init_script_file(bcfg2_initrc_exec_t) type bcfg2_server_initrc_exec_t; init_script_file(bcfg2_server_initrc_exec_t) type bcfg2_var_lib_t; files_type(bcfg2_var_lib_t) type bcfg2_server_script_t; type bcfg2_server_script_exec_t; application_domain(bcfg2_server_script_t, bcfg2_server_script_exec_t) role system_r types bcfg2_server_script_t; type bcfg2_yum_helper_exec_t; application_domain(bcfg2_server_t, bcfg2_server_script_exec_t) type bcfg2_var_run_t; files_pid_file(bcfg2_var_run_t) type bcfg2_lock_t; files_lock_file(bcfg2_lock_t) type bcfg2_conf_t; files_config_file(bcfg2_conf_t) type bcfg2_tmp_t; files_tmp_file(bcfg2_tmp_t) ######################################## # # bcfg2-server local policy # allow bcfg2_server_t self:fifo_file rw_fifo_file_perms; allow bcfg2_server_t self:tcp_socket create_stream_socket_perms; allow bcfg2_server_t self:unix_stream_socket { connectto create_stream_socket_perms }; allow bcfg2_server_t self:process { setrlimit setsched }; allow bcfg2_server_t self:capability { setgid setuid sys_nice }; manage_dirs_pattern(bcfg2_server_t, bcfg2_var_lib_t, bcfg2_var_lib_t) manage_files_pattern(bcfg2_server_t, bcfg2_var_lib_t, bcfg2_var_lib_t) files_var_lib_filetrans(bcfg2_server_t, bcfg2_var_lib_t, dir ) manage_files_pattern(bcfg2_server_t, bcfg2_server_script_t, bcfg2_server_script_t) manage_files_pattern(bcfg2_server_t, bcfg2_var_run_t, bcfg2_var_run_t) files_pid_filetrans(bcfg2_server_t, bcfg2_var_run_t, file ) files_search_etc(bcfg2_server_t) read_files_pattern(bcfg2_server_t, bcfg2_conf_t, bcfg2_conf_t) read_lnk_files_pattern(bcfg2_server_t, bcfg2_conf_t, bcfg2_conf_t) manage_files_pattern(bcfg2_server_t, bcfg2_tmp_t, bcfg2_tmp_t) files_tmp_filetrans(bcfg2_server_t, bcfg2_tmp_t, file) can_exec(bcfg2_server_t, bcfg2_tmp_t) kernel_read_system_state(bcfg2_server_t) corecmd_exec_bin(bcfg2_server_t) corecmd_exec_shell(bcfg2_server_t) dev_read_urand(bcfg2_server_t) fs_list_inotifyfs(bcfg2_server_t) domain_use_interactive_fds(bcfg2_server_t) files_read_usr_files(bcfg2_server_t) logging_send_syslog_msg(bcfg2_server_t) miscfiles_read_localization(bcfg2_server_t) miscfiles_read_certs(bcfg2_server_t) auth_use_nsswitch(bcfg2_server_t) libs_exec_ldconfig(bcfg2_server_t) # let bcfg2-server run bcfg2-yum-helper in the exact same context can_exec(bcfg2_server_t, bcfg2_yum_helper_exec_t) # port 6789 was somehow already claimed by cyphesis, whatever that is corenet_tcp_bind_cyphesis_port(bcfg2_server_t) corenet_tcp_connect_http_port(bcfg2_server_t) corenet_tcp_sendrecv_http_port(bcfg2_server_t) optional_policy(` postgresql_stream_connect(bcfg2_server_t) postgresql_unpriv_client(bcfg2_server_t) tunable_policy(`bcfg2_server_can_network_connect_db',` 
postgresql_tcp_connect(bcfg2_server_t) ') ') optional_policy(` mysql_stream_connect(bcfg2_server_t) mysql_rw_db_sockets(bcfg2_server_t) tunable_policy(`bcfg2_server_can_network_connect_db',` mysql_tcp_connect(bcfg2_server_t) ') ') optional_policy(` unconfined_domain(bcfg2_server_script_t) ') tunable_policy(`bcfg2_server_exec_scripts', ` domtrans_pattern(bcfg2_server_t, bcfg2_server_script_exec_t, bcfg2_server_script_t) can_exec(bcfg2_server_t, bcfg2_server_script_t) ') ######################################## # # bcfg2 (client) local policy # allow bcfg2_t self:capability { fowner fsetid setuid setgid dac_override sys_nice sys_ptrace sys_tty_config }; allow bcfg2_t self:process { signal signull getsched setsched }; allow bcfg2_t self:fifo_file rw_fifo_file_perms; allow bcfg2_t self:netlink_route_socket create_netlink_socket_perms; allow bcfg2_t self:tcp_socket create_stream_socket_perms; allow bcfg2_t self:udp_socket create_socket_perms; files_search_etc(bcfg2_t) read_files_pattern(bcfg2_t, bcfg2_conf_t, bcfg2_conf_t) read_lnk_files_pattern(bcfg2_t, bcfg2_conf_t, bcfg2_conf_t) allow bcfg2_t bcfg2_lock_t:file manage_file_perms; files_lock_filetrans(bcfg2_t, bcfg2_lock_t, file) kernel_dontaudit_search_sysctl(bcfg2_t) kernel_dontaudit_search_kernel_sysctl(bcfg2_t) kernel_read_system_state(bcfg2_t) kernel_read_crypto_sysctls(bcfg2_t) cron_system_entry(bcfg2_t, bcfg2_exec_t) corecmd_exec_bin(bcfg2_t) corecmd_exec_shell(bcfg2_t) corenet_all_recvfrom_netlabel(bcfg2_t) corenet_all_recvfrom_unlabeled(bcfg2_t) corenet_tcp_sendrecv_generic_if(bcfg2_t) corenet_tcp_sendrecv_generic_node(bcfg2_t) corenet_tcp_bind_generic_node(bcfg2_t) corenet_tcp_connect_cyphesis_port(bcfg2_t) corenet_sendrecv_cyphesis_client_packets(bcfg2_t) dev_read_rand(bcfg2_t) dev_read_sysfs(bcfg2_t) dev_read_urand(bcfg2_t) domain_read_all_domains_state(bcfg2_t) domain_interactive_fd(bcfg2_t) files_manage_config_files(bcfg2_t) files_manage_config_dirs(bcfg2_t) files_manage_etc_dirs(bcfg2_t) files_manage_etc_files(bcfg2_t) files_read_usr_symlinks(bcfg2_t) files_relabel_config_dirs(bcfg2_t) files_relabel_config_files(bcfg2_t) selinux_search_fs(bcfg2_t) selinux_set_all_booleans(bcfg2_t) selinux_set_generic_booleans(bcfg2_t) selinux_validate_context(bcfg2_t) term_dontaudit_getattr_unallocated_ttys(bcfg2_t) term_dontaudit_getattr_all_ttys(bcfg2_t) init_all_labeled_script_domtrans(bcfg2_t) init_domtrans_script(bcfg2_t) init_read_utmp(bcfg2_t) init_signull_script(bcfg2_t) logging_send_syslog_msg(bcfg2_t) miscfiles_read_hwdata(bcfg2_t) miscfiles_read_localization(bcfg2_t) mount_domtrans(bcfg2_t) auth_use_nsswitch(bcfg2_t) seutil_domtrans_setfiles(bcfg2_t) seutil_domtrans_semanage(bcfg2_t) sysnet_dns_name_resolve(bcfg2_t) sysnet_run_ifconfig(bcfg2_t, system_r) optional_policy(` consoletype_domtrans(bcfg2_t) ') optional_policy(` hostname_exec(bcfg2_t) ') optional_policy(` files_rw_var_files(bcfg2_t) rpm_domtrans(bcfg2_t) rpm_domtrans_script(bcfg2_t) rpm_manage_db(bcfg2_t) rpm_manage_log(bcfg2_t) ') optional_policy(` unconfined_domain(bcfg2_t) ') optional_policy(` usermanage_domtrans_groupadd(bcfg2_t) usermanage_domtrans_useradd(bcfg2_t) ') redhat/systemd/000077500000000000000000000000001303523157100140155ustar00rootroot00000000000000redhat/systemd/bcfg2-server.service000066400000000000000000000005571303523157100176750ustar00rootroot00000000000000[Unit] Description=Bcfg2 configuration daemon After=syslog.target network.target [Service] Type=forking StandardOutput=syslog StandardError=syslog EnvironmentFile=-/etc/sysconfig/bcfg2-server 
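# The leading "-" on EnvironmentFile marks the file as optional, so the unit
# still starts when no sysconfig file exists. If present, the file can set
# BCFG2_SERVER_OPTIONS, which is appended to ExecStart below. Illustrative
# sketch of /etc/sysconfig/bcfg2-server (hypothetical, empty by default):
#   BCFG2_SERVER_OPTIONS=""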
PIDFile=/run/bcfg2-server/bcfg2-server.pid ExecStart=/usr/sbin/bcfg2-server -D /run/bcfg2-server/bcfg2-server.pid $BCFG2_SERVER_OPTIONS [Install] WantedBy=multi-user.target redhat/systemd/bcfg2.service000066400000000000000000000004351303523157100163640ustar00rootroot00000000000000[Unit] Description=Bcfg2 configuration client After=syslog.target network.target [Service] Type=oneshot StandardOutput=syslog StandardError=syslog EnvironmentFile=-/etc/sysconfig/bcfg2 ExecStart=/usr/sbin/bcfg2 $BCFG2_OPTIONS RemainAfterExit=yes [Install] WantedBy=multi-user.target reports/000077500000000000000000000000001303523157100125545ustar00rootroot00000000000000reports/reports.wsgi000066400000000000000000000013451303523157100151500ustar00rootroot00000000000000import os import Bcfg2.Options import Bcfg2.DBSettings config_parsed = False def application(environ, start_response): global config_parsed # with wsgi, the environment isn't present in os.environ, but # is passwd to the application function if 'BCFG2_CONFIG_FILE' in environ: os.environ['BCFG2_CONFIG_FILE'] = environ['BCFG2_CONFIG_FILE'] if not config_parsed: Bcfg2.Options.get_parser().parse() config_parsed = True try: from django.core.wsgi import get_wsgi_application return get_wsgi_application()(environ, start_response) except ImportError: import django.core.handlers.wsgi return django.core.handlers.wsgi.WSGIHandler()(environ, start_response) reports/site_media/000077500000000000000000000000001303523157100146575ustar00rootroot00000000000000reports/site_media/AnchorPosition.js000066400000000000000000000124141303523157100201560ustar00rootroot00000000000000// =================================================================== // Author: Matt Kruse // WWW: http://www.mattkruse.com/ // // NOTICE: You may use this code for any purpose, commercial or // private, without any further permission from the author. You may // remove this notice from your final code if you wish, however it is // appreciated by the author if at least my web site address is kept. // // You may *NOT* re-distribute this code in any way except through its // use. That means, you can include it in your product, or your web // site, or any other form where the code is actually being used. You // may not put the plain javascript up on your site for download or // include it in your javascript libraries for download. // If you wish to share this code with others, please just point them // to the URL instead. // Please DO NOT link directly to my .js files from your site. Copy // the files to your server and use them there. Thank you. // =================================================================== /* AnchorPosition.js Author: Matt Kruse Last modified: 10/11/02 DESCRIPTION: These functions find the position of an tag in a document, so other elements can be positioned relative to it. COMPATABILITY: Netscape 4.x,6.x,Mozilla, IE 5.x,6.x on Windows. Some small positioning errors - usually with Window positioning - occur on the Macintosh platform. FUNCTIONS: getAnchorPosition(anchorname) Returns an Object() having .x and .y properties of the pixel coordinates of the upper-left corner of the anchor. Position is relative to the PAGE. getAnchorWindowPosition(anchorname) Returns an Object() having .x and .y properties of the pixel coordinates of the upper-left corner of the anchor, relative to the WHOLE SCREEN. NOTES: 1) For popping up separate browser windows, use getAnchorWindowPosition. 
Otherwise, use getAnchorPosition 2) Your anchor tag MUST contain both NAME and ID attributes which are the same. For example: 3) There must be at least a space between for IE5.5 to see the anchor tag correctly. Do not do with no space. */ // getAnchorPosition(anchorname) // This function returns an object having .x and .y properties which are the coordinates // of the named anchor, relative to the page. function getAnchorPosition(anchorname) { // This function will return an Object with x and y properties var useWindow=false; var coordinates=new Object(); var x=0,y=0; // Browser capability sniffing var use_gebi=false, use_css=false, use_layers=false; if (document.getElementById) { use_gebi=true; } else if (document.all) { use_css=true; } else if (document.layers) { use_layers=true; } // Logic to find position if (use_gebi && document.all) { x=AnchorPosition_getPageOffsetLeft(document.all[anchorname]); y=AnchorPosition_getPageOffsetTop(document.all[anchorname]); } else if (use_gebi) { var o=document.getElementById(anchorname); x=AnchorPosition_getPageOffsetLeft(o); y=AnchorPosition_getPageOffsetTop(o); } else if (use_css) { x=AnchorPosition_getPageOffsetLeft(document.all[anchorname]); y=AnchorPosition_getPageOffsetTop(document.all[anchorname]); } else if (use_layers) { var found=0; for (var i=0; i // WWW: http://www.mattkruse.com/ // // NOTICE: You may use this code for any purpose, commercial or // private, without any further permission from the author. You may // remove this notice from your final code if you wish, however it is // appreciated by the author if at least my web site address is kept. // // You may *NOT* re-distribute this code in any way except through its // use. That means, you can include it in your product, or your web // site, or any other form where the code is actually being used. You // may not put the plain javascript up on your site for download or // include it in your javascript libraries for download. // If you wish to share this code with others, please just point them // to the URL instead. // Please DO NOT link directly to my .js files from your site. Copy // the files to your server and use them there. Thank you. // =================================================================== // HISTORY // ------------------------------------------------------------------ // Feb 7, 2005: Fixed a CSS styles to use px unit // March 29, 2004: Added check in select() method for the form field // being disabled. If it is, just return and don't do anything. // March 24, 2004: Fixed bug - when month name and abbreviations were // changed, date format still used original values. // January 26, 2004: Added support for drop-down month and year // navigation (Thanks to Chris Reid for the idea) // September 22, 2003: Fixed a minor problem in YEAR calendar with // CSS prefix. // August 19, 2003: Renamed the function to get styles, and made it // work correctly without an object reference // August 18, 2003: Changed showYearNavigation and // showYearNavigationInput to optionally take an argument of // true or false // July 31, 2003: Added text input option for year navigation. // Added a per-calendar CSS prefix option to optionally use // different styles for different calendars. // July 29, 2003: Fixed bug causing the Today link to be clickable // even though today falls in a disabled date range. // Changed formatting to use pure CSS, allowing greater control // over look-and-feel options. 
// June 11, 2003: Fixed bug causing the Today link to be unselectable // under certain cases when some days of week are disabled // March 14, 2003: Added ability to disable individual dates or date // ranges, display as light gray and strike-through // March 14, 2003: Removed dependency on graypixel.gif and instead /// use table border coloring // March 12, 2003: Modified showCalendar() function to allow optional // start-date parameter // March 11, 2003: Modified select() function to allow optional // start-date parameter /* DESCRIPTION: This object implements a popup calendar to allow the user to select a date, month, quarter, or year. COMPATABILITY: Works with Netscape 4.x, 6.x, IE 5.x on Windows. Some small positioning errors - usually with Window positioning - occur on the Macintosh platform. The calendar can be modified to work for any location in the world by changing which weekday is displayed as the first column, changing the month names, and changing the column headers for each day. USAGE: // Create a new CalendarPopup object of type WINDOW var cal = new CalendarPopup(); // Create a new CalendarPopup object of type DIV using the DIV named 'mydiv' var cal = new CalendarPopup('mydiv'); // Easy method to link the popup calendar with an input box. cal.select(inputObject, anchorname, dateFormat); // Same method, but passing a default date other than the field's current value cal.select(inputObject, anchorname, dateFormat, '01/02/2000'); // This is an example call to the popup calendar from a link to populate an // input box. Note that to use this, date.js must also be included!! Select // Set the type of date select to be used. By default it is 'date'. cal.setDisplayType(type); // When a date, month, quarter, or year is clicked, a function is called and // passed the details. You must write this function, and tell the calendar // popup what the function name is. // Function to be called for 'date' select receives y, m, d cal.setReturnFunction(functionname); // Function to be called for 'month' select receives y, m cal.setReturnMonthFunction(functionname); // Function to be called for 'quarter' select receives y, q cal.setReturnQuarterFunction(functionname); // Function to be called for 'year' select receives y cal.setReturnYearFunction(functionname); // Show the calendar relative to a given anchor cal.showCalendar(anchorname); // Hide the calendar. The calendar is set to autoHide automatically cal.hideCalendar(); // Set the month names to be used. Default are English month names cal.setMonthNames("January","February","March",...); // Set the month abbreviations to be used. Default are English month abbreviations cal.setMonthAbbreviations("Jan","Feb","Mar",...); // Show navigation for changing by the year, not just one month at a time cal.showYearNavigation(); // Show month and year dropdowns, for quicker selection of month of dates cal.showNavigationDropdowns(); // Set the text to be used above each day column. The days start with // sunday regardless of the value of WeekStartDay cal.setDayHeaders("S","M","T",...); // Set the day for the first column in the calendar grid. By default this // is Sunday (0) but it may be changed to fit the conventions of other // countries. cal.setWeekStartDay(1); // week is Monday - Sunday // Set the weekdays which should be disabled in the 'date' select popup. 
You can // then allow someone to only select week end dates, or Tuedays, for example cal.setDisabledWeekDays(0,1); // To disable selecting the 1st or 2nd days of the week // Selectively disable individual days or date ranges. Disabled days will not // be clickable, and show as strike-through text on current browsers. // Date format is any format recognized by parseDate() in date.js // Pass a single date to disable: cal.addDisabledDates("2003-01-01"); // Pass null as the first parameter to mean "anything up to and including" the // passed date: cal.addDisabledDates(null, "01/02/03"); // Pass null as the second parameter to mean "including the passed date and // anything after it: cal.addDisabledDates("Jan 01, 2003", null); // Pass two dates to disable all dates inbetween and including the two cal.addDisabledDates("January 01, 2003", "Dec 31, 2003"); // When the 'year' select is displayed, set the number of years back from the // current year to start listing years. Default is 2. // This is also used for year drop-down, to decide how many years +/- to display cal.setYearSelectStartOffset(2); // Text for the word "Today" appearing on the calendar cal.setTodayText("Today"); // The calendar uses CSS classes for formatting. If you want your calendar to // have unique styles, you can set the prefix that will be added to all the // classes in the output. // For example, normal output may have this: // Today // But if you set the prefix like this: cal.setCssPrefix("Test"); // The output will then look like: // Today // And you can define that style somewhere in your page. // When using Year navigation, you can make the year be an input box, so // the user can manually change it and jump to any year cal.showYearNavigationInput(); // Set the calendar offset to be different than the default. By default it // will appear just below and to the right of the anchorname. So if you have // a text box where the date will go and and anchor immediately after the // text box, the calendar will display immediately under the text box. cal.offsetX = 20; cal.offsetY = 20; NOTES: 1) Requires the functions in AnchorPosition.js and PopupWindow.js 2) Your anchor tag MUST contain both NAME and ID attributes which are the same. For example: 3) There must be at least a space between for IE5.5 to see the anchor tag correctly. Do not do with no space. 4) When a CalendarPopup object is created, a handler for 'onmouseup' is attached to any event handler you may have already defined. Do NOT define an event handler for 'onmouseup' after you define a CalendarPopup object or the autoHide() will not work correctly. 5) The calendar popup display uses style sheets to make it look nice. 
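EXAMPLE:
A compact, illustrative sketch that only combines calls documented in the
USAGE section above; the element names ("mydate", "anchor1") and the
callback name are made up for the example.

  var cal = new CalendarPopup();          // WINDOW-style popup
  cal.setReturnFunction("setMyDate");     // called with y, m, d
  function setMyDate(y,m,d) {
    document.forms[0].mydate.value = m+"/"+d+"/"+y;
  }
  // show it, e.g. from an onclick handler, relative to a named anchor:
  //   cal.showCalendar('anchor1');
  // or populate an input box directly (requires date.js):
  //   cal.select(document.forms[0].mydate,'anchor1','MM/dd/yyyy');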
*/ // CONSTRUCTOR for the CalendarPopup Object function CalendarPopup() { var c; if (arguments.length>0) { c = new PopupWindow(arguments[0]); } else { c = new PopupWindow(); c.setSize(150,175); } c.offsetX = -152; c.offsetY = 25; c.autoHide(); // Calendar-specific properties c.monthNames = new Array("January","February","March","April","May","June","July","August","September","October","November","December"); c.monthAbbreviations = new Array("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"); c.dayHeaders = new Array("S","M","T","W","T","F","S"); c.returnFunction = "CP_tmpReturnFunction"; c.returnMonthFunction = "CP_tmpReturnMonthFunction"; c.returnQuarterFunction = "CP_tmpReturnQuarterFunction"; c.returnYearFunction = "CP_tmpReturnYearFunction"; c.weekStartDay = 0; c.isShowYearNavigation = false; c.displayType = "date"; c.disabledWeekDays = new Object(); c.disabledDatesExpression = ""; c.yearSelectStartOffset = 2; c.currentDate = null; c.todayText="Today"; c.cssPrefix=""; c.isShowNavigationDropdowns=false; c.isShowYearNavigationInput=false; window.CP_calendarObject = null; window.CP_targetInput = null; window.CP_dateFormat = "MM/dd/yyyy"; // Method mappings c.copyMonthNamesToWindow = CP_copyMonthNamesToWindow; c.setReturnFunction = CP_setReturnFunction; c.setReturnMonthFunction = CP_setReturnMonthFunction; c.setReturnQuarterFunction = CP_setReturnQuarterFunction; c.setReturnYearFunction = CP_setReturnYearFunction; c.setMonthNames = CP_setMonthNames; c.setMonthAbbreviations = CP_setMonthAbbreviations; c.setDayHeaders = CP_setDayHeaders; c.setWeekStartDay = CP_setWeekStartDay; c.setDisplayType = CP_setDisplayType; c.setDisabledWeekDays = CP_setDisabledWeekDays; c.addDisabledDates = CP_addDisabledDates; c.setYearSelectStartOffset = CP_setYearSelectStartOffset; c.setTodayText = CP_setTodayText; c.showYearNavigation = CP_showYearNavigation; c.showCalendar = CP_showCalendar; c.hideCalendar = CP_hideCalendar; c.getStyles = getCalendarStyles; c.refreshCalendar = CP_refreshCalendar; c.getCalendar = CP_getCalendar; c.select = CP_select; c.setCssPrefix = CP_setCssPrefix; c.showNavigationDropdowns = CP_showNavigationDropdowns; c.showYearNavigationInput = CP_showYearNavigationInput; c.copyMonthNamesToWindow(); // Return the object return c; } function CP_copyMonthNamesToWindow() { // Copy these values over to the date.js if (typeof(window.MONTH_NAMES)!="undefined" && window.MONTH_NAMES!=null) { window.MONTH_NAMES = new Array(); for (var i=0; i\n"; result += '
    \n'; } else { result += '
    \n'; result += '
    \n'; result += '
    \n'; } // Code for DATE display (default) // ------------------------------- if (this.displayType=="date" || this.displayType=="week-end") { if (this.currentDate==null) { this.currentDate = now; } if (arguments.length > 0) { var month = arguments[0]; } else { var month = this.currentDate.getMonth()+1; } if (arguments.length > 1 && arguments[1]>0 && arguments[1]-0==arguments[1]) { var year = arguments[1]; } else { var year = this.currentDate.getFullYear(); } var daysinmonth= new Array(0,31,28,31,30,31,30,31,31,30,31,30,31); if ( ( (year%4 == 0)&&(year%100 != 0) ) || (year%400 == 0) ) { daysinmonth[2] = 29; } var current_month = new Date(year,month-1,1); var display_year = year; var display_month = month; var display_date = 1; var weekday= current_month.getDay(); var offset = 0; offset = (weekday >= this.weekStartDay) ? weekday-this.weekStartDay : 7-this.weekStartDay+weekday ; if (offset > 0) { display_month--; if (display_month < 1) { display_month = 12; display_year--; } display_date = daysinmonth[display_month]-offset+1; } var next_month = month+1; var next_month_year = year; if (next_month > 12) { next_month=1; next_month_year++; } var last_month = month-1; var last_month_year = year; if (last_month < 1) { last_month=12; last_month_year--; } var date_class; if (this.type!="WINDOW") { result += ""; } result += '\n'; var refresh = windowref+'CP_refreshCalendar'; var refreshLink = 'javascript:' + refresh; if (this.isShowNavigationDropdowns) { result += ''; result += ''; result += ''; } else { if (this.isShowYearNavigation) { result += ''; result += ''; result += ''; result += ''; result += ''; if (this.isShowYearNavigationInput) { result += ''; } else { result += ''; } result += ''; } else { result += '\n'; result += '\n'; result += '\n'; } } result += '
     <'+this.monthNames[month-1]+'> <'+year+'><<'+this.monthNames[month-1]+' '+year+'>>
    \n'; result += '\n'; result += '\n'; for (var j=0; j<7; j++) { result += '\n'; } result += '\n'; for (var row=1; row<=6; row++) { result += '\n'; for (var col=1; col<=7; col++) { var disabled=false; if (this.disabledDatesExpression!="") { var ds=""+display_year+LZ(display_month)+LZ(display_date); eval("disabled=("+this.disabledDatesExpression+")"); } var dateClass = ""; if ((display_month == this.currentDate.getMonth()+1) && (display_date==this.currentDate.getDate()) && (display_year==this.currentDate.getFullYear())) { dateClass = "cpCurrentDate"; } else if (display_month == month) { dateClass = "cpCurrentMonthDate"; } else { dateClass = "cpOtherMonthDate"; } if (disabled || this.disabledWeekDays[col-1]) { result += ' \n'; } else { var selected_date = display_date; var selected_month = display_month; var selected_year = display_year; if (this.displayType=="week-end") { var d = new Date(selected_year,selected_month-1,selected_date,0,0,0,0); d.setDate(d.getDate() + (7-col)); selected_year = d.getYear(); if (selected_year < 1000) { selected_year += 1900; } selected_month = d.getMonth()+1; selected_date = d.getDate(); } result += ' \n'; } display_date++; if (display_date > daysinmonth[display_month]) { display_date=1; display_month++; } if (display_month > 12) { display_month=1; display_year++; } } result += ''; } var current_weekday = now.getDay() - this.weekStartDay; if (current_weekday < 0) { current_weekday += 7; } result += '\n'; result += '
    '+this.dayHeaders[(this.weekStartDay+j)%7]+'
    '+display_date+''+display_date+'
    \n'; if (this.disabledDatesExpression!="") { var ds=""+now.getFullYear()+LZ(now.getMonth()+1)+LZ(now.getDate()); eval("disabled=("+this.disabledDatesExpression+")"); } if (disabled || this.disabledWeekDays[current_weekday+1]) { result += ' '+this.todayText+'\n'; } else { result += ' '+this.todayText+'\n'; } result += '
    \n'; result += '
    \n'; } // Code common for MONTH, QUARTER, YEAR // ------------------------------------ if (this.displayType=="month" || this.displayType=="quarter" || this.displayType=="year") { if (arguments.length > 0) { var year = arguments[0]; } else { if (this.displayType=="year") { var year = now.getFullYear()-this.yearSelectStartOffset; } else { var year = now.getFullYear(); } } if (this.displayType!="year" && this.isShowYearNavigation) { result += ""; result += '\n'; result += ' \n'; result += ' \n'; result += ' \n'; result += '
    <<'+year+'>>
    \n'; } } // Code for MONTH display // ---------------------- if (this.displayType=="month") { // If POPUP, write entire HTML document result += '\n'; for (var i=0; i<4; i++) { result += ''; for (var j=0; j<3; j++) { var monthindex = ((i*3)+j); result += ''; } result += ''; } result += '
    '+this.monthAbbreviations[monthindex]+'
    \n'; } // Code for QUARTER display // ------------------------ if (this.displayType=="quarter") { result += '
    \n'; for (var i=0; i<2; i++) { result += ''; for (var j=0; j<2; j++) { var quarter = ((i*2)+j+1); result += ''; } result += ''; } result += '

    Q'+quarter+'

    \n'; } // Code for YEAR display // --------------------- if (this.displayType=="year") { var yearColumnSize = 4; result += ""; result += '\n'; result += ' \n'; result += ' \n'; result += '
    <<>>
    \n'; result += '\n'; for (var i=0; i'+currentyear+''; } result += ''; } result += '
    \n'; } // Common if (this.type == "WINDOW") { result += "\n"; } return result; } reports/site_media/PopupWindow.js000066400000000000000000000252421303523157100175150ustar00rootroot00000000000000// =================================================================== // Author: Matt Kruse // WWW: http://www.mattkruse.com/ // // NOTICE: You may use this code for any purpose, commercial or // private, without any further permission from the author. You may // remove this notice from your final code if you wish, however it is // appreciated by the author if at least my web site address is kept. // // You may *NOT* re-distribute this code in any way except through its // use. That means, you can include it in your product, or your web // site, or any other form where the code is actually being used. You // may not put the plain javascript up on your site for download or // include it in your javascript libraries for download. // If you wish to share this code with others, please just point them // to the URL instead. // Please DO NOT link directly to my .js files from your site. Copy // the files to your server and use them there. Thank you. // =================================================================== /* PopupWindow.js Author: Matt Kruse Last modified: 02/16/04 DESCRIPTION: This object allows you to easily and quickly popup a window in a certain place. The window can either be a DIV or a separate browser window. COMPATABILITY: Works with Netscape 4.x, 6.x, IE 5.x on Windows. Some small positioning errors - usually with Window positioning - occur on the Macintosh platform. Due to bugs in Netscape 4.x, populating the popup window with reports/xsl-transforms/xsl-transform-includes/html-templates.xsl000066400000000000000000000147001303523157100256730ustar00rootroot00000000000000
    Time Ran: ()

    Node:

    Revision:
    &nbsp;
    &nbsp;
    Node is clean; everything has been satisfactorily configured.
    This node did not run within the last 24 hours; it may be out of date.
    items did not verify and are considered Dirty.
    items were modified in the last run.
    extra configuration elements on node.
  • Config File:
  • Package:
  • Directory:
  • Service:
  • SymLink:
  • reports/xsl-transforms/xsl-transform-includes/main-js.xsl000066400000000000000000000021061303523157100242660ustar00rootroot00000000000000 reports/xsl-transforms/xsl-transform-includes/sorttable-js.xsl000066400000000000000000000155341303523157100253520ustar00rootroot00000000000000 reports/xsl-transforms/xsl-transform-includes/text-templates.xsl000066400000000000000000000074731303523157100257240ustar00rootroot00000000000000 Node: Time Ran: . () Node is clean; Everything has been satisfactorily configured. This node did not run within the last 24 hours-- it may be out of date. items did not verify and are considered Dirty: items were modified in the last run. extra configuration elements on node. Config File: Package: Directory: Service: SymLink: schemas/000077500000000000000000000000001303523157100125015ustar00rootroot00000000000000schemas/acl-ip.xsd000066400000000000000000000040641303523157100143720ustar00rootroot00000000000000 Schema for IP-based client ACLs: :ref:`server-plugins-misc-acl` ``ip.xml`` The name of the XML-RPC method to allow or deny. Limited wildcards are supported. The IP address to match against. This is an exact match unless :xml:attribute:`IPACLType:netmask` is defined. If this is not defined, all addresses match the given rule. If this is defined, then it is combined with :xml:attribute:`IPACLType:address` to produce a CIDR range, which is used for matching instead of exact matching based only on IP address. This can be either an integer netmask (e.g., ``netmask="24"``) or a dotted-quad (e.g., ``netmask="255.255.255.0"``). Top-level tag for describing metadata-based client ACLs. schemas/acl-metadata.xsd000066400000000000000000000063271303523157100155460ustar00rootroot00000000000000 Schema for metadata-based client ACLs: :ref:`server-plugins-misc-acl` ``metadata.xml`` An **MetadataACLGroupType** is a tag used to provide logic. Child entries of a MetadataACLGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`MetadataACLGroupType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`MetadataACLGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. The name of the XML-RPC method to allow or deny. Limited wildcards are supported. Top-level tag for describing metadata-based client ACLs. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/acl.xsd000066400000000000000000000012351303523157100137610ustar00rootroot00000000000000 acl config schema for bcfg2 Matt Schwager schemas/atom.xsd000066400000000000000000000012401303523157100141560ustar00rootroot00000000000000 atomic configuration types schema for bcfg2 Narayan Desai, Argonne National Laboratory schemas/augeas.xsd000066400000000000000000000171001303523157100144650ustar00rootroot00000000000000 Augeas commands Implementation of the Augeas ``rm`` command. Delete nodes (and all children) matching the given Augeas path expression. Implementation of the Augeas ``mv`` command. Move the node matching this path expression. ``source`` must match exactly one node. Move the node to this location. ``destination`` must match either zero or one nodes. Implementation of the Augeas ``set`` command. Path to set the value for. 
If the path does not exist, it and all of its ancestors will be created. Value to set. Implementation of the Augeas ``clear`` command. Path whose value will be set to ``NULL``. If the path does not exist, it and all of its ancestors will be created. Set multiple node values at once. The base path. ``sub`` will be used as an expression relative to each node that matches the :xml:attribute:`AugeasSetMultiCommand:base` expression. The value to set on all nodes that match :xml:attribute:`AugeasSetMultiCommand:sub` relative to each node matching :xml:attribute:`AugeasSetMultiCommand:base`. Implementation of the Augeas ``ins`` command. The path to a node that will be the sibling of the new node. The label of the new node to be created. Where to create the node: ``before`` or ``after`` the sibling given in :xml:attribute:`AugeasInsertCommand:path`. All available Augeas commands. Specify initial content for a file, which will be created before Augeas commands are applied if a file doesn't exist. Implementation of the Augeas ``rm`` command. Implementation of the Augeas ``mv`` command. Implementation of the Augeas ``set`` command. Implementation of the Augeas ``clear`` command. Set multiple node values at once. Implementation of the Augeas ``ins`` command. schemas/authorizedkeys.xsd000066400000000000000000000157411303523157100163030ustar00rootroot00000000000000 Schema for :ref:`server-plugins-generators-cfg-sshkeys` ``authorizedkeys.xml`` An **AuthorizedKeysGroupType** is a tag used to provide logic. Child entries of an AuthorizedKeysGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`AuthorizedKeysGroupType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`AuthorizedKeysGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. An **OptionContainerType** is a tag used to provide logic. Child entries of an OptionContainerType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`OptionContainerType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`OptionContainerType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. Allow access from a public key, given either as text content, or described by the attributes. The path of the public key to allow. Use a public key specific to the given group, instead of the public key specific to the appropriate category group of the current client. Use a public key specific to the group in the given category, instead of the category specified in ``bcfg2.conf``. Use a public key specific to the given host. Specify options for public key authentication and connection. See :manpage:`sshd(8)` for details on allowable options. The name of the sshd option. The value of the sshd option. This can be omitted for options that take no value. Top-level tag for describing a generated SSH key pair. Override the global lax_decryption setting in ``bcfg2.conf``. 
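<!-- Illustrative sketch of the authorizedkeys.xml format that the annotation
     text above describes. Element names and nesting are assumptions pieced
     together from that text: Allow is shown with the public key as literal
     text content (one of the two documented forms), and Option carries the
     name (and optional value) of an sshd option. Key material is fake.

<AuthorizedKeys>
  <Allow>ssh-rsa AAAAB3Nza...EXAMPLEKEY root@example.com</Allow>
  <Allow>
    <Option name="no-agent-forwarding"/>
    ssh-ed25519 AAAAC3Nza...EXAMPLEKEY2 admin@example.com
  </Allow>
</AuthorizedKeys>
-->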
schemas/awstags.xsd000066400000000000000000000050541303523157100146760ustar00rootroot00000000000000 :ref:`AWSTags <server-plugins-connectors-awstags>` config schema for bcfg2 The group to assign to machines with tags that match the enclosing Tag expression. More than one group can be specified. The name pattern to match against. This is a regular expression. It is not anchored. The value pattern to match against. This is a regular expression. It is not anchored. Top-level tag for ``AWSTags/config.xml``. Representation of a pattern that matches AWS tags. Tags can be matched in one of two ways: * If only :xml:attribute:`TagType:name` is specified, then AWSTags will only look for a tag with a matching name, and the value of tags is ignored. * If both :xml:attribute:`TagType:name` and :xml:attribute:`TagType:value` are specified, a tag must have a matching name *and* a matching value. schemas/base.xsd000066400000000000000000000025401303523157100141340ustar00rootroot00000000000000 base schema for bcfg2 Narayan Desai, Argonne National Laboratory schemas/bundle.xsd000066400000000000000000000353411303523157100145000ustar00rootroot00000000000000 bundle schema for bcfg2 Narayan Desai, Argonne National Laboratory Abstract implementation of a Package entry. The full specification will be generated by a plugin such as Packages. Abstract implementation of a Path entry. The entry will be handled by a Generator plugin, like Cfg or Rules. Abstract implementation of a Service entry. The full specification will be included in Rules. Abstract implementation of an Action entry. The full specification will be included in Rules. Abstract description of a POSIXUser entry. Abstract description of a POSIXGroup entry. Abstract SELinux boolean entry. Abstract SELinux port entry. Abstract SELinux file context ("fcontext") entry. Abstract SELinux node entry. Abstract SELinux login entry. Abstract SELinux user entry. Abstract SELinux interface entry. Abstract SELinux permissive domain entry. Abstract SELinux module entry. Fully bound description of a software package to be managed. Fully bound description of a filesystem path to be handled by the POSIX driver. Fully bound description of a system service to be managed. Fully bound description of a command to be run. Fully bound description of an SELinux boolean entry. Fully bound description of an SELinux port entry. Fully bound description of an SELinux file context entry. Fully bound description of an SELinux node entry. Fully bound description of an SELinux login entry. Fully bound description of an SELinux user entry. Fully bound description of an SELinux interface entry. Fully bound description of an SELinux permissive domain entry. Fully bound description of an SELinux module entry. Fully bound description of a POSIXUser entry. Fully bound description of a POSIXGroup entry. Elements within Group tags only apply to clients that are members of that group (or vice-versa; see #element_negate below) Elements within Client tags only apply to the named client (or vice-versa; see #element_negate below) Nesting Bundle tags is allowed in order to support XInclude within Bundles. Nesting Bundle tags to specify dependencies to other bundles. A **BundlerGroupType** is a tag used to provide logic. Child entries of a BundlerGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`BundlerGroupType:negate` can be set to negate the sense of the match. 
The group name Negate the sense of this group or client; i.e., entries within this tag are only used on clients that are not members of the group, or that have hostnames that do not match. The name of the required bundle. Specify how to handle modifications in the required bundle. You can either ignore the modifications (this is the default) or you can inherit the modifications so that Services in the current Bundle are restarted if the required Bundle is modified. Freeform description of the bundle. If set to ``true``, indicates that the bundle is a collection of independent entries, and that service restarts and modified actions should not be performed. See :ref:`server-plugins-structures-bundler-magic` for more. **Deprecated.** The name of the bundle. If present, this must match the bundle filename, minus the extension. Specifying the name explicitly is deprecated. Bundle schema version. URL of master version (for common repo) Master version control revision. Override the global lax_decryption setting in ``bcfg2.conf``. A bundle describes a group of inter-dependent configuration entries, such as the combination of packages, configuration files, and service activations that comprise typical Unix daemons. Bundles are used to add groups of configuration entries to the inventory of client configurations, as opposed to describing particular versions of those entries. For example, a bundle could say that the configuration file ``/etc/passwd`` should be included in a configuration, but will not describe the particular version of ``/etc/passwd`` that a given client will receive. schemas/clients.xsd000066400000000000000000000135761303523157100146760ustar00rootroot00000000000000 Bcfg2 client list schema Describe a Bcfg2 client machine. **Alias** allows you to set alternative hostname and IP address pairs that also resolve to this client. Hostname of the alternative client name-address pair. IP address of the alternative client name-address pair. Hostname of client. This needs to be the name (probably FQDN) returned by a reverse lookup on the connecting IP address. Profile group name to associate this client with. Deprecated. Authentication mode for the client. See :ref:`appendix-guides-authentication` for details on the values available. Establishes a name for this cilent that can be used to bypass dns-based client resolution. Establishes a per-client password that can be used instead of the global password. Deprecated. Use :xml:attribute:`ClientType:floating` instead. Allows requests to come from any IP address, rather than requiring requests to come from an IP associated with this client. Note that, since this forces the Bcfg2 server to trust any connection that claims to be from this hostname, it can introduce security issues. Requires the use of :xml:attribute:`ClientType:password` for this client. Deprecated. Establishes an extra IP address that resolves to this client. The version of the Bcfg2 client running on this machine. You should not have to set this manually, but can let the Bcfg2 server set it automatically. Metadata client list top-level tag Client schema version schemas/decisions.xsd000066400000000000000000000060031303523157100152000ustar00rootroot00000000000000 decision list schema for bcfg2 Narayan Desai, Argonne National Laboratory A **DecisionsGroupType** is a tag used to provide logic. Child entries of a DecisionsGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. 
:xml:attribute:`DecisionsGroupType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`DecisionsGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/defaults.xsd000066400000000000000000000034611303523157100150340ustar00rootroot00000000000000 string enumeration definitions for bcfg2 Narayan Desai, Argonne National Laboratory Override the global lax_decryption setting in ``bcfg2.conf``. schemas/deps.xsd000066400000000000000000000027331303523157100141610ustar00rootroot00000000000000 dependency schema for bcfg2 Narayan Desai, Argonne National Laboratory schemas/fileprobes.xsd000066400000000000000000000035531303523157100153610ustar00rootroot00000000000000 FileProbes plugin config schema for bcfg2 Chris St. Pierre Override the global lax_decryption setting in ``bcfg2.conf``. schemas/genshi.xsd000066400000000000000000000277161303523157100145130ustar00rootroot00000000000000 Genshi XML templating language schema `for directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id3>`_ The loop iterator `if directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id1>`_ The statement giving the value to test `match directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id5>`_ XPath expression to search for in the template. Whether the engine should stop looking for more matching elements after the first match. Use this on match templates that match elements that can only occur once in the stream, such as the <head> or <body> elements in an HTML template, or elements with a specific ID. Whether the matched content should be buffered in memory. Buffering can improve performance a bit at the cost of needing more memory during rendering. Buffering is *required* for match templates that contain more than one invocation of the ``select()`` function. If there is only one call, and the matched content can potentially be very long, consider disabling buffering to avoid excessive memory use. Whether the match template should be applied to its own output. Note that once implies non-recursive behavior, so this attribute only needs to be set for match templates that don't also have once set. `def directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id4>`_ The function prototype `with directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#py-with>`_ A semicolon-delimited list of variables to define and their values. `replace directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id8>`_ The value to replace the contents with. `choose directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id2>`_ The ``when`` directive is used inside :xml:type:`py:chooseType` or :xml:attribute:`py:genshiAttrs:choose` to handle a single specific condition. If ``test`` is set, the child :xml:element:`py:when` directives are tested for equality to the value of the expression. The ``otherwise`` directive is used inside :xml:type:`py:chooseType` or :xml:attribute:`py:genshiAttrs:choose` to handle all conditions not handled by a :xml:element:`py:when`. Most Genshi templating directives can be used either as standalone elements or as attributes on existing elements. 
This element group defines the standalone tags. Most Genshi templating directives can be used either as standalone elements or as attributes on existing elements. This attribute group defines the attribute directives. `if directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id1>`_ `choose directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id2>`_ The ``when`` directive is used inside :xml:type:`py:chooseType` or :xml:attribute:`py:genshiAttrs:choose` to handle a single specific condition. The ``otherwise`` directive is used inside :xml:type:`py:chooseType` or :xml:attribute:`py:genshiAttrs:choose` to handle all conditions not handled by a :xml:element:`py:when`. `for directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id3>`_ `def directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id4>`_ `match directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id5>`_ `with directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#py-with>`_ `attrs directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id6>`_ `content directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id7>`_ `replace directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id8>`_ `strip directive <http://genshi.edgewall.org/wiki/Documentation/xml-templates.html#id9>`_ schemas/grouplogic.xsd000066400000000000000000000075071303523157100154040ustar00rootroot00000000000000 GroupLogic schema for bcfg2 A **GroupLogicDeclarationType** declares a Group to be added to a client. The group name The top-level tag of a GroupLogic configuration file. Elements within Group tags only apply to clients that are members of that group (or vice-versa; see #element_negate below) Elements within Client tags only apply to the named client (or vice-versa; see #element_negate below) Nesting GroupLogic tags is allowed in order to support XInclude. A **GroupLogicContainerType** is a tag used to provide logic. Child entries of a GroupLogicContainerType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`GroupLogicContainerType:negate` can be set to negate the sense of the match. The group name Negate the sense of this group or client; i.e., entries within this tag are only used on clients that are not members of the group, or that have hostnames that do not match. A GroupLogic file is a genshi file that can be used to dynamically add additional groups to a client. schemas/grouppatterns.xsd000066400000000000000000000021041303523157100161330ustar00rootroot00000000000000 group patterns config schema for bcfg2 Narayan Desai, Argonne National Laboratory schemas/info.xsd000066400000000000000000000127411303523157100141610ustar00rootroot00000000000000 ``info.xml`` schema for Bcfg2 The Info tag specifies metadata (ownership, permissions, etc.) for entries that are generated by various plugins. Encoding of the file for tranfer to the client. Use ``base64`` for binary files. Sets group of the file. Important entries are installed first during client execution. Sets owner of the file. Sets the mode of the file from the octal value given. Sets the SELinux context of the file, or sets to the default context for that path set by policy if set to the special value ``__default__``. If true, files that are replaced will be backed up first. The contents of sensitive entries aren't included in reports. 
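Taken together, these attributes are normally set in a small ``info.xml`` placed next to the managed file. The sketch below shows one plausible layout, parsed with lxml; the ``FileInfo`` root and all values are illustrative assumptions, and the conditional tags used inside it are described next::

    from lxml import etree

    # Illustrative info.xml: default ownership/mode metadata plus a
    # group-conditional override that adds an SELinux context.
    INFO_XML = """
    <FileInfo>
      <Info owner="root" group="root" mode="0600" sensitive="true"/>
      <Group name="selinux">
        <Info owner="root" group="root" mode="0600"
              secontext="system_u:object_r:etc_t:s0"/>
      </Group>
    </FileInfo>
    """

    info = etree.fromstring(INFO_XML)
    print("default Info attributes: %s" % dict(info.find("Info").attrib))
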
An **InfoGroupType** is a ``info.xml`` tag used to provide logic. Child entries of such a tag only apply to machines that match the condition specified -- membership in a group, a matching client name, or a matching path for the file being generated. :xml:attribute:`InfoGroupType:negate` can be set to negate the sense of the match. The name of the client or group, or the full path to match on. Child entries will only apply to this client or group (unless :xml:attribute:`InfoGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group, does not have the given client name, or the path names do not match. Top-level tag for ``info.xml``. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/metadata.xsd000066400000000000000000000152221303523157100150030ustar00rootroot00000000000000 Bcfg2 schema for declaring groups and associating groups with bundles. Declaration of a bundle as a member of a group. The bundle name The Group tag serves two purposes: * If it is at the top level of ``groups.xml`` (i.e., its direct parent is :xml:element:`Groups`), or if it has no children, then it is considered to declare a new group, and :xml:attribute:`MetadataGroupType:profile`, :xml:attribute:`MetadataGroupType:public`, :xml:attribute:`MetadataGroupType:category`, and :xml:attribute:`MetadataGroupType:default` are parsed. * If it is not at the top level of ``groups.xml`` *and* it has children, then it is considered to be a conditional; its children only apply to clients that are already members in the group. The attributes listed above are not parsed. Name of the group Mark the group as a profile, which allows a client to be directly associated with this group in :ref:`server-plugins-grouping-metadata-clients-xml`. Mark the group as public, which allows any client to assert membership in the group with ``bcfg2 -p``. Set as the profile to use for clients that are not associated with any profile explicitly in :ref:`server-plugins-grouping-metadata-clients-xml`. Setting ``default`` to ``true`` requires setting :xml:attribute:`MetadataGroupType:profile` to ``true`` as well. Assign the group to the given category. A client can only be a member of one group in a given category. When the Group tag is used as a conditional, only apply the child elements if the named group does not match. When the Group tag is used as a declaration, do not apply the named group to matching clients. Client tags are conditionals, and can be used to set per-client groups and bundles. The name of the client. Only apply the child tags if the named client does not match. Metadata group list top-level tag Nested ``Groups`` tags allowed to support XInclude Group schema version URI of master version (for common repository) Master version control revision schemas/nagiosgen.xsd000066400000000000000000000033571303523157100152030ustar00rootroot00000000000000 NagiosGen config schema for bcfg2 Chris St. Pierre Override the global lax_decryption setting in ``bcfg2.conf``. schemas/packages.xsd000066400000000000000000000244131303523157100150030ustar00rootroot00000000000000 packages config schema for bcfg2 Narayan Desai, Argonne National Laboratory **RepoOptionsType** can be used to specify arbitrary repository options. The options given in this tag will only be used on the Bcfg2 server, not on the clients. The options given in this tag will only be used on the Bcfg2 clients, not on the server. 
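In practice this tag is nested inside a ``Source`` in the Packages ``sources.xml``. The fragment below is a hypothetical example showing an ``Options`` tag that carries an arbitrary repository option (``proxy``), which, as described next, is passed through to the generated repository configuration; the attribute names that restrict an option to only the server or only the client side are defined by the schema itself and are not guessed at here::

    from lxml import etree

    # Hypothetical sources.xml fragment: a yum source whose Options tag
    # passes an arbitrary option through to the generated repo config.
    SOURCES = """
    <Sources>
      <Source type="yum"
              rawurl="http://mirror.example.com/centos/7/os/x86_64/">
        <Options proxy="http://proxy.example.com:3128"/>
        <Blacklist>kernel-debug</Blacklist>
      </Source>
    </Sources>
    """

    source = etree.fromstring(SOURCES).find("Source")
    print("repo options passed through: %s" %
          dict(source.find("Options").attrib))
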
All other (arbitrary) attributes will be added to the repository configuration. **SourceType** elements are used to specify software sources (i.e., repositories) for the Packages plugin. Components are used to build multiple repository URLs from a single :xml:element:`Source` tag. This is only meaningful if the :xml:attribute:`SourceType:url` attribute is specified; see that attribute above for more detail. The architecture(s) of the repository. A client must be a member of one of the listed architecture groups in order for this source to apply to the client. Additionally, if the :xml:attribute:`SourceType:url` attribute is specified, the :xml:element:`Arch` tag is used to generate URLs. See :xml:attribute:`the url attribute <SourceType:url>` for more detail. The GPG key(s) for the repository. This only applies to sources with :xml:attribute:`SourceType:type` = ``yum``. If GPG keys are specified, then GPG checking will be automatically enabled for the repository, both on the Bcfg2 server (if :ref:`yum libraries <native-yum-libraries>` are in use) and on the Bcfg2 client (if you use :ref:`server-plugins-generators-packages` to :ref:`generate your Yum config <generating-client-configs>`). Arbitrary options to be used in the repository configuration. Blacklist the given package(s) from the :ref:`server-plugins-generators-packages` plugin. This prevents them from being included in automatically-resolved dependencies. If **Whitelist** is specified, *only* packages listed will be included by the :ref:`server-plugins-generators-packages` plugin. Include packages recommended as dependencies by APT. This only applies to sources with :xml:attribute:`SourceType:type` = ``apt``. You must regenerate the Packages cache after changing this attribute. Include essential packages from this repo by default (i.e., without needing to specify them in a bundle). This only applies to sources with :xml:attribute:`SourceType:type` = ``apt``. The type of the repository. This corresponds to the Packages plugin driver that will handle the source. The :ref:`Pulp <pulp-source-support>` repository ID for this repo. This only applies to sources with :xml:attribute:`SourceType:type` = ``yum``. Due to the amount of data that can be queried directly from Pulp, there's rarely a need to supply other attributes. Include ``deb-src`` lines in the generated APT configuration. This only applies to sources with :xml:attribute:`SourceType:type` = ``apt``. The base URL to use when generating URLs for this source. If :xml:attribute:`SourceType:url` is used, you must also provide the :xml:element:`Arch` tag, at least one :xml:element:`Component` tag, and the :xml:attribute:`SourceType:version` attribute. You must not specify :xml:attribute:`SourceType:rawurl`. For each combination of component and Arch tag, a URL is created in the format:: <url>/<version>/<component>/<arch> The raw URL to the (single) repository defined by this source. :xml:element:`Component` and :xml:attribute:`SourceType:version` are ignored if this is given. The OS version this source applies to. This is used to generate URLs if the :xml:attribute:`SourceType:url` attribute is given, and ignored otherwise. Specifiy an explicit name for the source and do not generate it automatically. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/pathentry.xsd000066400000000000000000000032751303523157100152460ustar00rootroot00000000000000 path entry schema for bcfg2 Narayan Desai, Argonne National Laboratory Abstract description of a path to be installed. 
This can either be a single explicit path (e.g., ``<Path name="/etc/foo.conf"/>``) or a glob that matches a set of paths (e.g., ``<Path glob="/etc/foo/*"/>``). Path globbing may not work for some dynamically handled Path entries, for instance :ref:`Packages client configs <generating-client-configs>`. Install the single named path. Either ``name`` or :xml:attribute:`PathEntry:glob` must be specified. Install all Cfg entries matching the given glob. Either ``glob`` or :xml:attribute:`PathEntry:name` must be specified. schemas/pkglist.xsd000066400000000000000000000075411303523157100147050ustar00rootroot00000000000000 package list schema for bcfg2 Narayan Desai, Argonne National Laboratory An **PackageContainerType** is a tag used to provide logic. Child entries of an PackageContainerType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`PackageContainerType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`PackageContainerType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. The top-level tag in a :ref:`server-plugins-generators-pkgmgr` XML package list. Sets the priority for rules in this file for :ref:`server-plugins-generators-rules`. The higher value wins. The package type, which determines which client driver will handle installation of packages in this package list. The URI to the repository that data in this package list file was parsed from. Comma-separated list of architectures of packages in this package list that should be installed. Filename creation rules for multiarch packages. schemas/pkgtype.xsd000066400000000000000000000307511303523157100147120ustar00rootroot00000000000000 package list schema for bcfg2 Narayan Desai, Argonne National Laboratory Abstract description of a package or package group to be installed. Install the named package. Either ``name`` or :xml:attribute:`PackageStructure:group` must be specified. Install the named package group. Package groups are only supported for Pac and Yum :xml:element:`Source` repositories. Either ``group`` or :xml:attribute:`PackageStructure:name` must be specified. Whether or not to verify the package. The package set to select from a given package group. Only meaningful if :xml:attribute:`PackageStructure:group` is specified. Whether also the recommended packages should be installed. This is currently only used with the :ref:`APT <client-tools-apt>` driver. Concrete specification of a package to be installed. A package can be specified in one of two ways: * A single Package tag that lists all of the attributes for the single instance of the package that should be installed. * A Package tag with any number of :xml:element:`Instance` children, each of which lists the attributes of an instance of the package that should be installed. In this case, the Package tag should only have :xml:attribute:`PackageType:name` and :xml:attribute:`PackageType:type`. Note that many of the attributes listed below are specific to one or a few package drivers. An Instance element describes a single instance of a package that may have several different versions, arches, etc., installed at once. The architecture of the package to be installed. The epoch of the package to be installed. The version of the package to be installed. 
See :xml:attribute:`PackageType:version` for details. The release of the package to be installed. Package file name. No name parsing is performed, so no extra fields get set. This is only used for manual maintenance of ``gpg-pubkey`` packages with the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. Whether or not to perform full package verification (file integrity, etc.) on this package with the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. Comma-separated list of flags to pass to the package verification routines of the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. See ``man rpm`` for details on the flags. Whether or not to install missing packages. This is only honored by the the :ref:`RPM <client-tools-yum>` driver. Whether or not to upgrade or downgrade packages that are installed, but have the wrong version. This is only honored by the :ref:`RPM <client-tools-yum>` driver. Whether or not to reinstall packages that fail verification. This is only honored by the :ref:`RPM <client-tools-yum>` driver. Package name. The architecture of the package to be installed. The version of the package to be installed. This should *only* be the version, i.e., not the release. Release should be specified in :xml:attribute:`PackageType:release`, and it is an error to append the release to this. This can also be one of two "special" values: * ``auto`` will select the newest version of the package available. * ``any`` will select any version of the package, and can be used to ensure that a package is installed without requiring any particular version. The release of the package to be installed. Package file name. Several other attributes (name, version) can be automatically defined based on regular expressions defined in the :ref:`server-plugins-generators-pkgmgr` plugin, which is the only plugin with which this is useful. Whether or not to perform package verification. This is not supported by the :ref:`YUM <client-tools-yum>` driver. Package file name. No name parsing is performed, so no extra fields get set. This is only used for some edge cases with :ref:`server-plugins-generators-pkgmgr`. Comma-separated list of architectures of this package that should be installed. This is only used by the :ref:`server-plugins-generators-pkgmgr` plugin. Filename creation rules for multiarch packages. This is only used by the :ref:`server-plugins-generators-pkgmgr` plugin. The package type, which determines which client driver will handle installation of this package. The name of the package for installing (as opposed to the name when verifying) with the Blast and OpenCSW drivers. Whether or not to perform basic package checks (version, release, etc.) on this package with the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. Whether or not to perform full package verification (file integrity, etc.) on this package with the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. Flags to pass to the package verification routines of the :ref:`YUM, YUM24, or RPM <client-tools-yum>` driver. schemas/privkey.xsd000066400000000000000000000131571303523157100147210ustar00rootroot00000000000000 Schema for :ref:`server-plugins-generators-cfg-sshkeys` ``privkey.xml`` A **PrivateKeyGroupType** is a tag used to provide logic. Child entries of a PrivateKeyGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`PrivateKeyGroupType:negate` can be set to negate the sense of the match. 
The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`PrivateKeyGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. Available private key formats Specify the private key passphrase. The name of the passphrase to use to encrypt this private key on the filesystem (in Bcfg2). Specify parameters for creating the private key Number of bits in the key. See :manpage:`ssh-keygen(1)` for defaults. Key type to create. Top-level tag for describing a generated SSH key pair. Create keys on a per-host basis (rather than on a per-group basis). Create keys specific to the given category, instead of specific to the category given in ``bcfg2.conf``. Create group-specific keys with the given priority. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/pubkey.xsd000066400000000000000000000011721303523157100145210ustar00rootroot00000000000000 Schema for :ref:`server-plugins-generators-cfg-sshkeys` ``pubkey.xml`` Top-level tag for flagging a generated SSH public key. schemas/report-configuration.xsd000066400000000000000000000052101303523157100173770ustar00rootroot00000000000000 statistical report configuration schema for bcfg2 Joey Hagedorn, Argonne National Laboratory schemas/rules.xsd000066400000000000000000000171171303523157100143620ustar00rootroot00000000000000 string enumeration definitions for bcfg2 Narayan Desai, Argonne National Laboratory Fully bound description of a software package to be managed. Fully bound description of a filesystem path to be handled by the POSIX driver. Fully bound description of a system service to be managed. Fully bound description of a command to be run. Fully bound description of an SELinux boolean entry. Fully bound description of an SELinux port entry. Fully bound description of an SELinux file context entry. Fully bound description of an SELinux node entry. Fully bound description of an SELinux login entry. Fully bound description of an SELinux user entry. Fully bound description of an SELinux interface entry. Fully bound description of an SELinux permissive domain entry. Fully bound description of an SELinux module entry. Fully bound description of a POSIXUser entry. Fully bound description of a POSIXGroup entry. Elements within Group tags only apply to clients that are members of that group (or vice-versa, if :xml:attribute:`RContainerType:negate` is set) Elements within Client tags only apply to the named client (or vice-versa, if :xml:attribute:`RContainerType:negate` is set) An **RContainerType** is a Rules tag used to provide logic. Child entries of an RContainerType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`RContainerType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`RContainerType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. The top-level tag for concrete descriptions of entries in :ref:`server-plugins-generators-rules`. Sets the priority for rules in this file for :ref:`server-plugins-generators-rules`. The higher value wins. Override the global lax_decryption setting in ``bcfg2.conf``. 
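To make the structure concrete, the sketch below shows a small, hypothetical Rules file that fully binds a Service and a Package entry and adds one group conditional; all names, driver types, and the priority are examples only::

    from lxml import etree

    # Hypothetical Rules file: concrete (fully bound) entries plus one
    # Group conditional of the RContainerType form described above.
    RULES = """
    <Rules priority="10">
      <Service name="sshd" type="chkconfig" status="on"/>
      <Package name="vim-enhanced" version="any"/>
      <Group name="rhel7">
        <Service name="sshd" type="systemd" status="on"/>
      </Group>
    </Rules>
    """

    rules = etree.fromstring(RULES)
    print("priority: %s" % rules.get("priority"))
    for entry in rules.iter("Service", "Package"):
        print("%s %s" % (entry.tag, dict(entry.attrib)))
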
schemas/selinux.xsd000066400000000000000000000251471303523157100147210ustar00rootroot00000000000000 SELinux element definitions for bcfg2 Concrete SELinux boolean entry Name of the boolean Value of the boolean Concrete SELinux port entry Port number or range and protocol SELinux type to apply to this port SELinux MLS range to apply to this port Port number or range and protocol for SEPort entries. ``<port>/<proto>`` or ``<start>-<end>/<proto>`` Concrete SELinux file context ("fcontext") entry Regular expression file specification SELinux type to apply to files matching this specification File type to match SELinux MLS range to apply to files matching this specification Concrete SELinux node entry IP address and netmask of node SELinux type to apply to this node Protocol SELinux MLS range to apply to this node IP address and netmask for SENode entries. Netmask can be numeric or dotted-quad. ``<addr>/<netmask>``. Netmask can be numeric (``/16``) or dotted-quad (``/255.255.0.0``). Concrete SELinux login entry Unix username SELinux username SELinux MLS range to apply to this user Concrete SELinux user entry SELinux username Space-separated list of rules Home directory context prefix SELinux MLS range to apply to this user Concrete SELinux interface entry Interface name SELinux type to apply to this interface SELinux MLS range to apply to this interface Concrete SELinux permissive domain entry SELinux type to make permissive Concrete SELinux module entry SELinux module name or filename Disable this module schemas/servicetype.xsd000066400000000000000000000077141303523157100155740ustar00rootroot00000000000000 services schema for bcfg2 Narayan Desai, Argonne National Laboratory Concrete description of a service entry. Note that, due to the great proliferation of init systems, many of the attributes listed only apply to one or a few client tools. The name of the service. Whether the service should start at boot. The default value corresponds to the value of the status attribute. Whether the service should be on or off when the bcfg2 client is run. This attribute may have different behavior depending on the characteristics of the client tool. If set to "ignore", then the status of the service will not be checked. Whether or not to restart the service when the bundle is modified. (New in 1.3; replaces "mode" attribute.) Whether or not to install the service initially. (New in 1.3; replaces "mode" attribute.) Driver to use on the client to manage this service. The resource identifier for SMF services. Order for service startup. Only meaningful for DebInit services. Command to pass to the service management system when restarting a service. Parameters to pass to the service. Only meaningful for Upstart services. schemas/sslca-cert.xsd000066400000000000000000000152331303523157100152650ustar00rootroot00000000000000 Schema for :ref:`server-plugins-generators-cfg-ssl-certificates` ``sslcert.xml`` An **SSLCACertGroupType** is a tag used to provide logic. Child entries of an SSLCACertGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`SSLCACertGroupType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`SSLCACertGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. 
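These conditionals are combined with the ``Cert`` tag (whose attributes are documented just below) in an ``sslcert.xml`` that lives alongside the generated certificate. A hypothetical example follows; the ``CertInfo`` top-level tag name is assumed from the usual Cfg SSLCA layout, and all paths and values are illustrative::

    from lxml import etree

    # Hypothetical sslcert.xml: a default Cert plus a group-conditional
    # override with a shorter lifetime.
    SSLCERT = """
    <CertInfo>
      <subjectAltName>www.example.com</subjectAltName>
      <Cert key="/etc/pki/tls/private/foo.key" days="365"/>
      <Group name="legacy">
        <Cert key="/etc/pki/tls/private/foo.key" days="30"/>
      </Group>
    </CertInfo>
    """

    cert = etree.fromstring(SSLCERT).find("Cert")
    print("key=%s days=%s" % (cert.get("key"), cert.get("days")))
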
Available certificate formats Explicitly specify subject alternative names for the generated certificate. The full path to the key entry to use for this certificate. This is the *client* path; e.g., for a key defined at ``/var/lib/bcfg2/SSLCA/etc/pki/tls/private/foo.key/sslkey.xml``, **key** should be ``/etc/pki/tls/private/foo.key``. The certificate format to produce. The name of the CA (from :ref:`bcfg2.conf <server-plugins-generators-cfg-configuration>`) to use to generate this certificate. Time (in days) the certificate will be valid for. Override the country set in the CA config Override the location set in the CA config Override the state set in the CA config Override the organizational unit set in the CA config Override the organization set in the CA config Override the email address set in the CA config Append the CA chain certificate to the generated certificate (e.g., to produce a certificate in the format required by Nginx.) Top-level tag for describing an SSLCA generated certificate. Override the global lax_decryption setting in ``bcfg2.conf``. schemas/sslca-key.xsd000066400000000000000000000103721303523157100151170ustar00rootroot00000000000000 Schema for :ref:`server-plugins-generators-cfg-ssl-certificates` ``sslkey.xml`` An **SSLCAKeyGroupType** is a tag used to provide logic. Child entries of an SSLCAKeyGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. :xml:attribute:`SSLCAKeyGroupType:negate` can be set to negate the sense of the match. The name of the client or group to match on. Child entries will only apply to this client or group (unless :xml:attribute:`SSLCAKeyGroupType:negate` is set). Negate the sense of the match, so that child entries only apply to a client if it is not a member of the given group or does not have the given name. Available generated key types The key type The key length Top-level tag for describing an SSLCA generated key. Create keys on a per-host basis (rather than on a per-group basis). Create keys specific to the given category, instead of specific to the category given in ``bcfg2.conf``. Create group-specific keys with the given priority. schemas/types.xsd000066400000000000000000000436011303523157100143710ustar00rootroot00000000000000 string enumeration definitions for bcfg2 Narayan Desai, Argonne National Laboratory Action entries are external shell commands that are executed either before bundle installation, after bundle installation or both. When the action is run. Actions with "pre" timing are run after important entries have been installed and before bundle entries are installed. Actions with "post" timing are run after bundle entries are installed. If the action is always run, or is only run when a bundle has been modified. Whether or not to check the return code of the action. If this is "check", then a non-zero return code will result in the entry being flagged as bad. Also execute the action in build mode. The freeform name of the action. The command to run. Whether the command string should be executeed within a shell. If enabled flow control and other shell-specific things can be used. Define POSIX ACLs for a Path entry. ACL type ACL scope. This is omitted for :xml:attribute:`ACLType:type` = ``default``. Permissions for the ACL. This can either be a single octal digit (e.g., ``6`` would indicate read and write, but not execute), or a symbolic mode including 'r', 'w', and 'x'. 
You can include '-' for operations that are not permitted, but it's not required. I.e., all of the following are identical:: perms="5" perms="rx" perms="r-x" User the ACL applies to (with :xml:attribute:`ACLType:scope` = ``user``). Group the ACL applies to (with :xml:attribute:`ACLType:scope` = ``group``). Manage filesystem paths -- files, directories, symlinks, etc. Type of path to manage. Full path. Type of device. Major device number (``block`` and ``char`` devices only). Minor device number (``block`` and ``char`` devices only). Permissions mode in octal format. Owner username or UID number Group name or GID number SELinux context for the path. This should be a full context, not just the type. E.g., ``system_u:object_r:etc_t:s0``, not just ``etc_t``. You can also specify ``__default__``, which will restore the context of the file to the default set by policy. See :ref:`server-selinux` for more information. Important entries are installed first during client execution. Recursively remove files or set permissions, as appropriate. Remove entries that are not in the Bcfg2 specification from the directory. File to link to The file entry has no content. This must be set as a safeguard against accidentally empty content. The VCS backend to checkout contents from. The revision to checkout. The VCS URL to checkout. The name of the encryption passphrase that the text content of this tag is encrypted with. The Augeas lens to use when editing files in a non-standard (according to Augeas) location. Specify additional supplementary groups for the POSIXUser The name of the supplementary group. This can also be specified as content of the tag, although that is deprecated. The POSIXUser tag allows you to create users on client machines. Username User ID number. If this is not specified, each client is allowed to set the UID. Name of the user's primary group. If this is not set, the user's primary group will be the same as the username. This field is typically used to record general information about the account or its user(s) such as their real name and phone number. If this is not set, the GECOS will be the same as the username. User's home directory. Default is ``/root`` for the root user, ``/home/<username>`` otherwise. User's shell The POSIXGroup tag allows you to create groups on client machines. Username Group ID number. If this is not specified, each client is allowed to set the GID. schemas/xinclude.xsd000066400000000000000000000031521303523157100150350ustar00rootroot00000000000000 The official XInclude XML Schema document is not normative or deterministic. This schema implements only the features of XInclude that are used in Bcfg2 in a manner that is deterministic (i.e., passes XML validation). schemas/xml.xsd000066400000000000000000000133051303523157100140230ustar00rootroot00000000000000 See http://www.w3.org/XML/1998/namespace.html and http://www.w3.org/TR/REC-xml for information about this namespace. This schema document describes the XML namespace, in a form suitable for import by other schema documents. Note that local names in this namespace are intended to be defined only by the World Wide Web Consortium or its subgroups. 
The following names are currently defined in this namespace and should not be used with conflicting semantics by any Working Group, specification, or document instance: base (as an attribute name): denotes an attribute whose value provides a URI to be used as the base for interpreting any relative URIs in the scope of the element on which it appears; its value is inherited. This name is reserved by virtue of its definition in the XML Base specification. id (as an attribute name): denotes an attribute whose value should be interpreted as if declared to be of type ID. The xml:id specification is not yet a W3C Recommendation, but this attribute is included here to facilitate experimentation with the mechanisms it proposes. Note that it is _not_ included in the specialAttrs attribute group. lang (as an attribute name): denotes an attribute whose value is a language code for the natural language of the content of any element; its value is inherited. This name is reserved by virtue of its definition in the XML specification. space (as an attribute name): denotes an attribute whose value is a keyword indicating what whitespace processing discipline is intended for the content of the element; its value is inherited. This name is reserved by virtue of its definition in the XML specification. Father (in any context at all): denotes Jon Bosak, the chair of the original XML Working Group. This name is reserved by the following decision of the W3C XML Plenary and XML Coordination groups: In appreciation for his vision, leadership and dedication the W3C XML Plenary on this 10th day of February, 2000 reserves for Jon Bosak in perpetuity the XML name xml:Father This schema defines attributes and an attribute group suitable for use by schemas wishing to allow xml:base, xml:lang, xml:space or xml:id attributes on elements they define. To enable this, such a schema must import this schema for the XML namespace, e.g. as follows: <schema . . .> . . . <import namespace="http://www.w3.org/XML/1998/namespace" schemaLocation="http://www.w3.org/2001/xml.xsd"/> Subsequently, qualified reference to any of the attributes or the group defined below will have the desired effect, e.g. <type . . .> . . . <attributeGroup ref="xml:specialAttrs"/> will define a type which will schema-validate an instance element with any of those attributes In keeping with the XML Schema WG's standard versioning policy, this schema document will persist at http://www.w3.org/2005/08/xml.xsd. At the date of issue it can also be found at http://www.w3.org/2001/xml.xsd. The schema document at that URI may however change in the future, in order to remain compatible with the latest version of XML Schema itself, or with the XML namespace itself. In other words, if the XML Schema or XML namespaces change, the version of this document at http://www.w3.org/2001/xml.xsd will change accordingly; the version at http://www.w3.org/2005/08/xml.xsd will not change. Attempting to install the relevant ISO 2- and 3-letter codes as the enumerated possible values is probably never going to be a realistic possibility. See RFC 3066 at http://www.ietf.org/rfc/rfc3066.txt and the IANA registry at http://www.iana.org/assignments/lang-tag-apps.htm for further information. The union allows for the 'un-declaration' of xml:lang with the empty string. See http://www.w3.org/TR/xmlbase/ for information about this attribute. See http://www.w3.org/TR/xml-id/ for information about this attribute. 
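Bcfg2 ships this stock W3C namespace schema, together with the trimmed ``xinclude.xsd`` above, chiefly so that configuration files that pull in other files via XInclude still validate. A rough sketch of resolving such an include with lxml follows; the file and group names are hypothetical::

    from lxml import etree

    # Hypothetical split configuration: the main document xi:includes a
    # second file, which lxml resolves in place via tree.xinclude().
    MAIN = """
    <Groups xmlns:xi="http://www.w3.org/2001/XInclude">
      <Group name="base"/>
      <xi:include href="more-groups.xml"/>
    </Groups>
    """

    tree = etree.ElementTree(etree.fromstring(MAIN))
    try:
        tree.xinclude()   # pulls more-groups.xml into the tree in place
    except etree.XIncludeError as err:
        print("include not resolved in this sketch: %s" % err)
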
setup.py000077500000000000000000000047031303523157100125770ustar00rootroot00000000000000#!/usr/bin/env python from setuptools import setup from glob import glob import sys version_file = 'src/lib/Bcfg2/version.py' try: # python 2 execfile(version_file) except NameError: # py3k exec(compile(open(version_file).read(), version_file, 'exec')) inst_reqs = [ 'lockfile', 'lxml', 'python-daemon', ] # we only need m2crypto on < python2.6 if sys.version_info[:2] < (2, 6): inst_reqs.append('M2Crypto') setup(name="Bcfg2", version=__version__, # Defined in src/lib/Bcfg2/version.py description="Bcfg2 Server", author="Narayan Desai", author_email="desai@mcs.anl.gov", # nosetests test_suite='nose.collector', packages=["Bcfg2", "Bcfg2.Options", "Bcfg2.Client", "Bcfg2.Client.Tools", "Bcfg2.Client.Tools.POSIX", "Bcfg2.Reporting", "Bcfg2.Reporting.Storage", "Bcfg2.Reporting.Transport", "Bcfg2.Reporting.migrations", "Bcfg2.Reporting.templatetags", 'Bcfg2.Server', "Bcfg2.Server.FileMonitor", "Bcfg2.Server.Lint", "Bcfg2.Server.Plugin", "Bcfg2.Server.Plugins", "Bcfg2.Server.Plugins.Packages", "Bcfg2.Server.Plugins.Cfg", "Bcfg2.Server.Reports", "Bcfg2.Server.Reports.reports", ], install_requires=inst_reqs, tests_require=['mock', 'nose'], package_dir={'': 'src/lib', }, package_data={'Bcfg2.Reporting': ['templates/*.html', 'templates/*/*.html', 'templates/*/*.inc']}, scripts=glob('src/sbin/*'), data_files=[('share/bcfg2/schemas', glob('schemas/*.xsd')), ('share/bcfg2/xsl-transforms', glob('reports/xsl-transforms/*.xsl')), ('share/bcfg2/xsl-transforms/xsl-transform-includes', glob('reports/xsl-transforms/xsl-transform-includes/*.xsl')), ('share/bcfg2', glob('reports/reports.wsgi')), ('share/man/man1', glob("man/bcfg2.1")), ('share/man/man5', glob("man/*.5")), ('share/man/man8', glob("man/*.8")), ('share/bcfg2/site_media', glob('reports/site_media/*')), ] ) solaris-ips/000077500000000000000000000000001303523157100133235ustar00rootroot00000000000000solaris-ips/MANIFEST.bcfg2-server.header000066400000000000000000000003451303523157100201730ustar00rootroot00000000000000license ../../LICENSE license=simplified_bsd set name=description value="Configuration management server" set name=pkg.summary value="Configuration management server" set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@1.4.0pre2" solaris-ips/MANIFEST.bcfg2.header000066400000000000000000000004431303523157100166660ustar00rootroot00000000000000license ../../LICENSE license=simplified_bsd set name=description value="Configuration management client" set name=pkg.summary value="Configuration management client" set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.4.0pre2" file usr/bin/bcfg2 group=bin mode=0755 owner=root path=usr/bin/bcfg2 solaris-ips/Makefile000066400000000000000000000011501303523157100147600ustar00rootroot00000000000000#!/usr/bin/gmake VERS=1.4.0pre2-1 PYVERSION := $(shell python -c "import sys; print sys.version[0:3]") default: clean package package: -mkdir tmp tmp/bcfg2-server tmp/bcfg2 -mkdir -p build/lib/$(PYVERSION)/site-packages -cd ../ && PYTHONPATH=$(PYTHONPATH):$(PWD)/build/lib/python2.6/site-packages/ python setup.py install --single-version-externally-managed --record=/dev/null --prefix=$(PWD)/build/usr #setuptools appears to use a restictive umask -chmod -R o+r build/ -chmod +x build/usr/bin/bcfg2 -sh ./gen-manifests.sh clean: -rm -rf tmp build -rm -rf MANIFEST.bcfg2 -rm -rf MANIFEST.bcfg2-server solaris-ips/README000066400000000000000000000007541303523157100142110ustar00rootroot00000000000000BUILDING -------- Dependancies: gmake Usage: gmake 
PUBLISHING ---------- Modify MANIFEST.bcfg2 and MANIFEST.bcfg2-server to set your publisher name in the fmri, e.g. Change set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.2.4" to set name=pkg.fmri value="pkg://example.com/bcfg2@1.2.4" Then run the pkgsend publish, i.e. pkgsend publish -s http://example.com/path/to/repo -d build MANIFEST.bcfg2 pkgsend publish -s http://example.com/path/to/repo -d build MANIFEST.bcfg2-server solaris-ips/gen-manifests.sh000066400000000000000000000011271303523157100164200ustar00rootroot00000000000000#!/usr/bin/sh #bcfg2 cat MANIFEST.bcfg2.header > MANIFEST.bcfg2 pkgsend generate build | grep man[15] >> MANIFEST.bcfg2 pkgsend generate build | grep Bcfg2/[^/]*.py$ >> MANIFEST.bcfg2 pkgsend generate build | grep Bcfg2/Client/.*.py$ >> MANIFEST.bcfg2 #bcfg2-server cat MANIFEST.bcfg2-server.header > MANIFEST.bcfg2-server pkgsend generate build | grep man[8] >> MANIFEST.bcfg2-server pkgsend generate build | grep share/bcfg2 >> MANIFEST.bcfg2-server pkgsend generate build | grep bin/bcfg2- >> MANIFEST.bcfg2-server pkgsend generate build | grep Bcfg2/Server/.*.py$ >> MANIFEST.bcfg2-server solaris-ips/pkginfo.bcfg2000066400000000000000000000003201303523157100156600ustar00rootroot00000000000000PKG="SCbcfg2" NAME="bcfg2" ARCH="sparc" VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" PSTAMP="Bcfg2 Developers" BASEDIR="/opt/csw" CLASSES="none" solaris-ips/pkginfo.bcfg2-server000066400000000000000000000003321303523157100171670ustar00rootroot00000000000000PKG="SCbcfg2-server" NAME="bcfg2-server" ARCH="sparc" VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" PSTAMP="Bcfg2 Developers" BASEDIR="/usr" CLASSES="none" solaris/000077500000000000000000000000001303523157100125325ustar00rootroot00000000000000solaris/Makefile000066400000000000000000000022741303523157100141770ustar00rootroot00000000000000#!/usr/sfw/bin/gmake PYTHON="/usr/local/bin/python" VERS=1.4.0pre2-1 PYVERSION := $(shell $(PYTHON) -c "import sys; print sys.version[0:3]") default: clean package package: -mkdir tmp tmp/bcfg2-server tmp/bcfg2 -mkdir -p build/lib/$(PYVERSION)/site-packages -cd ../ && PYTHONPATH=$(PYTHONPATH):$(PWD)/build/lib/python$(PYVERSION)/site-packages/ $(PYTHON) setup.py install --single-version-externally-managed --record=/dev/null --prefix=$(PWD)/build #setuptools appears to use a restictive umask -chmod -R o+r build/ -cat build/bin/bcfg2 | sed -e 's!/usr/bin/python!$(PYTHON)!' > build/bin/bcfg2.new && mv build/bin/bcfg2.new build/bin/bcfg2 -chmod +x build/bin/bcfg2 -sh ./gen-prototypes.sh -pkgmk -o -a `uname -m` -f prototype.bcfg2 -d $(PWD)/tmp -r $(PWD)/build -pkgmk -o -a `uname -m` -f prototype.bcfg2-server -d $(PWD)/tmp -r $(PWD)/build -pkgtrans -o -s $(PWD)/tmp $(PWD)/bcfg2-$(VERS) SCbcfg2 -pkgtrans -o -s $(PWD)/tmp $(PWD)/bcfg2-server-$(VERS) SCbcfg2-server -gzip -f $(PWD)/bcfg2-$(VERS) -gzip -f $(PWD)/bcfg2-server-$(VERS) clean: -rm -rf tmp build -rm -rf bcfg2-$(VERS).gz bcfg2-server-$(VERS).gz -rm -rf prototype.bcfg2.fixed prototype.bcfg2-server.fixed -rm -f prototype.* solaris/bcfg2-server000077500000000000000000000006211303523157100147460ustar00rootroot00000000000000#!/bin/sh # # This file belongs in /lib/svc/method . 
/lib/svc/share/smf_include.sh ACTION="$1" test "x$ACTION" = x && exit $SMF_EXIT_ERR_CONFIG test "x$ACTION" = xrefresh && ACTION="reload" if test "x$ACTION" = xmanifest; then echo "/var/svc/manifest/site/bcfg2-server.xml" exit $SMF_EXIT_OK; fi /etc/init.d/bcfg2-server $ACTION RC=$? test $RC == 0 && exit $SMF_EXIT_OK exit $SMF_EXIT_ERR_FATAL solaris/bcfg2-server.xml000066400000000000000000000035401303523157100155450ustar00rootroot00000000000000 solaris/gen-prototypes.sh000066400000000000000000000020301303523157100160600ustar00rootroot00000000000000#!/bin/sh cd build PP="./lib/python${PYVERSION}/site-packages/" #bcfg2 echo "i pkginfo=./pkginfo.bcfg2" > ../prototype.tmp find . | grep man[15] | pkgproto >> ../prototype.tmp echo "./bin" | pkgproto >> ../prototype.tmp echo "./bin/bcfg2" | pkgproto >> ../prototype.tmp echo "${PP}Bcfg2" | pkgproto >> ../prototype.tmp ls -1 ${PP}Bcfg2/*.py | pkgproto >> ../prototype.tmp find ${PP}Bcfg2/Client/ ! -name "*.pyc" | pkgproto >> ../prototype.tmp sed "s/`id | sed 's/uid=[0-9]*(\(.*\)) gid=[0-9]*(\(.*\))/\1 \2/'`/bin bin/" ../prototype.tmp > ../prototype.bcfg2 #bcfg2-server echo "i pkginfo=./pkginfo.bcfg2-server" > ../prototype.tmp find . | grep man8 | pkgproto >> ../prototype.tmp find share/bcfg2 | pkgproto >> ../prototype.tmp echo "./bin" | pkgproto >> ../prototype.tmp ls -1 bin/bcfg2-* | pkgproto >> ../prototype.tmp find ${PP}Bcfg2/Server/ ! -name "*.pyc" | pkgproto >> ../prototype.tmp sed "s/`id | sed 's/uid=[0-9]*(\(.*\)) gid=[0-9]*(\(.*\))/\1 \2/'`/bin bin/" ../prototype.tmp > ../prototype.bcfg2-server rm ../prototype.tmp solaris/pkginfo.bcfg2000066400000000000000000000003201303523157100150670ustar00rootroot00000000000000PKG="SCbcfg2" NAME="bcfg2" ARCH="sparc" VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" PSTAMP="Bcfg2 Developers" BASEDIR="/opt/csw" CLASSES="none" solaris/pkginfo.bcfg2-server000066400000000000000000000003321303523157100163760ustar00rootroot00000000000000PKG="SCbcfg2-server" NAME="bcfg2-server" ARCH="sparc" VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" PSTAMP="Bcfg2 Developers" BASEDIR="/usr" CLASSES="none" solaris/prototype.bcfg2000066400000000000000000000047311303523157100155110ustar00rootroot00000000000000i pkginfo=./pkginfo.bcfg2 d none lib 0755 root bin d none lib/PYVERSION 0755 root bin d none lib/PYVERSION/site-packages 0755 root bin d none lib/PYVERSION/site-packages/Bcfg2 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Component.py 0644 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Client 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/XML.py 0644 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Client/Tools 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Action.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/IPS.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/FreeBSDInit.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/RPM.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Chkconfig.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/RcUpdate.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/APT.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/POSIX.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/SYSV.py 0644 bin bin f none 
lib/PYVERSION/site-packages/Bcfg2/Client/Tools/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/rpmtools.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/launchd.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/FreeBSDPackage.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Blast.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/YUM.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Portage.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/DebInit.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Encap.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/SMF.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Frame.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Proxy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Logger.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Options.py 0644 bin bin d none bin 0755 root bin f none bin/bcfg2 0755 bin bin d none share 0755 root bin d none share/man 0755 root bin d none share/man/man1 0755 root bin f none share/man/man1/bcfg2.1 0444 root bin solaris/prototype.bcfg2-server000066400000000000000000000122751303523157100170170ustar00rootroot00000000000000i pkginfo=./pkginfo.bcfg2-server d none lib 0755 bin bin d none lib/PYVERSION 0755 bin bin d none lib/PYVERSION/site-packages 0755 bin bin d none lib/PYVERSION/site-packages/Bcfg2 0755 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Server 0755 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Server/Admin 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Tidy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Minestruct.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Init.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Group.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Compare.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Perf.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Query.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Viz.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Pull.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Bundle.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugin.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/__init__.py 0644 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Probes.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Decisions.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Rules.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Packages.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/GroupPatterns.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/SSHbase.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Trigger.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Cfg.py 0644 bin bin f none 
lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Metadata.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Base.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Pkgmgr.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Ohai.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Properties.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Bundler.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/NagiosGen.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Deps.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Svn.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/DBStats.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Git.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/FileMonitor.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Core.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Statistics.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/SSLServer.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Component.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Logger.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Options.py 0644 bin bin d none bin 0755 bin bin f none bin/bcfg2-server 0755 bin bin f none bin/bcfg2-repo-validate 0755 bin bin f none bin/bcfg2-admin 0755 bin bin f none bin/bcfg2-build-reports 0755 bin bin f none bin/bcfg2-info 0755 bin bin f none bin/bcfg2-reports 0755 bin bin d none share 0755 bin bin d none share/bcfg2 0755 bin bin d none share/bcfg2/schemas 0755 bin bin f none share/bcfg2/schemas/packages.xsd 0444 bin bin f none share/bcfg2/schemas/metadata.xsd 0444 bin bin f none share/bcfg2/schemas/base.xsd 0444 bin bin f none share/bcfg2/schemas/services.xsd 0444 bin bin f none share/bcfg2/schemas/decisions.xsd 0444 bin bin f none share/bcfg2/schemas/info.xsd 0444 bin bin f none share/bcfg2/schemas/xml.xsd 0444 bin bin f none share/bcfg2/schemas/atom.xsd 0444 bin bin f none share/bcfg2/schemas/clients.xsd 0444 bin bin f none share/bcfg2/schemas/grouppatterns.xsd 0444 bin bin f none share/bcfg2/schemas/types.xsd 0444 bin bin f none share/bcfg2/schemas/rules.xsd 0444 bin bin f none share/bcfg2/schemas/pkglist.xsd 0444 bin bin f none share/bcfg2/schemas/bundle.xsd 0444 bin bin f none share/bcfg2/schemas/deps.xsd 0444 bin bin f none share/bcfg2/schemas/pkgtype.xsd 0444 bin bin f none share/bcfg2/schemas/servicetype.xsd 0444 bin bin f none share/bcfg2/schemas/report-configuration.xsd 0444 bin bin d none share/man 0755 bin bin d none share/man/man1 0755 bin bin d none share/man/man5 0755 bin bin d none share/man/man8 0755 bin bin f none share/man/man5/bcfg2.conf.5 0444 bin bin f none share/man/man8/bcfg2-admin.8 0444 bin bin f none share/man/man8/bcfg2-build-reports.8 0444 bin bin f none share/man/man1/bcfg2.1 0444 bin bin f none share/man/man8/bcfg2-repo-validate.8 0444 bin bin f none share/man/man8/bcfg2-server.8 0444 bin bin f none share/man/man8/bcfg2-info.8 0444 bin bin 
src/000077500000000000000000000000001303523157100116455ustar00rootroot00000000000000src/lib/000077500000000000000000000000001303523157100124135ustar00rootroot00000000000000src/lib/Bcfg2/000077500000000000000000000000001303523157100133365ustar00rootroot00000000000000src/lib/Bcfg2/Client/000077500000000000000000000000001303523157100145545ustar00rootroot00000000000000src/lib/Bcfg2/Client/Proxy.py000066400000000000000000000335331303523157100162560ustar00rootroot00000000000000import os.path import re import sys import time import socket import logging import Bcfg2.Options from Bcfg2.Compat import httplib, xmlrpclib, urlparse, quote_plus # The ssl module is provided by either Python 2.6 or a separate ssl # package that works on older versions of Python (see # http://pypi.python.org/pypi/ssl). If neither can be found, look for # M2Crypto instead. try: import ssl SSL_ERROR = ssl.SSLError except ImportError: raise Exception("No SSL module support") version = sys.version_info[:2] has_py26 = version >= (2, 6) has_py32 = version >= (3, 2) __all__ = ["ComponentProxy", "RetryMethod", "SSLHTTPConnection", "XMLRPCTransport"] class ProxyError(Exception): """ ProxyError provides a consistent reporting interface to the various xmlrpclib errors that might arise (mainly ProtocolError and Fault) """ def __init__(self, err): msg = None if isinstance(err, xmlrpclib.ProtocolError): # cut out the password in the URL url = re.sub(r'([^:]+):(.*?)@([^@]+:\d+/)', r'\1:******@\3', err.url) msg = "XML-RPC Protocol Error for %s: %s (%s)" % (url, err.errmsg, err.errcode) elif isinstance(err, xmlrpclib.Fault): msg = "XML-RPC Fault: %s (%s)" % (err.faultString, err.faultCode) else: msg = str(err) Exception.__init__(self, msg) class CertificateError(Exception): def __init__(self, commonName): self.commonName = commonName def __str__(self): return ("Got unallowed commonName %s from server" % self.commonName) _orig_Method = xmlrpclib._Method class RetryMethod(xmlrpclib._Method): """Method with error handling and retries built in.""" log = logging.getLogger('xmlrpc') max_retries = 3 retry_delay = 1 def __call__(self, *args): for retry in range(self.max_retries): if retry >= self.max_retries - 1: final = True else: final = False msg = None try: return _orig_Method.__call__(self, *args) except xmlrpclib.ProtocolError: err = sys.exc_info()[1] msg = "Server failure: Protocol Error: %s %s" % \ (err.errcode, err.errmsg) except xmlrpclib.Fault: msg = sys.exc_info()[1] except socket.error: err = sys.exc_info()[1] if hasattr(err, 'errno') and err.errno == 336265218: msg = "SSL Key error: %s" % err elif hasattr(err, 'errno') and err.errno == 185090050: msg = "SSL CA error: %s" % err elif final: msg = "Server failure: %s" % err except CertificateError: err = sys.exc_info()[1] msg = "Got unallowed commonName %s from server" % \ err.commonName except KeyError: err = sys.exc_info()[1] msg = "Server disallowed connection: %s" % err except ProxyError: err = sys.exc_info()[1] msg = err except: etype, err = sys.exc_info()[:2] msg = "Unknown failure: %s (%s)" % (err, etype.__name__) if msg: if final: self.log.error(msg) raise ProxyError(msg) else: self.log.info(msg) time.sleep(self.retry_delay) xmlrpclib._Method = RetryMethod class SSLHTTPConnection(httplib.HTTPConnection): """Extension of HTTPConnection that implements SSL and related behaviors. 
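    A rough usage sketch; the host name, port, and certificate paths
    below are purely illustrative::

        conn = SSLHTTPConnection('bcfg-server.example.com', 6789,
                                 key='/etc/pki/tls/private/bcfg2.key',
                                 cert='/etc/pki/tls/certs/bcfg2.crt',
                                 ca='/etc/pki/tls/certs/ca.crt',
                                 scns=['bcfg-server.example.com'])
        conn.connect()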
""" def __init__(self, host, port=None, strict=None, timeout=90, key=None, cert=None, ca=None, scns=None, protocol='xmlrpc/tlsv1'): """Initializes the `httplib.HTTPConnection` object and stores security parameters Parameters ---------- host : string Name of host to contact port : int, optional Port on which to contact the host. If none is specified, the default port of 80 will be used unless the `host` string has a port embedded in the form host:port. strict : Boolean, optional Passed to the `httplib.HTTPConnection` constructor and if True, causes the `BadStatusLine` exception to be raised if the status line cannot be parsed as a valid HTTP 1.0 or 1.1 status. timeout : int, optional Causes blocking operations to timeout after `timeout` seconds. key : string, optional The file system path to the local endpoint's SSL key. May specify the same file as `cert` if using a file that contains both. See http://docs.python.org/library/ssl.html#ssl-certificates for details. Required if using client certificate authentication. cert : string, optional The file system path to the local endpoint's SSL certificate. May specify the same file as `cert` if using a file that contains both. See http://docs.python.org/library/ssl.html#ssl-certificates for details. Required if using client certificate authentication. ca : string, optional The file system path to a set of concatenated certificate authority certs, which are used to validate certificates passed from the other end of the connection. scns : array-like, optional List of acceptable server commonNames. The peer cert's common name must appear in this list, otherwise the connect() call will throw a `CertificateError`. protocol : {'xmlrpc/ssl', 'xmlrpc/tlsv1'}, optional Communication protocol to use. """ if not has_py26: httplib.HTTPConnection.__init__(self, host, port, strict) elif not has_py32: httplib.HTTPConnection.__init__(self, host, port, strict, timeout) else: # the strict parameter is deprecated. # HTTP 0.9-style "Simple Responses" are not supported anymore. httplib.HTTPConnection.__init__(self, host, port, timeout=timeout) self.logger = logging.getLogger("%s.%s" % (self.__class__.__module__, self.__class__.__name__)) self.key = key self.cert = cert self.ca = ca self.scns = scns self.protocol = protocol self.timeout = timeout def connect(self): """Initiates a connection using the ssl module.""" # check for IPv6 hostip = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][4][0] if ':' in hostip: rawsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) else: rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self.protocol == 'xmlrpc/ssl': ssl_protocol_ver = ssl.PROTOCOL_SSLv23 elif self.protocol == 'xmlrpc/tlsv1': ssl_protocol_ver = ssl.PROTOCOL_TLSv1 else: self.logger.error("Unknown protocol %s" % (self.protocol)) raise Exception("unknown protocol %s" % self.protocol) if self.ca: other_side_required = ssl.CERT_REQUIRED if not os.path.isfile(self.ca): self.logger.error("CA specified but none found at %s" % self.ca) else: other_side_required = ssl.CERT_NONE self.logger.warning("No ca is specified. Cannot authenticate the " "server with SSL.") if self.cert and not self.key: self.logger.warning("SSL cert specfied, but no key. Cannot " "authenticate this client with SSL.") self.cert = None if self.key and not self.cert: self.logger.warning("SSL key specfied, but no cert. 
Cannot " "authenticate this client with SSL.") self.key = None rawsock.settimeout(self.timeout) self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required, ca_certs=self.ca, suppress_ragged_eofs=True, keyfile=self.key, certfile=self.cert, ssl_version=ssl_protocol_ver) self.sock.connect((self.host, self.port)) peer_cert = self.sock.getpeercert() if peer_cert and self.scns: scn = [x[0][1] for x in peer_cert['subject'] if x[0][0] == 'commonName'][0] if scn not in self.scns: raise CertificateError(scn) self.sock.closeSocket = True class XMLRPCTransport(xmlrpclib.Transport): def __init__(self, key=None, cert=None, ca=None, scns=None, use_datetime=0, timeout=90, protocol='xmlrpc/tlsv1'): if hasattr(xmlrpclib.Transport, '__init__'): xmlrpclib.Transport.__init__(self, use_datetime) self.key = key self.cert = cert self.ca = ca self.scns = scns self.timeout = timeout self.protocol = protocol def make_connection(self, host): host, self._extra_headers = self.get_host_info(host)[0:2] return SSLHTTPConnection(host, key=self.key, cert=self.cert, ca=self.ca, scns=self.scns, timeout=self.timeout, protocol=self.protocol) def request(self, host, handler, request_body, verbose=0): """Send request to server and return response.""" try: conn = self.send_request(host, handler, request_body, False) response = conn.getresponse() errcode = response.status errmsg = response.reason headers = response.msg except (socket.error, SSL_ERROR, httplib.BadStatusLine): err = sys.exc_info()[1] raise ProxyError(xmlrpclib.ProtocolError(host + handler, 408, str(err), self._extra_headers)) if errcode != 200: raise ProxyError(xmlrpclib.ProtocolError(host + handler, errcode, errmsg, headers)) self.verbose = verbose return self.parse_response(response) if sys.hexversion < 0x03000000: # pylint: disable=E1101 def send_request(self, host, handler, request_body, debug): """ send_request() changed significantly in py3k.""" conn = self.make_connection(host) xmlrpclib.Transport.send_request(self, conn, handler, request_body) self.send_host(conn, host) self.send_user_agent(conn) self.send_content(conn, request_body) return conn # pylint: enable=E1101 class ComponentProxy(xmlrpclib.ServerProxy): """Constructs proxies to components. 
""" options = [ Bcfg2.Options.Common.location, Bcfg2.Options.Common.ssl_ca, Bcfg2.Options.Common.password, Bcfg2.Options.Common.client_timeout, Bcfg2.Options.Common.protocol, Bcfg2.Options.PathOption( '--ssl-key', cf=('communication', 'key'), dest="key", help='Path to SSL key'), Bcfg2.Options.PathOption( cf=('communication', 'certificate'), dest="cert", help='Path to SSL certificate'), Bcfg2.Options.Option( "-u", "--user", default="root", cf=('communication', 'user'), help='The user to provide for authentication'), Bcfg2.Options.Option( "-R", "--retries", type=int, default=3, cf=('communication', 'retries'), help='The number of times to retry network communication'), Bcfg2.Options.Option( "-y", "--retry-delay", type=int, default=1, cf=('communication', 'retry_delay'), help='The time in seconds to wait between retries'), Bcfg2.Options.Option( '--ssl-cns', cf=('communication', 'serverCommonNames'), dest="ssl_cns", type=Bcfg2.Options.Types.colon_list, help='List of server commonNames')] def __init__(self): RetryMethod.max_retries = Bcfg2.Options.setup.retries RetryMethod.retry_delay = Bcfg2.Options.setup.retry_delay if Bcfg2.Options.setup.user and Bcfg2.Options.setup.password: method, path = urlparse(Bcfg2.Options.setup.server)[:2] url = "%s://%s:%s@%s" % ( method, quote_plus(Bcfg2.Options.setup.user, ''), quote_plus(Bcfg2.Options.setup.password, ''), path) else: url = Bcfg2.Options.setup.server ssl_trans = XMLRPCTransport( key=Bcfg2.Options.setup.key, cert=Bcfg2.Options.setup.cert, ca=Bcfg2.Options.setup.ca, scns=Bcfg2.Options.setup.ssl_cns, timeout=Bcfg2.Options.setup.client_timeout, protocol=Bcfg2.Options.setup.protocol) xmlrpclib.ServerProxy.__init__(self, url, allow_none=True, transport=ssl_trans) src/lib/Bcfg2/Client/Tools/000077500000000000000000000000001303523157100156545ustar00rootroot00000000000000src/lib/Bcfg2/Client/Tools/APK.py000066400000000000000000000042161303523157100166440ustar00rootroot00000000000000"""This provides Bcfg2 support for Alpine Linux APK packages.""" import Bcfg2.Client.Tools class APK(Bcfg2.Client.Tools.PkgTool): """Support for Apk packages.""" name = 'APK' __execs__ = ["/sbin/apk"] __handles__ = [('Package', 'apk')] __req__ = {'Package': ['name', 'version']} pkgtype = 'apk' pkgtool = ("/sbin/apk add %s", ("%s", ["name"])) def RefreshPackages(self): """Refresh memory hashes of packages.""" names = self.cmd.run("/sbin/apk info").stdout.splitlines() nameversions = self.cmd.run("/sbin/apk info -v").stdout.splitlines() for pkg in zip(names, nameversions): pkgname = pkg[0] version = pkg[1][len(pkgname) + 1:] self.logger.debug(" pkgname: %s" % pkgname) self.logger.debug(" version: %s" % version) self.installed[pkgname] = version def VerifyPackage(self, entry, _): """Verify Package status for entry.""" if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % entry.attrib['name']) return False if entry.attrib['name'] in self.installed: if entry.attrib['version'] in \ ['auto', self.installed[entry.attrib['name']]]: # FIXME: Does APK have any sort of verification mechanism? 
return True else: self.logger.info(" pkg %s at version %s, not %s" % (entry.attrib['name'], self.installed[entry.attrib['name']], entry.attrib['version'])) entry.set('current_version', self.installed[entry.get('name')]) return False entry.set('current_exists', 'false') return False def Remove(self, packages): """Remove extra packages.""" names = [pkg.get('name') for pkg in packages] self.logger.info("Removing packages: %s" % " ".join(names)) self.cmd.run("/sbin/apk del %s" % " ".join(names)) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/APT.py000066400000000000000000000257651303523157100166710ustar00rootroot00000000000000"""This is the Bcfg2 support for apt-get.""" import os import sys import apt.cache import Bcfg2.Options import Bcfg2.Client.Tools class APT(Bcfg2.Client.Tools.Tool): """The Debian toolset implements package and service operations and inherits the rest from Tools.Tool.""" options = Bcfg2.Client.Tools.Tool.options + [ Bcfg2.Options.PathOption( cf=('APT', 'install_path'), default='/usr', dest='apt_install_path', help='Apt tools install path'), Bcfg2.Options.PathOption( cf=('APT', 'var_path'), default='/var', dest='apt_var_path', help='Apt tools var path'), Bcfg2.Options.PathOption( cf=('APT', 'etc_path'), default='/etc', dest='apt_etc_path', help='System etc path')] __execs__ = [] __handles__ = [('Package', 'deb'), ('Path', 'ignore')] __req__ = {'Package': ['name', 'version'], 'Path': ['type']} def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) self.debsums = '%s/bin/debsums' % Bcfg2.Options.setup.apt_install_path self.aptget = '%s/bin/apt-get' % Bcfg2.Options.setup.apt_install_path self.dpkg = '%s/bin/dpkg' % Bcfg2.Options.setup.apt_install_path self.__execs__ = [self.debsums, self.aptget, self.dpkg] path_entries = os.environ['PATH'].split(':') for reqdir in ['/sbin', '/usr/sbin']: if reqdir not in path_entries: os.environ['PATH'] = os.environ['PATH'] + ':' + reqdir self.pkgcmd = '%s ' % self.aptget + \ '-o DPkg::Options::=--force-confold ' + \ '-o DPkg::Options::=--force-confmiss ' + \ '--reinstall ' + \ '--force-yes ' if not Bcfg2.Options.setup.debug: self.pkgcmd += '-q=2 ' self.pkgcmd += '-y install %s' self.ignores = [entry.get('name') for struct in config for entry in struct if entry.tag == 'Path' and entry.get('type') == 'ignore'] self.__important__ = self.__important__ + [ "%s/cache/debconf/config.dat" % Bcfg2.Options.setup.apt_var_path, "%s/cache/debconf/templates.dat" % Bcfg2.Options.setup.apt_var_path, '/etc/passwd', '/etc/group', '%s/apt/apt.conf' % Bcfg2.Options.setup.apt_etc_path, '%s/dpkg/dpkg.cfg' % Bcfg2.Options.setup.apt_etc_path] + \ [entry.get('name') for struct in config for entry in struct if (entry.tag == 'Path' and entry.get('name').startswith( '%s/apt/sources.list' % Bcfg2.Options.setup.apt_etc_path))] self.nonexistent = [entry.get('name') for struct in config for entry in struct if (entry.tag == 'Path' and entry.get('type') == 'nonexistent')] os.environ["DEBIAN_FRONTEND"] = 'noninteractive' self.actions = {} if Bcfg2.Options.setup.kevlar and not Bcfg2.Options.setup.dry_run: self.cmd.run("%s --force-confold --configure --pending" % self.dpkg) self.cmd.run("%s clean" % self.aptget) try: self.pkg_cache = apt.cache.Cache() except SystemError: err = sys.exc_info()[1] self.logger.info("Failed to initialize APT cache: %s" % err) raise Bcfg2.Client.Tools.ToolInstantiationError try: self.pkg_cache.update() except apt.cache.FetchFailedException: err = sys.exc_info()[1] self.logger.info("Failed to update 
APT cache: %s" % err) self.pkg_cache = apt.cache.Cache() def FindExtra(self): """Find extra packages.""" packages = [entry.get('name') for entry in self.getSupportedEntries()] extras = [(p.name, p.installed.version) for p in self.pkg_cache if p.is_installed and p.name not in packages] return [Bcfg2.Client.XML.Element('Package', name=name, type='deb', current_version=version) for (name, version) in extras] def VerifyDebsums(self, entry, modlist): """Verify the package contents with debsum information.""" output = \ self.cmd.run("%s -as %s" % (self.debsums, entry.get('name'))).stderr.splitlines() if len(output) == 1 and "no md5sums for" in output[0]: self.logger.info("Package %s has no md5sums. Cannot verify" % entry.get('name')) entry.set('qtext', "Reinstall Package %s-%s to setup md5sums? (y/N) " % (entry.get('name'), entry.get('version'))) return False files = [] for item in output: if "checksum mismatch" in item: files.append(item.split()[-1]) elif "changed file" in item: files.append(item.split()[3]) elif "can't open" in item: if item.split()[5] not in self.nonexistent: files.append(item.split()[5]) elif "missing file" in item and \ item.split()[3] in self.nonexistent: # these files should not exist continue elif "is not installed" in item or "missing file" in item: self.logger.error("Package %s is not fully installed" % entry.get('name')) else: self.logger.error("Got Unsupported pattern %s from debsums" % item) files.append(item) files = list(set(files) - set(self.ignores)) # We check if there is file in the checksum to do if files: # if files are found there we try to be sure our modlist is sane # with erroneous symlinks modlist = [os.path.realpath(filename) for filename in modlist] bad = [filename for filename in files if filename not in modlist] if bad: self.logger.debug("It is suggested that you either manage " "these files, revert the changes, or " "ignore false failures:") self.logger.info("Package %s failed validation. Bad files are:" % entry.get('name')) self.logger.info(bad) entry.set( 'qtext', "Reinstall Package %s-%s to fix failing files? (y/N) " % (entry.get('name'), entry.get('version'))) return False return True def VerifyPackage(self, entry, modlist, checksums=True): """Verify package for entry.""" if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % (entry.attrib['name'])) return False pkgname = entry.get('name') if pkgname not in self.pkg_cache or \ not self.pkg_cache[pkgname].is_installed: self.logger.info("Package %s not installed" % (entry.get('name'))) entry.set('current_exists', 'false') return False pkg = self.pkg_cache[pkgname] installed_version = pkg.installed.version if entry.get('version') == 'auto': if pkg.is_upgradable: desired_version = pkg.candidate.version else: desired_version = installed_version elif entry.get('version') == 'any': desired_version = installed_version else: desired_version = entry.get('version') if desired_version != installed_version: entry.set('current_version', installed_version) entry.set('qtext', "Modify Package %s (%s -> %s)? 
(y/N) " % (entry.get('name'), entry.get('current_version'), desired_version)) return False else: # version matches if not Bcfg2.Options.setup.quick \ and entry.get('verify', 'true') == 'true' \ and checksums: pkgsums = self.VerifyDebsums(entry, modlist) return pkgsums return True def Remove(self, packages): """Deal with extra configuration detected.""" pkgnames = " ".join([pkg.get('name') for pkg in packages]) self.pkg_cache = apt.cache.Cache() if len(packages) > 0: self.logger.info('Removing packages:') self.logger.info(pkgnames) for pkg in pkgnames.split(" "): self.pkg_cache[pkg].mark_delete(purge=True) self.pkg_cache.commit() self.pkg_cache = apt.cache.Cache() self.modified += packages self.extra = self.FindExtra() def Install(self, packages): # it looks like you can't install arbitrary versions of software # out of the pkg cache, we will still need to call apt-get ipkgs = [] bad_pkgs = [] for pkg in packages: pkgname = pkg.get('name') if pkgname not in self.pkg_cache: self.logger.error("APT has no information about package %s" % pkgname) continue if pkg.get('version') in ['auto', 'any']: try: ipkgs.append("%s=%s" % ( pkgname, self.pkg_cache[pkgname].candidate.version)) except AttributeError: self.logger.error("Failed to find %s in apt package " "cache" % pkgname) continue avail_vers = self.pkg_cache[pkgname].versions.keys() if pkg.get('version') in avail_vers: ipkgs.append("%s=%s" % (pkgname, pkg.get('version'))) continue else: self.logger.error("Package %s: desired version %s not in %s" % (pkgname, pkg.get('version'), avail_vers)) bad_pkgs.append(pkgname) if bad_pkgs: self.logger.error("Cannot find correct versions of packages:") self.logger.error(bad_pkgs) if not ipkgs: return dict() if not self.cmd.run(self.pkgcmd % (" ".join(ipkgs))): self.logger.error("APT command failed") self.pkg_cache = apt.cache.Cache() self.extra = self.FindExtra() states = dict() for package in packages: states[package] = self.VerifyPackage(package, [], checksums=False) if states[package]: self.modified.append(package) return states def VerifyPath(self, entry, _): # pylint: disable=W0613 """Do nothing here since we only verify Path type=ignore.""" return True src/lib/Bcfg2/Client/Tools/Action.py000066400000000000000000000056441303523157100174540ustar00rootroot00000000000000"""Action driver""" import Bcfg2.Client.Tools from Bcfg2.Utils import safe_input class Action(Bcfg2.Client.Tools.Tool): """Implement Actions""" name = 'Action' __handles__ = [('Action', None)] __req__ = {'Action': ['name', 'timing', 'when', 'command', 'status']} def RunAction(self, entry): """This method handles command execution and status return.""" shell = False shell_string = '' if entry.get('shell', 'false') == 'true': shell = True shell_string = '(in shell) ' if not Bcfg2.Options.setup.dry_run: if Bcfg2.Options.setup.interactive: prompt = ('Run Action %s%s, %s: (y/N): ' % (shell_string, entry.get('name'), entry.get('command'))) ans = safe_input(prompt) if ans not in ['y', 'Y']: return False if Bcfg2.Options.setup.service_mode == 'build': if entry.get('build', 'true') == 'false': self.logger.debug("Action: Deferring execution of %s due " "to build mode" % entry.get('command')) return False self.logger.debug("Running Action %s %s" % (shell_string, entry.get('name'))) rv = self.cmd.run(entry.get('command'), shell=shell) self.logger.debug("Action: %s got return code %s" % (entry.get('command'), rv.retval)) entry.set('rc', str(rv.retval)) return entry.get('status', 'check') == 'ignore' or rv.success else: self.logger.debug("In dryrun 
mode: not running action: %s" % (entry.get('name'))) return False def VerifyAction(self, dummy, _): """Actions always verify true.""" return True def InstallAction(self, entry): """Run actions as pre-checks for bundle installation.""" if entry.get('timing') != 'post': return self.RunAction(entry) return True def BundleUpdated(self, bundle): """Run postinstalls when bundles have been updated.""" states = dict() for action in bundle.findall("Action"): if action.get('timing') in ['post', 'both']: if not self._install_allowed(action): continue states[action] = self.RunAction(action) return states def BundleNotUpdated(self, bundle): """Run Actions when bundles have not been updated.""" states = dict() for action in bundle.findall("Action"): if (action.get('timing') in ['post', 'both'] and action.get('when') != 'modified'): if not self._install_allowed(action): continue states[action] = self.RunAction(action) return states src/lib/Bcfg2/Client/Tools/Blast.py000066400000000000000000000020631303523157100172740ustar00rootroot00000000000000"""This provides Bcfg2 support for Blastwave.""" import tempfile import Bcfg2.Client.Tools.SYSV class Blast(Bcfg2.Client.Tools.SYSV.SYSV): """Support for Blastwave packages.""" pkgtype = 'blast' pkgtool = ("/opt/csw/bin/pkg-get install %s", ("%s", ["bname"])) name = 'Blast' __execs__ = ['/opt/csw/bin/pkg-get', "/usr/bin/pkginfo"] __handles__ = [('Package', 'blast')] __req__ = {'Package': ['name', 'version', 'bname']} def __init__(self, config): # dont use the sysv constructor Bcfg2.Client.Tools.PkgTool.__init__(self, config) noaskfile = tempfile.NamedTemporaryFile() self.noaskname = noaskfile.name try: noaskfile.write(Bcfg2.Client.Tools.SYSV.noask) except: pass # VerifyPackage comes from Bcfg2.Client.Tools.SYSV # Install comes from Bcfg2.Client.Tools.PkgTool # Extra comes from Bcfg2.Client.Tools.Tool # Remove comes from Bcfg2.Client.Tools.SYSV def FindExtra(self): """Pass through to null FindExtra call.""" return [] src/lib/Bcfg2/Client/Tools/BundleDeps.py000066400000000000000000000023621303523157100202560ustar00rootroot00000000000000""" Bundle dependency support """ import Bcfg2.Client.Tools class BundleDeps(Bcfg2.Client.Tools.Tool): """Bundle dependency helper for Bcfg2. It handles Bundle tags inside the bundles that references the required other bundles that should change the modification status if the referenced bundles is modified.""" name = 'Bundle' __handles__ = [('Bundle', None)] __req__ = {'Bundle': ['name']} def InstallBundle(self, _): """Simple no-op because we only need the BundleUpdated hook.""" return dict() def VerifyBundle(self, entry, _): # pylint: disable=W0613 """Simple no-op because we only need the BundleUpdated hook.""" return True def BundleUpdated(self, entry): """This handles the dependencies on this bundle. 
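    For illustration (the nesting is assumed from the lookup this method
    performs on ``./Bundle/Bundle`` elements): if bundle ``web`` contains
    a child element <Bundle name="base"/>, then an update of bundle
    ``base`` marks that child element as modified, which in turn fires
    ``web``'s own modification hooks.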
It searches all Bundle tags in other bundles that references the current bundle name and marks those tags as modified to trigger the modification hook on the other bundles.""" bundle_name = entry.get('name') for bundle in self.config.findall('./Bundle/Bundle'): if bundle.get('name') == bundle_name and \ bundle not in self.modified: self.modified.append(bundle) return dict() src/lib/Bcfg2/Client/Tools/Chkconfig.py000066400000000000000000000113561303523157100201270ustar00rootroot00000000000000# This is the bcfg2 support for chkconfig """This is chkconfig support.""" import os import Bcfg2.Client.Tools import Bcfg2.Client.XML class Chkconfig(Bcfg2.Client.Tools.SvcTool): """Chkconfig support for Bcfg2.""" name = 'Chkconfig' __execs__ = ['/sbin/chkconfig'] __handles__ = [('Service', 'chkconfig')] __req__ = {'Service': ['name', 'status']} os.environ['LC_ALL'] = 'C' def get_svc_command(self, service, action): return "/sbin/service %s %s" % (service.get('name'), action) def verify_bootstatus(self, entry, bootstatus): """Verify bootstatus for entry.""" rv = self.cmd.run("/sbin/chkconfig --list %s " % entry.get('name')) if rv.success: srvdata = rv.stdout.splitlines()[0].split() else: # service not installed entry.set('current_bootstatus', 'service not installed') return False if len(srvdata) == 2: # This is an xinetd service if bootstatus == srvdata[1]: return True else: entry.set('current_bootstatus', srvdata[1]) return False try: onlevels = [level.split(':')[0] for level in srvdata[1:] if level.split(':')[1] == 'on'] except IndexError: onlevels = [] if bootstatus == 'on': current_bootstatus = (len(onlevels) > 0) else: current_bootstatus = (len(onlevels) == 0) return current_bootstatus def VerifyService(self, entry, _): """Verify Service status for entry.""" entry.set('target_status', entry.get('status')) # for reporting bootstatus = self.get_bootstatus(entry) if bootstatus is None: return True current_bootstatus = self.verify_bootstatus(entry, bootstatus) if entry.get('status') == 'ignore': # 'ignore' should verify current_svcstatus = True svcstatus = True else: svcstatus = self.check_service(entry) if entry.get('status') == 'on': if svcstatus: current_svcstatus = True else: current_svcstatus = False elif entry.get('status') == 'off': if svcstatus: current_svcstatus = False else: current_svcstatus = True if svcstatus: entry.set('current_status', 'on') else: entry.set('current_status', 'off') return current_bootstatus and current_svcstatus def InstallService(self, entry): """Install Service entry.""" self.cmd.run("/sbin/chkconfig --add %s" % (entry.get('name'))) self.logger.info("Installing Service %s" % (entry.get('name'))) bootstatus = self.get_bootstatus(entry) if bootstatus is not None: if bootstatus == 'on': # make sure service is enabled on boot bootcmd = '/sbin/chkconfig %s %s' % \ (entry.get('name'), bootstatus) elif bootstatus == 'off': # make sure service is disabled on boot bootcmd = '/sbin/chkconfig %s %s' % (entry.get('name'), bootstatus) bootcmdrv = self.cmd.run(bootcmd).success if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv buildmode = Bcfg2.Options.setup.service_mode == 'build' if ((entry.get('status') == 'on' and not buildmode) and entry.get('current_status') == 'off'): svccmdrv = self.start_service(entry) elif ((entry.get('status') == 'off' or buildmode) and entry.get('current_status') == 'on'): svccmdrv = self.stop_service(entry) else: svccmdrv = True # ignore status attribute return bootcmdrv and 
svccmdrv else: # when bootstatus is 'None', status == 'ignore' return True def FindExtra(self): """Locate extra chkconfig Services.""" allsrv = [line.split()[0] for line in self.cmd.run("/sbin/chkconfig --list").stdout.splitlines() if ":on" in line] self.logger.debug('Found active services:') self.logger.debug(allsrv) specified = [srv.get('name') for srv in self.getSupportedEntries()] return [Bcfg2.Client.XML.Element('Service', type='chkconfig', name=name) for name in allsrv if name not in specified] src/lib/Bcfg2/Client/Tools/DebInit.py000066400000000000000000000153121303523157100175460ustar00rootroot00000000000000"""Debian Init Support for Bcfg2""" import glob import os import re import Bcfg2.Options import Bcfg2.Client.Tools # Debian squeeze and beyond uses a dependecy based boot sequence DEBIAN_OLD_STYLE_BOOT_SEQUENCE = ('etch', '4.0', 'lenny') class DebInit(Bcfg2.Client.Tools.SvcTool): """Debian Service Support for Bcfg2.""" name = 'DebInit' __execs__ = ['/usr/sbin/update-rc.d', '/usr/sbin/invoke-rc.d'] __handles__ = [('Service', 'deb')] __req__ = {'Service': ['name', 'status']} svcre = \ re.compile(r'/etc/.*/(?P[SK])(?P\d+)(?P\S+)') def get_svc_command(self, service, action): return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action) def verify_bootstatus(self, entry, bootstatus): """Verify bootstatus for entry.""" rawfiles = glob.glob("/etc/rc*.d/[SK]*%s" % (entry.get('name'))) files = [] try: deb_version = open('/etc/debian_version').read().split('/', 1)[0] except IOError: deb_version = 'unknown' if entry.get('sequence'): if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or deb_version.startswith('5') or os.path.exists('/etc/init.d/.legacy-bootordering')): start_sequence = int(entry.get('sequence')) kill_sequence = 100 - start_sequence else: start_sequence = None self.logger.warning("Your debian version boot sequence is " "dependency based \"sequence\" attribute " "will be ignored.") else: start_sequence = None for filename in rawfiles: match = self.svcre.match(filename) if not match: self.logger.error("Failed to match file: %s" % filename) continue if match.group('name') == entry.get('name'): files.append(filename) if bootstatus == 'off': if files: entry.set('current_bootstatus', 'on') return False else: return True elif files: if start_sequence: for filename in files: match = self.svcre.match(filename) file_sequence = int(match.group('sequence')) if ((match.group('action') == 'S' and file_sequence != start_sequence) or (match.group('action') == 'K' and file_sequence != kill_sequence)): return False return True else: entry.set('current_bootstatus', 'off') return False def VerifyService(self, entry, _): """Verify Service status for entry.""" entry.set('target_status', entry.get('status')) # for reporting bootstatus = self.get_bootstatus(entry) if bootstatus is None: return True current_bootstatus = self.verify_bootstatus(entry, bootstatus) if entry.get('status') == 'ignore': # 'ignore' should verify current_svcstatus = True svcstatus = True else: svcstatus = self.check_service(entry) if entry.get('status') == 'on': if svcstatus: current_svcstatus = True else: current_svcstatus = False elif entry.get('status') == 'off': if svcstatus: current_svcstatus = False else: current_svcstatus = True if svcstatus: entry.set('current_status', 'on') else: entry.set('current_status', 'off') return current_bootstatus and current_svcstatus def InstallService(self, entry): """Install Service entry.""" self.logger.info("Installing Service %s" % (entry.get('name'))) bootstatus = 
self.get_bootstatus(entry) # check if init script exists try: os.stat('/etc/init.d/%s' % entry.get('name')) except OSError: self.logger.debug("Init script for service %s does not exist" % entry.get('name')) return False if bootstatus is not None: seqcmdrv = True if bootstatus == 'on': # make sure service is enabled on boot bootcmd = '/usr/sbin/update-rc.d %s defaults' % \ entry.get('name') if entry.get('sequence'): seqcmd = '/usr/sbin/update-rc.d -f %s remove' % \ entry.get('name') seqcmdrv = self.cmd.run(seqcmd) start_sequence = int(entry.get('sequence')) kill_sequence = 100 - start_sequence bootcmd = '%s %d %d' % (bootcmd, start_sequence, kill_sequence) elif bootstatus == 'off': # make sure service is disabled on boot bootcmd = '/usr/sbin/update-rc.d -f %s remove' % \ entry.get('name') bootcmdrv = self.cmd.run(bootcmd) if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv and seqcmdrv buildmode = Bcfg2.Options.setup.service_mode == 'build' if (entry.get('status') == 'on' and not buildmode) and \ entry.get('current_status') == 'off': svccmdrv = self.start_service(entry) elif (entry.get('status') == 'off' or buildmode) and \ entry.get('current_status') == 'on': svccmdrv = self.stop_service(entry) else: svccmdrv = True # ignore status attribute return bootcmdrv and svccmdrv and seqcmdrv else: # when bootstatus is 'None', status == 'ignore' return True def FindExtra(self): """Find Extra Debian Service entries.""" specified = [entry.get('name') for entry in self.getSupportedEntries()] extra = set() for fname in glob.glob("/etc/rc[12345].d/S*"): name = self.svcre.match(fname).group('name') if name not in specified: extra.add(name) return [Bcfg2.Client.XML.Element('Service', name=name, type='deb') for name in list(extra)] def Remove(self, _): """Remove extra service entries.""" # Extra service removal is nonsensical # Extra services need to be reflected in the config return src/lib/Bcfg2/Client/Tools/Encap.py000066400000000000000000000036571303523157100172670ustar00rootroot00000000000000"""Bcfg2 Support for Encap Packages""" import glob import re import Bcfg2.Client.Tools class Encap(Bcfg2.Client.Tools.PkgTool): """Support for Encap packages.""" name = 'Encap' __execs__ = ['/usr/local/bin/epkg'] __handles__ = [('Package', 'encap')] __req__ = {'Package': ['version', 'url']} pkgtype = 'encap' pkgtool = ("/usr/local/bin/epkg -l -f -q %s", ("%s", ["url"])) splitter = re.compile(r'.*/(?P[\w-]+)\-(?P[\w\.+-]+)') def RefreshPackages(self): """Try to find encap packages.""" self.installed = {} for pkg in glob.glob("/usr/local/encap/*"): match = self.splitter.match(pkg) if match: self.installed[match.group('name')] = match.group('version') else: print("Failed to split name %s" % pkg) self.logger.debug("Encap: RefreshPackages: self.installed.keys() are:") self.logger.debug("%s" % list(self.installed.keys())) def VerifyPackage(self, entry, _): """Verify Package status for entry.""" if not entry.get('version'): self.logger.info("Insufficient information of Package %s; " "cannot Verify" % entry.get('name')) return False success = self.cmd.run("/usr/local/bin/epkg -q -S -k %s-%s" % (entry.get('name'), entry.get('version'))).success if not success: self.logger.debug("Package %s version incorrect" % entry.get('name')) return success def Remove(self, packages): """Deal with extra configuration detected.""" names = " ".join([pkg.get('name') for pkg in packages]) self.logger.info("Removing packages: %s" % (names)) 
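        # Remove all of the listed packages in a single epkg run (note the
        # -r flag, versus the install flags used by pkgtool above), then
        # rebuild self.installed so that FindExtra() reflects the
        # post-removal state.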
self.cmd.run("/usr/local/bin/epkg -l -q -r %s" % (names)) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/FreeBSDInit.py000066400000000000000000000124131303523157100202650ustar00rootroot00000000000000"""FreeBSD Init Support for Bcfg2.""" import os import re import Bcfg2.Options import Bcfg2.Client.Tools class FreeBSDInit(Bcfg2.Client.Tools.SvcTool): """FreeBSD service support for Bcfg2.""" name = 'FreeBSDInit' __execs__ = ['/usr/sbin/service', '/usr/sbin/sysrc'] __handles__ = [('Service', 'freebsd')] __req__ = {'Service': ['name', 'status']} rcvar_re = re.compile(r'^(?P[a-z_]+_enable)="[A-Z]+"$') def get_svc_command(self, service, action): return '/usr/sbin/service %s %s' % (service.get('name'), action) def verify_bootstatus(self, entry, bootstatus): """Verify bootstatus for entry.""" cmd = self.get_svc_command(entry, 'enabled') current_bootstatus = bool(self.cmd.run(cmd)) if bootstatus == 'off': if current_bootstatus: entry.set('current_bootstatus', 'on') return False return True elif not current_bootstatus: entry.set('current_bootstatus', 'off') return False return True def check_service(self, entry): # use 'onestatus' to enable status reporting for disabled services cmd = self.get_svc_command(entry, 'onestatus') return bool(self.cmd.run(cmd)) def stop_service(self, service): # use 'onestop' to enable stopping of disabled services self.logger.debug('Stopping service %s' % service.get('name')) return self.cmd.run(self.get_svc_command(service, 'onestop')) def VerifyService(self, entry, _): """Verify Service status for entry.""" entry.set('target_status', entry.get('status')) # for reporting bootstatus = self.get_bootstatus(entry) if bootstatus is None: return True current_bootstatus = self.verify_bootstatus(entry, bootstatus) if entry.get('status') == 'ignore': # 'ignore' should verify current_svcstatus = True svcstatus = True else: svcstatus = self.check_service(entry) if entry.get('status') == 'on': if svcstatus: current_svcstatus = True else: current_svcstatus = False elif entry.get('status') == 'off': if svcstatus: current_svcstatus = False else: current_svcstatus = True if svcstatus: entry.set('current_status', 'on') else: entry.set('current_status', 'off') return current_bootstatus and current_svcstatus def InstallService(self, entry): """Install Service entry.""" self.logger.info("Installing Service %s" % (entry.get('name'))) bootstatus = self.get_bootstatus(entry) # check if service exists all_services_cmd = '/usr/sbin/service -l' all_services = self.cmd.run(all_services_cmd).stdout.splitlines() if entry.get('name') not in all_services: self.logger.debug("Service %s does not exist" % entry.get('name')) return False # get rcvar for service vars = set() rcvar_cmd = self.get_svc_command(entry, 'rcvar') for line in self.cmd.run(rcvar_cmd).stdout.splitlines(): match = self.rcvar_re.match(line) if match: vars.add(match.group('var')) if bootstatus is not None: bootcmdrv = True sysrcstatus = None if bootstatus == 'on': sysrcstatus = 'YES' elif bootstatus == 'off': sysrcstatus = 'NO' if sysrcstatus is not None: for var in vars: if not self.cmd.run('/usr/sbin/sysrc %s="%s"' % (var, sysrcstatus)): bootcmdrv = False break if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv buildmode = Bcfg2.Options.setup.service_mode == 'build' if (entry.get('status') == 'on' and not buildmode) and \ entry.get('current_status') == 'off': svccmdrv = self.start_service(entry) elif (entry.get('status') == 
'off' or buildmode) and \ entry.get('current_status') == 'on': svccmdrv = self.stop_service(entry) else: svccmdrv = True # ignore status attribute return bootcmdrv and svccmdrv else: # when bootstatus is 'None', status == 'ignore' return True def FindExtra(self): """Find Extra FreeBSD Service entries.""" specified = [entry.get('name') for entry in self.getSupportedEntries()] extra = set() for path in self.cmd.run("/usr/sbin/service -e").stdout.splitlines(): name = os.path.basename(path) if name not in specified: extra.add(name) return [Bcfg2.Client.XML.Element('Service', name=name, type='freebsd') for name in list(extra)] def Remove(self, _): """Remove extra service entries.""" # Extra service removal is nonsensical # Extra services need to be reflected in the config return src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py000066400000000000000000000032551303523157100207210ustar00rootroot00000000000000"""This is the Bcfg2 tool for the FreeBSD package system.""" # TODO # - actual package installation # - verification of package files import re import Bcfg2.Client.Tools class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool): """The FreeBSD toolset implements package operations and inherits the rest from Toolset.Toolset.""" name = 'FreeBSDPackage' __execs__ = ['/usr/sbin/pkg_add', '/usr/sbin/pkg_info'] __handles__ = [('Package', 'freebsdpkg')] __req__ = {'Package': ['name', 'version']} pkgtool = ('/usr/sbin/pkg_add -r %s', ('%s-%s', ['name', 'version'])) pkgtype = 'freebsdpkg' def RefreshPackages(self): self.installed = {} packages = self.cmd.run("/usr/sbin/pkg_info -a -E").stdout.splitlines() pattern = re.compile(r'(.*)-(\d.*)') for pkg in packages: if pattern.match(pkg): name = pattern.match(pkg).group(1) version = pattern.match(pkg).group(2) self.installed[name] = version def VerifyPackage(self, entry, _): if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % entry.attrib['name']) return False if entry.attrib['name'] in self.installed: if self.installed[entry.attrib['name']] == entry.attrib['version']: # TODO: verfification return True else: entry.set('current_version', self.installed[entry.get('name')]) return False self.logger.info("Package %s not installed" % (entry.get('name'))) entry.set('current_exists', 'false') return False src/lib/Bcfg2/Client/Tools/HomeBrew.py000066400000000000000000000042111303523157100177340ustar00rootroot00000000000000"""This provides Bcfg2 support for homebrew packages.""" import Bcfg2.Client.Tools class HomeBrew(Bcfg2.Client.Tools.PkgTool): """Homebrew package support.""" name = 'HomeBrew' __execs__ = ["/usr/local/bin/brew"] __handles__ = [('Package', 'homebrew')] __req__ = {'Package': ['name', 'version']} pkgtype = 'homebrew' pkgtool = ('/usr/local/bin/brew install %s', ('%s', ['name'])) def RefreshPackages(self): """Refresh memory hashes of packages.""" pkgcache = self.cmd.run(["/usr/local/bin/brew", "list", "--versions"]).stdout.splitlines() self.installed = {} for pkg in pkgcache: pkgname, version = pkg.strip().split() self.logger.debug(" pkgname: %s version: %s" % (pkgname, version)) self.installed[pkgname] = version def VerifyPackage(self, entry, _): """Verify Package status for entry.""" self.logger.debug("VerifyPackage: %s : %s" % (entry.get('name'), entry.get('version'))) if entry.attrib['name'] in self.installed: if (self.installed[entry.attrib['name']] == entry.attrib['version'] or entry.attrib['version'] == 'any'): return True else: self.logger.info(" %s: Wrong version installed. 
" "Want %s, but have %s" % (entry.get("name"), entry.get("version"), self.installed[entry.get("name")], )) entry.set('current_version', self.installed[entry.get('name')]) return False entry.set('current_exists', 'false') return False def Remove(self, packages): """Remove extra packages.""" pkg_names = [p.get('name') for p in packages] self.logger.info("Removing packages: %s" % pkg_names) self.cmd.run(["/usr/local/bin/brew", "uninstall"] + pkg_names) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/IPS.py000066400000000000000000000041541303523157100166650ustar00rootroot00000000000000"""This is the Bcfg2 support for OpenSolaris packages.""" import pkg.client.image as image import pkg.client.progress as progress import Bcfg2.Client.Tools class IPS(Bcfg2.Client.Tools.PkgTool): """The IPS driver implements OpenSolaris package operations.""" name = 'IPS' pkgtype = 'ips' conflicts = ['SYSV'] __handles__ = [('Package', 'ips')] __req__ = {'Package': ['name', 'version']} pkgtool = ('pkg install --no-refresh %s', ('%s', ['name'])) def __init__(self, config): self.installed = {} self.pending_upgrades = set() self.image = image.Image() self.image.find_root('/', False) self.image.load_config() Bcfg2.Client.Tools.PkgTool.__init__(self, config) def RefreshPackages(self): self.installed = dict() self.image.history.operation_name = "list" self.image.load_catalogs(progress.NullProgressTracker()) for (pfmri, pinfo) in self.image.inventory([], False): pname = pfmri.pkg_name pversion = pfmri.version.get_short_version() self.installed[pname] = pversion if pinfo['upgradable']: self.pending_upgrades.add(pname) def VerifyPackage(self, entry, _): """Verify package for entry.""" pname = entry.get('name') if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % (pname)) return False if pname not in self.installed: self.logger.debug("IPS: Package %s not installed" % pname) return False if entry.get('version') == 'auto': if pname in self.pending_upgrades: return False elif entry.get('version') == 'any': pass else: if entry.get('version') != self.installed[pname]: self.logger.debug("IPS: Package %s: have %s want %s" % (pname, self.installed[pname], entry.get('version'))) return False # need to implement pkg chksum validation return True src/lib/Bcfg2/Client/Tools/MacPorts.py000066400000000000000000000052041303523157100177570ustar00rootroot00000000000000"""This provides Bcfg2 support for macports packages.""" import Bcfg2.Client.Tools class MacPorts(Bcfg2.Client.Tools.PkgTool): """macports package support.""" name = 'MacPorts' __execs__ = ["/opt/local/bin/port"] __handles__ = [('Package', 'macport')] __req__ = {'Package': ['name', 'version']} pkgtype = 'macport' pkgtool = ('/opt/local/bin/port install %s', ('%s', ['name'])) def RefreshPackages(self): """Refresh memory hashes of packages.""" pkgcache = self.cmd.run(["/opt/local/bin/port", "installed"]).stdout.splitlines() self.installed = {} for pkg in pkgcache: if pkg.startswith("Warning:"): continue if pkg.startswith("The following ports are currently installed"): continue if pkg.startswith("No ports are installed"): return pkgname = pkg.split('@')[0].strip() version = pkg.split('@')[1].split(' ')[0] self.logger.info(" pkgname: %s version: %s" % (pkgname, version)) self.installed[pkgname] = version def VerifyPackage(self, entry, _): """Verify Package status for entry.""" if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % entry.attrib['name']) return False if 
entry.attrib['name'] in self.installed: if (entry.attrib['version'] == 'any' or self.installed[entry.attrib['name']] == entry.attrib['version']): # FIXME: We should be able to check this once # http://trac.macports.org/ticket/15709 is implemented return True else: self.logger.info(" %s: Wrong version installed. " "Want %s, but have %s" % (entry.get("name"), entry.get("version"), self.installed[entry.get("name")], )) entry.set('current_version', self.installed[entry.get('name')]) return False entry.set('current_exists', 'false') return False def Remove(self, packages): """Remove extra packages.""" names = [pkg.get('name') for pkg in packages] self.logger.info("Removing packages: %s" % " ".join(names)) self.cmd.run("/opt/local/bin/port uninstall %s" % " ".join(names)) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/OpenCSW.py000066400000000000000000000021711303523157100175050ustar00rootroot00000000000000# This is the bcfg2 support for opencsw packages (pkgutil) """This provides Bcfg2 support for OpenCSW packages.""" import tempfile import Bcfg2.Client.Tools.SYSV class OpenCSW(Bcfg2.Client.Tools.SYSV.SYSV): """Support for OpenCSW packages.""" pkgtype = 'opencsw' pkgtool = ("/opt/csw/bin/pkgutil -y -i %s", ("%s", ["bname"])) name = 'OpenCSW' __execs__ = ['/opt/csw/bin/pkgutil', "/usr/bin/pkginfo"] __handles__ = [('Package', 'opencsw')] __req__ = {'Package': ['name', 'version', 'bname']} def __init__(self, config): # dont use the sysv constructor Bcfg2.Client.Tools.PkgTool.__init__(self, config) noaskfile = tempfile.NamedTemporaryFile() self.noaskname = noaskfile.name try: noaskfile.write(Bcfg2.Client.Tools.SYSV.noask) except: pass # VerifyPackage comes from Bcfg2.Client.Tools.SYSV # Install comes from Bcfg2.Client.Tools.PkgTool # Extra comes from Bcfg2.Client.Tools.Tool # Remove comes from Bcfg2.Client.Tools.SYSV def FindExtra(self): """Pass through to null FindExtra call.""" return [] src/lib/Bcfg2/Client/Tools/POSIX/000077500000000000000000000000001303523157100165565ustar00rootroot00000000000000src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py000066400000000000000000000261751303523157100203500ustar00rootroot00000000000000""" Augeas driver """ import sys import Bcfg2.Client.XML from augeas import Augeas from Bcfg2.Client.Tools.POSIX.base import POSIXTool from Bcfg2.Client.Tools.POSIX.File import POSIXFile class AugeasCommand(object): """ Base class for all Augeas command objects """ def __init__(self, entry, command, augeas_obj, logger): self._augeas = augeas_obj self.command = command self.entry = entry self.logger = logger def get_path(self, attr="path"): """ Get a fully qualified path from the name of the parent entry and the path given in this command tag. @param attr: The attribute to get the relative path from @type attr: string @returns: string - the fully qualified Augeas path """ return "/files/%s/%s" % (self.entry.get("name").strip("/"), self.command.get(attr).lstrip("/")) def _exists(self, path): """ Return True if a path exists in Augeas, False otherwise. Note that a False return can mean many things: A file that doesn't exist, a node within the file that doesn't exist, no lens to parse the file, etc. """ return len(self._augeas.match(path)) > 1 def _verify_exists(self, path=None): """ Verify that the given path exists, with friendly debug logging. @param path: The path to verify existence of. Defaults to the result of :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.getpath`. 
@type path: string @returns: bool - Whether or not the path exists """ if path is None: path = self.get_path() self.logger.debug("Augeas: Verifying that '%s' exists" % path) return self._exists(path) def _verify_not_exists(self, path=None): """ Verify that the given path does not exist, with friendly debug logging. @param path: The path to verify existence of. Defaults to the result of :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.getpath`. @type path: string @returns: bool - Whether or not the path does not exist. (I.e., True if it does not exist, False if it does exist.) """ if path is None: path = self.get_path() self.logger.debug("Augeas: Verifying that '%s' does not exist" % path) return not self._exists(path) def _verify_set(self, expected, path=None): """ Verify that the given path is set to the given value, with friendly debug logging. @param expected: The expected value of the node. @param path: The path to verify existence of. Defaults to the result of :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.getpath`. @type path: string @returns: bool - Whether or not the path matches the expected value. """ if path is None: path = self.get_path() self.logger.debug("Augeas: Verifying '%s' == '%s'" % (path, expected)) actual = self._augeas.get(path) if actual == expected: return True else: self.logger.debug("Augeas: '%s' failed verification: '%s' != '%s'" % (path, actual, expected)) return False def __str__(self): return Bcfg2.Client.XML.tostring(self.command) def verify(self): """ Verify that the command has been applied. """ raise NotImplementedError def install(self): """ Run the command. """ raise NotImplementedError class Remove(AugeasCommand): """ Augeas ``rm`` command """ def verify(self): return self._verify_not_exists() def install(self): self.logger.debug("Augeas: Removing %s" % self.get_path()) return self._augeas.remove(self.get_path()) class Move(AugeasCommand): """ Augeas ``move`` command """ def __init__(self, entry, command, augeas_obj, logger): AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.source = self.get_path("source") self.dest = self.get_path("destination") def verify(self): return (self._verify_not_exists(self.source), self._verify_exists(self.dest)) def install(self): self.logger.debug("Augeas: Moving %s to %s" % (self.source, self.dest)) return self._augeas.move(self.source, self.dest) class Set(AugeasCommand): """ Augeas ``set`` command """ def __init__(self, entry, command, augeas_obj, logger): AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.value = self.command.get("value") def verify(self): return self._verify_set(self.value) def install(self): self.logger.debug("Augeas: Setting %s to %s" % (self.get_path(), self.value)) return self._augeas.set(self.get_path(), self.value) class Clear(Set): """ Augeas ``clear`` command """ def __init__(self, entry, command, augeas_obj, logger): Set.__init__(self, entry, command, augeas_obj, logger) self.value = None class SetMulti(AugeasCommand): """ Augeas ``setm`` command """ def __init__(self, entry, command, augeas_obj, logger): AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.sub = self.command.get("sub") self.value = self.command.get("value") self.base = self.get_path("base") def verify(self): return all(self._verify_set(self.value, path="%s/%s" % (path, self.sub)) for path in self._augeas.match(self.base)) def install(self): return self._augeas.setm(self.base, self.sub, self.value) class Insert(AugeasCommand): """ Augeas ``ins`` command """ 
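    # Illustrative entry fragment (attribute names taken from the lookups
    # below; the path and label values are placeholders):
    #     <Insert path="directive[last()]" label="directive" where="after"/>
    # "where" defaults to "before"; any other value makes install() insert
    # the new node after the one matched by "path".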
def __init__(self, entry, command, augeas_obj, logger): AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.label = self.command.get("label") self.where = self.command.get("where", "before") self.before = self.where == "before" def verify(self): return self._verify_exists("%s/../%s" % (self.get_path(), self.label)) def install(self): self.logger.debug("Augeas: Inserting new %s %s %s" % (self.label, self.where, self.get_path())) return self._augeas.insert(self.get_path(), self.label, self.before) class POSIXAugeas(POSIXTool): """ Handle entries. See :ref:`client-tools-augeas`. """ __req__ = ['name', 'mode', 'owner', 'group'] def __init__(self, config): POSIXTool.__init__(self, config) self._augeas = dict() # file tool for setting initial values of files that don't # exist self.filetool = POSIXFile(config) def get_augeas(self, entry): """ Get an augeas object for the given entry. """ if entry.get("name") not in self._augeas: aug = Augeas() if entry.get("lens"): self.logger.debug("Augeas: Adding %s to include path for %s" % (entry.get("name"), entry.get("lens"))) incl = "/augeas/load/%s/incl" % entry.get("lens") ilen = len(aug.match(incl)) if ilen == 0: self.logger.error("Augeas: Lens %s does not exist" % entry.get("lens")) else: aug.set("%s[%s]" % (incl, ilen + 1), entry.get("name")) aug.load() self._augeas[entry.get("name")] = aug return self._augeas[entry.get("name")] def fully_specified(self, entry): return len(entry.getchildren()) != 0 def get_commands(self, entry): """ Get a list of commands to verify or install. @param entry: The entry to get commands from. @type entry: lxml.etree._Element @param unverified: Only get commands that failed verification. @type unverified: bool @returns: list of :class:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand` objects representing the commands. 
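        Dispatch is by tag name, as implemented below: a child element
        such as <Set path="..." value="..."/> is handed to the command
        class of the same name found in this module (``Set`` in this
        case), while ``Initial`` children are skipped and unknown tags
        are reported as errors on the entry.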
""" rv = [] for cmd in entry: if cmd.tag == "Initial": continue if cmd.tag in globals(): rv.append(globals()[cmd.tag](entry, cmd, self.get_augeas(entry), self.logger)) else: err = "Augeas: Unknown command %s in %s" % (cmd.tag, entry.get("name")) self.logger.error(err) entry.set('qtext', "\n".join([entry.get('qtext', ''), err])) return rv def verify(self, entry, modlist): rv = True for cmd in self.get_commands(entry): try: if not cmd.verify(): err = "Augeas: Command has not been applied to %s: %s" % \ (entry.get("name"), cmd) self.logger.debug(err) entry.set('qtext', "\n".join([entry.get('qtext', ''), err])) rv = False cmd.command.set("verified", "false") else: cmd.command.set("verified", "true") except: # pylint: disable=W0702 err = "Augeas: Unexpected error verifying %s: %s: %s" % \ (entry.get("name"), cmd, sys.exc_info()[1]) self.logger.error(err) entry.set('qtext', "\n".join([entry.get('qtext', ''), err])) rv = False cmd.command.set("verified", "false") return POSIXTool.verify(self, entry, modlist) and rv def install(self, entry): rv = True if entry.get("current_exists", "true") == "false": initial = entry.find("Initial") if initial is not None: self.logger.debug("Augeas: Setting initial data for %s" % entry.get("name")) file_entry = Bcfg2.Client.XML.Element("Path", **dict(entry.attrib)) file_entry.text = initial.text self.filetool.install(file_entry) # re-parse the file self.get_augeas(entry).load() for cmd in self.get_commands(entry): try: cmd.install() except: # pylint: disable=W0702 self.logger.error( "Failure running Augeas command on %s: %s: %s" % (entry.get("name"), cmd, sys.exc_info()[1])) rv = False try: self.get_augeas(entry).save() except: # pylint: disable=W0702 self.logger.error("Failure saving Augeas changes to %s: %s" % (entry.get("name"), sys.exc_info()[1])) rv = False return POSIXTool.install(self, entry) and rv src/lib/Bcfg2/Client/Tools/POSIX/Device.py000066400000000000000000000052541303523157100203350ustar00rootroot00000000000000""" Handle entries """ import os import sys from Bcfg2.Client.Tools.POSIX.base import POSIXTool, device_map class POSIXDevice(POSIXTool): """ Handle entries """ __req__ = ['name', 'dev_type', 'mode', 'owner', 'group'] def fully_specified(self, entry): if entry.get('dev_type') in ['block', 'char']: # check if major/minor are properly specified if (entry.get('major') is None or entry.get('minor') is None): return False return True def verify(self, entry, modlist): """Verify device entry.""" ondisk = self._exists(entry) if not ondisk: return False # attempt to verify device properties as specified in config rv = True dev_type = entry.get('dev_type') if dev_type in ['block', 'char']: major = int(entry.get('major')) minor = int(entry.get('minor')) if major != os.major(ondisk.st_rdev): msg = ("Major number for device %s is incorrect. " "Current major is %s but should be %s" % (entry.get("name"), os.major(ondisk.st_rdev), major)) self.logger.debug('POSIX: ' + msg) entry.set('qtext', entry.get('qtext', '') + "\n" + msg) rv = False if minor != os.minor(ondisk.st_rdev): msg = ("Minor number for device %s is incorrect. 
" "Current minor is %s but should be %s" % (entry.get("name"), os.minor(ondisk.st_rdev), minor)) self.logger.debug('POSIX: ' + msg) entry.set('qtext', entry.get('qtext', '') + "\n" + msg) rv = False return POSIXTool.verify(self, entry, modlist) and rv def install(self, entry): if not self._exists(entry, remove=True): try: dev_type = entry.get('dev_type') mode = device_map[dev_type] | int(entry.get('mode'), 8) if dev_type in ['block', 'char']: major = int(entry.get('major')) minor = int(entry.get('minor')) device = os.makedev(major, minor) os.mknod(entry.get('name'), mode, device) else: os.mknod(entry.get('name'), mode) except (KeyError, OSError, ValueError): err = sys.exc_info()[1] self.logger.error('POSIX: Failed to install %s: %s' % (entry.get('name'), err)) return False return POSIXTool.install(self, entry) src/lib/Bcfg2/Client/Tools/POSIX/Directory.py000066400000000000000000000057031303523157100211010ustar00rootroot00000000000000""" Handle entries """ import os import sys import stat import Bcfg2.Client.XML from Bcfg2.Client.Tools.POSIX.base import POSIXTool class POSIXDirectory(POSIXTool): """ Handle entries """ __req__ = ['name', 'mode', 'owner', 'group'] def verify(self, entry, modlist): ondisk = self._exists(entry) if not ondisk: return False if not stat.S_ISDIR(ondisk[stat.ST_MODE]): self.logger.info("POSIX: %s is not a directory" % entry.get('name')) return False prune = True if entry.get('prune', 'false').lower() == 'true': # check for any extra entries when prune='true' attribute is set try: extras = [os.path.join(entry.get('name'), ent) for ent in os.listdir(entry.get('name')) if os.path.join(entry.get('name'), ent) not in modlist] if extras: prune = False msg = "Directory %s contains extra entries: %s" % \ (entry.get('name'), "; ".join(extras)) self.logger.info("POSIX: " + msg) entry.set('qtext', entry.get('qtext', '') + '\n' + msg) for extra in extras: Bcfg2.Client.XML.SubElement(entry, 'Prune', name=extra) except OSError: prune = True return POSIXTool.verify(self, entry, modlist) and prune def install(self, entry): """Install directory entries.""" fmode = self._exists(entry) if fmode and not stat.S_ISDIR(fmode[stat.ST_MODE]): self.logger.info("POSIX: Found a non-directory entry at %s, " "removing" % entry.get('name')) try: os.unlink(entry.get('name')) fmode = False except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to unlink %s: %s" % (entry.get('name'), err)) return False elif fmode: self.logger.debug("POSIX: Found a pre-existing directory at %s" % entry.get('name')) rv = True if not fmode: rv &= self._makedirs(entry) if entry.get('prune', 'false') == 'true': for pent in entry.findall('Prune'): pname = pent.get('name') try: self.logger.debug("POSIX: Removing %s" % pname) self._remove(pent) except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to unlink %s: %s" % (pname, err)) rv = False return POSIXTool.install(self, entry) and rv src/lib/Bcfg2/Client/Tools/POSIX/File.py000066400000000000000000000217751303523157100200230ustar00rootroot00000000000000""" Handle entries """ import os import sys import stat import difflib import tempfile import Bcfg2.Options from Bcfg2.Client.Tools.POSIX.base import POSIXTool from Bcfg2.Compat import unicode, b64encode, b64decode # pylint: disable=W0622 import Bcfg2.Utils class POSIXFile(POSIXTool): """ Handle entries """ __req__ = ['name', 'mode', 'owner', 'group'] def fully_specified(self, entry): return entry.text is not None or entry.get('empty', 'false') == 'true' def _get_data(self, entry): """ 
Get a tuple of (, ) for the given entry """ is_binary = entry.get('encoding', 'ascii') == 'base64' if entry.get('empty', 'false') == 'true' or not entry.text: tempdata = '' elif is_binary: tempdata = b64decode(entry.text) else: tempdata = entry.text if isinstance(tempdata, unicode) and unicode != str: try: tempdata = tempdata.encode(Bcfg2.Options.setup.encoding) except UnicodeEncodeError: err = sys.exc_info()[1] self.logger.error("POSIX: Error encoding file %s: %s" % (entry.get('name'), err)) return (tempdata, is_binary) def verify(self, entry, modlist): ondisk = self._exists(entry) tempdata, is_binary = self._get_data(entry) if isinstance(tempdata, str) and str != unicode: tempdatasize = len(tempdata) else: tempdatasize = len(tempdata.encode(Bcfg2.Options.setup.encoding)) different = False content = None if not ondisk: # first, see if the target file exists at all; if not, # they're clearly different different = True content = "" elif tempdatasize != ondisk[stat.ST_SIZE]: # next, see if the size of the target file is different # from the size of the desired content different = True else: # finally, read in the target file and compare them # directly. comparison could be done with a checksum, # which might be faster for big binary files, but slower # for everything else try: content = open(entry.get('name')).read() except UnicodeDecodeError: content = open(entry.get('name'), encoding=Bcfg2.Options.setup.encoding).read() except IOError: self.logger.error("POSIX: Failed to read %s: %s" % (entry.get("name"), sys.exc_info()[1])) return False different = content != tempdata if different: self.logger.debug("POSIX: %s has incorrect contents" % entry.get("name")) self._get_diffs( entry, interactive=Bcfg2.Options.setup.interactive, sensitive=entry.get('sensitive', 'false').lower() == 'true', is_binary=is_binary, content=content) return POSIXTool.verify(self, entry, modlist) and not different def _write_tmpfile(self, entry): """ Write the file data to a temp file """ filedata = self._get_data(entry)[0] # get a temp file to write to that is in the same directory as # the existing file in order to preserve any permissions # protections on that directory, and also to avoid issues with # /tmp set nosetuid while creating files that are supposed to # be setuid try: (newfd, newfile) = \ tempfile.mkstemp(prefix=os.path.basename(entry.get("name")), dir=os.path.dirname(entry.get("name"))) except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to create temp file in %s: %s" % (os.path.dirname(entry.get('name')), err)) return False try: if isinstance(filedata, str) and str != unicode: os.fdopen(newfd, 'w').write(filedata) else: os.fdopen(newfd, 'wb').write( filedata.encode(Bcfg2.Options.setup.encoding)) except (OSError, IOError): err = sys.exc_info()[1] self.logger.error("POSIX: Failed to open temp file %s for writing " "%s: %s" % (newfile, entry.get("name"), err)) return False return newfile def _rename_tmpfile(self, newfile, entry): """ Rename the given file to the appropriate filename for entry """ try: os.rename(newfile, entry.get('name')) return True except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to rename temp file %s to %s: %s" % (newfile, entry.get('name'), err)) try: os.unlink(newfile) except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Could not remove temp file %s: %s" % (newfile, err)) return False def install(self, entry): """Install device entries.""" if not os.path.exists(os.path.dirname(entry.get('name'))): if not self._makedirs(entry, 
path=os.path.dirname(entry.get('name'))): return False newfile = self._write_tmpfile(entry) if not newfile: return False rv = self._set_perms(entry, path=newfile) if not self._rename_tmpfile(newfile, entry): return False return POSIXTool.install(self, entry) and rv def _get_diffs(self, entry, interactive=False, # pylint: disable=R0912 sensitive=False, is_binary=False, content=None): """ generate the necessary diffs for entry """ if not interactive and sensitive: return prompt = [entry.get('qtext', '')] attrs = dict() if content is None: # it's possible that we figured out the files are # different without reading in the local file. if the # supplied version of the file is not binary, we now have # to read in the local file to figure out if _it_ is # binary, and either include that fact or the diff in our # prompts for -I and the reports try: content = open(entry.get('name')).read() except UnicodeDecodeError: content = open(entry.get('name'), encoding='utf-8').read() except IOError: self.logger.error("POSIX: Failed to read %s: %s" % (entry.get("name"), sys.exc_info()[1])) return False if not is_binary: is_binary |= not Bcfg2.Utils.is_string( content, Bcfg2.Options.setup.encoding) if is_binary: # don't compute diffs if the file is binary prompt.append('Binary file, no printable diff') attrs['current_bfile'] = b64encode(content) else: diff = self._diff(content, self._get_data(entry)[0], filename=entry.get("name")) if interactive: if diff: udiff = '\n'.join(diff) if hasattr(udiff, "decode"): udiff = udiff.decode(Bcfg2.Options.setup.encoding) try: prompt.append(udiff) except UnicodeEncodeError: prompt.append("Could not encode diff") elif entry.get("empty", "true"): # the file doesn't exist on disk, but there's no # expected content prompt.append("%s does not exist" % entry.get("name")) else: prompt.append("Diff took too long to compute, no " "printable diff") if not sensitive: if diff: attrs["current_bdiff"] = b64encode("\n".join(diff)) else: attrs['current_bfile'] = b64encode(content) if interactive: entry.set("qtext", "\n".join(prompt)) if not sensitive: for attr, val in attrs.items(): entry.set(attr, val) def _diff(self, content1, content2, filename=None): """ Return a unified diff of the two strings """ if filename: fromfile = "%s (on disk)" % filename tofile = "%s (from bcfg2)" % filename else: fromfile = "" tofile = "" return difflib.unified_diff(content1.split('\n'), content2.split('\n'), fromfile=fromfile, tofile=tofile) src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py000066400000000000000000000006521303523157100206670ustar00rootroot00000000000000""" Handle entries """ import os from Bcfg2.Client.Tools.POSIX.base import POSIXLinkTool class POSIXHardlink(POSIXLinkTool): """ Handle entries """ __linktype__ = "hardlink" def _verify(self, entry): return os.path.samefile(entry.get('name'), entry.get('to')) def _link(self, entry): return os.link(entry.get('to'), entry.get('name')) src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py000066400000000000000000000030131303523157100214430ustar00rootroot00000000000000""" Handle entries """ import os import sys from Bcfg2.Client.Tools.POSIX.base import POSIXTool class POSIXNonexistent(POSIXTool): """ Handle entries """ __req__ = ['name'] def verify(self, entry, _): if os.path.lexists(entry.get('name')): self.logger.debug("POSIX: %s exists but should not" % entry.get("name")) return False return True def install(self, entry): ename = entry.get('name') recursive = entry.get('recursive', '').lower() == 'true' if recursive: # ensure that configuration spec is 
consistent first for struct in self.config.getchildren(): for el in struct.getchildren(): if (el.tag == 'Path' and el.get('type') != 'nonexistent' and el.get('name').startswith(ename)): self.logger.error('POSIX: Not removing %s. One or ' 'more files in this directory are ' 'specified in your configuration.' % ename) return False try: self._remove(entry, recursive=recursive) return True except OSError: err = sys.exc_info()[1] self.logger.error('POSIX: Failed to remove %s: %s' % (ename, err)) return False src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py000066400000000000000000000003711303523157100214440ustar00rootroot00000000000000""" Handle entries """ from Bcfg2.Client.Tools.POSIX.base import POSIXTool class POSIXPermissions(POSIXTool): """ Handle entries """ __req__ = ['name', 'mode', 'owner', 'group'] src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py000066400000000000000000000010161303523157100205540ustar00rootroot00000000000000""" Handle entries """ import os from Bcfg2.Client.Tools.POSIX.base import POSIXLinkTool class POSIXSymlink(POSIXLinkTool): """ Handle entries """ __linktype__ = "symlink" def _verify(self, entry): sloc = os.readlink(entry.get('name')) if sloc != entry.get('to'): entry.set('current_to', sloc) return False return True def _link(self, entry): return os.symlink(entry.get('to'), entry.get('name')) src/lib/Bcfg2/Client/Tools/POSIX/__init__.py000066400000000000000000000160231303523157100206710ustar00rootroot00000000000000"""All POSIX Type client support for Bcfg2.""" import os import re import sys import shutil import Bcfg2.Options import Bcfg2.Client.Tools from datetime import datetime from Bcfg2.Compat import walk_packages from Bcfg2.Client.Tools.POSIX.base import POSIXTool class POSIX(Bcfg2.Client.Tools.Tool): """POSIX File support code.""" options = Bcfg2.Client.Tools.Tool.options + POSIXTool.options + [ Bcfg2.Options.PathOption( cf=('paranoid', 'path'), default='/var/cache/bcfg2', dest='paranoid_path', help='Specify path for paranoid file backups'), Bcfg2.Options.Option( cf=('paranoid', 'max_copies'), default=1, type=int, dest='paranoid_copies', help='Specify the number of paranoid copies you want'), Bcfg2.Options.BooleanOption( '-P', '--paranoid', cf=('client', 'paranoid'), help='Make automatic backups of config files')] def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) self._handlers = self._load_handlers() self.logger.debug("POSIX: Handlers loaded: %s" % (", ".join(self._handlers.keys()))) self.__req__ = dict(Path=dict()) for etype, hdlr in self._handlers.items(): self.__req__['Path'][etype] = hdlr.__req__ self.__handles__.append(('Path', etype)) # Tool.__init__() sets up the list of handled entries, but we # need to do it again after __handles__ has been populated. we # can't populate __handles__ when the class is created because # _load_handlers() _must_ be called at run-time, not at # compile-time. This also has to _extend_ self.handled, not # set it, because self.handled has some really crazy # semi-global thing going that, frankly, scares the crap out # of me. for struct in config: self.handled.extend([e for e in struct if (e not in self.handled and self.handlesEntry(e))]) def _load_handlers(self): """ load available POSIX tool handlers. this must be called at run-time, not at compile-time, or we get wierd circular import issues. 
""" rv = dict() for submodule in walk_packages(path=__path__, prefix=__name__ + "."): mname = submodule[1].rsplit('.', 1)[-1] if mname == 'base': continue try: module = getattr(__import__(submodule[1]).Client.Tools.POSIX, mname) except ImportError: continue hdlr = getattr(module, "POSIX" + mname) if POSIXTool in hdlr.__mro__: # figure out what entry type this handler handles etype = hdlr.__name__[5:].lower() rv[etype] = hdlr(self.config) return rv def canVerify(self, entry): if not Bcfg2.Client.Tools.Tool.canVerify(self, entry): return False if not self._handlers[entry.get("type")].fully_specified(entry): self.logger.error('POSIX: Cannot verify incomplete entry %s. ' 'Try running bcfg2-lint.' % entry.get('name')) return False return True def canInstall(self, entry): """Check if entry is complete for installation.""" if not Bcfg2.Client.Tools.Tool.canInstall(self, entry): return False if not self._handlers[entry.get("type")].fully_specified(entry): self.logger.error('POSIX: Cannot install incomplete entry %s. ' 'Try running bcfg2-lint.' % entry.get('name')) return False return True def InstallPath(self, entry): """Dispatch install to the proper method according to type""" self.logger.debug("POSIX: Installing entry %s:%s:%s" % (entry.tag, entry.get("type"), entry.get("name"))) self._paranoid_backup(entry) return self._handlers[entry.get("type")].install(entry) def VerifyPath(self, entry, modlist): """Dispatch verify to the proper method according to type""" self.logger.debug("POSIX: Verifying entry %s:%s:%s" % (entry.tag, entry.get("type"), entry.get("name"))) ret = self._handlers[entry.get("type")].verify(entry, modlist) if Bcfg2.Options.setup.interactive and not ret: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), entry.get('type'), entry.get('name'))) return ret def _prune_old_backups(self, entry): """ Remove old paranoid backup files """ bkupnam = entry.get('name').replace('/', '_') bkup_re = re.compile( bkupnam + r'_\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}$') # current list of backups for this file try: bkuplist = [f for f in os.listdir(Bcfg2.Options.setup.paranoid_path) if bkup_re.match(f)] except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to create backup list in %s: %s" % (Bcfg2.Options.setup.paranoid_path, err)) return bkuplist.sort() while len(bkuplist) >= int(Bcfg2.Options.setup.paranoid_copies): # remove the oldest backup available oldest = bkuplist.pop(0) self.logger.info("POSIX: Removing old backup %s" % oldest) try: os.remove(os.path.join(Bcfg2.Options.setup.paranoid_path, oldest)) except OSError: err = sys.exc_info()[1] self.logger.error( "POSIX: Failed to remove old backup %s: %s" % (os.path.join(Bcfg2.Options.setup.paranoid_path, oldest), err)) def _paranoid_backup(self, entry): """ Take a backup of the specified entry for paranoid mode """ if (entry.get("paranoid", 'false').lower() == 'true' and Bcfg2.Options.setup.paranoid and entry.get('current_exists', 'true') == 'true' and not os.path.isdir(entry.get("name"))): self._prune_old_backups(entry) bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'), datetime.isoformat(datetime.now())) bfile = os.path.join(Bcfg2.Options.setup.paranoid_path, bkupnam) try: shutil.copy(entry.get('name'), bfile) self.logger.info("POSIX: Backup of %s saved to %s" % (entry.get('name'), bfile)) except IOError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to create backup file for " "%s: %s" % (entry.get('name'), err)) 
src/lib/Bcfg2/Client/Tools/POSIX/base.py000066400000000000000000001010521303523157100200410ustar00rootroot00000000000000""" Base class for tools that handle POSIX (Path) entries """ import os import sys import pwd import grp import stat import copy import errno import shutil import Bcfg2.Client.Tools import Bcfg2.Client.XML import Bcfg2.Options from Bcfg2.Compat import oct_mode try: import selinux HAS_SELINUX = selinux.is_selinux_enabled() except ImportError: HAS_SELINUX = False try: import posix1e HAS_ACLS = True # map between permissions characters and numeric ACL constants ACL_MAP = dict(r=posix1e.ACL_READ, w=posix1e.ACL_WRITE, x=posix1e.ACL_EXECUTE) except ImportError: HAS_ACLS = False ACL_MAP = dict(r=4, w=2, x=1) # map between dev_type attribute and stat constants device_map = dict(block=stat.S_IFBLK, # pylint: disable=C0103 char=stat.S_IFCHR, fifo=stat.S_IFIFO) class POSIXTool(Bcfg2.Client.Tools.Tool): """ Base class for tools that handle POSIX (Path) entries """ options = [ Bcfg2.Options.Option( cf=('POSIX', 'secontext_ignore'), default=['anon_inodefs_t', 'bdev_t', 'binfmt_misc_fs_t', 'capifs_t', 'configfs_t', 'cpusetfs_t', 'ecryptfs_t', 'eventpollfs_t', 'futexfs_t', 'hugetlbfs_t', 'ibmasmfs_t', 'inotifyfs_t', 'mvfs_t', 'nfsd_fs_t', 'oprofilefs_t', 'ramfs_t', 'romfs_t', 'rpc_pipefs_t', 'spufs_t', 'squash_t', 'vmblock_t', 'vxfs_t', 'xenfs_t', 'autofs_t', 'cifs_t', 'dosfs_t', 'fusefs_t', 'iso9660_t', 'removable_t', 'nfs_t'], help='secontext types to ignore labeling errors', type=Bcfg2.Options.Types.colon_list) ] def fully_specified(self, entry): # pylint: disable=W0613 """ return True if the entry is fully specified """ # checking is done by __req__ return True def verify(self, entry, modlist): # pylint: disable=W0613 """ return True if the entry is correct on disk """ if not self._verify_metadata(entry): return False if entry.get('recursive', 'false').lower() == 'true': # verify ownership information recursively for root, dirs, files in os.walk(entry.get('name')): for path in dirs + files: if not self._verify_metadata(entry, path=os.path.join(root, path)): return False return True def install(self, entry): """ Install the given entry. Return True on success. """ rv = True rv &= self._set_perms(entry) if entry.get('recursive', 'false').lower() == 'true': # set metadata recursively for root, dirs, files in os.walk(entry.get('name')): for path in dirs + files: rv &= self._set_perms(entry, path=os.path.join(root, path)) return rv def _remove(self, entry, recursive=True): """ Remove a Path entry, whatever that takes """ if os.path.islink(entry.get('name')): os.unlink(entry.get('name')) elif os.path.isdir(entry.get('name')): if recursive: shutil.rmtree(entry.get('name')) else: os.rmdir(entry.get('name')) else: os.unlink(entry.get('name')) def _exists(self, entry, remove=False): """ check for existing paths and optionally remove them. 
if the path exists, return the lstat of it """ try: ondisk = os.lstat(entry.get('name')) if remove: try: self._remove(entry) return None except OSError: err = sys.exc_info()[1] self.logger.warning('POSIX: Failed to unlink %s: %s' % (entry.get('name'), err)) return ondisk # probably still exists else: return ondisk except OSError: return None def _set_perms(self, entry, path=None): """ set permissions on the given entry, or on the given path according to the given entry """ if path is None: path = entry.get("name") rv = True if os.geteuid() == 0: if entry.get("owner") and entry.get("group"): try: self.logger.debug("POSIX: Setting ownership of %s to %s:%s" % (path, self._norm_entry_uid(entry), self._norm_entry_gid(entry))) os.chown(path, self._norm_entry_uid(entry), self._norm_entry_gid(entry)) except (OSError, KeyError): self.logger.error('POSIX: Failed to change ownership of %s' % path) rv = False if sys.exc_info()[0] == KeyError: os.chown(path, 0, 0) else: self.logger.debug("POSIX: Run as non-root, not setting ownership") if entry.get("mode"): wanted_mode = int(entry.get('mode'), 8) if entry.get('dev_type'): wanted_mode |= device_map[entry.get('dev_type')] try: self.logger.debug("POSIX: Setting mode on %s to %s" % (path, oct_mode(wanted_mode))) os.chmod(path, wanted_mode) except (OSError, KeyError): self.logger.error('POSIX: Failed to change mode on %s' % path) rv = False if entry.get('mtime'): try: os.utime(entry.get('name'), (int(entry.get('mtime')), int(entry.get('mtime')))) except OSError: self.logger.error("POSIX: Failed to set mtime of %s" % path) rv = False rv &= self._set_secontext(entry, path=path) rv &= self._set_acls(entry, path=path) return rv def _apply_acl(self, acl, path, atype=None): """ Apply the given ACL to the given path """ if atype is None: # the default value for atype is set this way (rather than # in the argument list) because posix1e libs may not be # installed, and this code is executed at run-time (and # thus will never be reached if ACLs aren't supported), # but argument lists are parsed at compile-time atype = posix1e.ACL_TYPE_ACCESS if atype == posix1e.ACL_TYPE_ACCESS: atype_str = "access" else: atype_str = "default" if acl.valid(): self.logger.debug("POSIX: Applying %s ACL to %s:" % (atype_str, path)) for line in str(acl).splitlines(): self.logger.debug(" " + line) try: acl.applyto(path, atype) return True except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to set ACLs on %s: %s" % (path, err)) return False else: self.logger.warning("POSIX: %s ACL created for %s was invalid:" % (atype_str.title(), path)) for line in str(acl).splitlines(): self.logger.warning(" " + line) return False def _set_acls(self, entry, path=None): # pylint: disable=R0912 """ set POSIX ACLs on the file on disk according to the config """ if not HAS_ACLS: if entry.findall("ACL"): self.logger.debug("POSIX: ACLs listed for %s but no pylibacl " "library installed" % entry.get('name')) return True acls = self._list_entry_acls(entry) if path is None: path = entry.get("name") try: acl = posix1e.ACL(file=path) except IOError: err = sys.exc_info()[1] if err.errno == 95: # fs is mounted noacl if acls: self.logger.error("POSIX: Cannot set ACLs on filesystem " "mounted without ACL support: %s" % path) else: # no ACLs on the entry, no ACLs on the filesystem. # all is well in the world. 
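                    # (errno 95 is EOPNOTSUPP/"Operation not supported" on
                    # Linux -- compare the errno.EOPNOTSUPP check in
                    # _set_secontext() below.)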
return True else: self.logger.error("POSIX: Error getting current ACLS on %s: %s" % (path, err)) return False # clear ACLs out so we start fresh -- way easier than trying # to add/remove/modify ACLs for aclentry in acl: if aclentry.tag_type in [posix1e.ACL_USER, posix1e.ACL_GROUP]: acl.delete_entry(aclentry) if os.path.isdir(path): defacl = posix1e.ACL(filedef=path) for aclentry in defacl: if aclentry.tag_type in [posix1e.ACL_USER, posix1e.ACL_USER_OBJ, posix1e.ACL_GROUP, posix1e.ACL_GROUP_OBJ, posix1e.ACL_OTHER]: defacl.delete_entry(aclentry) else: defacl = None if not acls: self.logger.debug("POSIX: Removed ACLs from %s" % entry.get("name")) return True for aclkey, perms in acls.items(): atype, scope, qualifier = aclkey if atype == "default": if defacl is None: self.logger.warning("POSIX: Cannot set default ACLs on " "non-directory %s" % path) continue aclentry = posix1e.Entry(defacl) else: aclentry = posix1e.Entry(acl) for perm in ACL_MAP.values(): if perm & perms: aclentry.permset.add(perm) aclentry.tag_type = scope try: if scope == posix1e.ACL_USER: scopename = "user" if qualifier: aclentry.qualifier = self._norm_uid(qualifier) else: aclentry.tag_type = posix1e.ACL_USER_OBJ elif scope == posix1e.ACL_GROUP: scopename = "group" if qualifier: aclentry.qualifier = self._norm_gid(qualifier) else: aclentry.tag_type = posix1e.ACL_GROUP_OBJ except (OSError, KeyError): err = sys.exc_info()[1] self.logger.error("POSIX: Could not resolve %s %s: %s" % (scopename, qualifier, err)) continue acl.calc_mask() rv = self._apply_acl(acl, path) if defacl: defacl.calc_mask() rv &= self._apply_acl(defacl, path, posix1e.ACL_TYPE_DEFAULT) return rv def _set_secontext(self, entry, path=None): # pylint: disable=R0911 """ set the SELinux context of the file on disk according to the config""" if not HAS_SELINUX: return True if path is None: path = entry.get("name") context = entry.get("secontext") if not context: # no context listed return True secontext = selinux.lgetfilecon(path)[1].split(":")[2] if secontext in Bcfg2.Options.setup.secontext_ignore: return True try: if context == '__default__': selinux.restorecon(path) return True else: return selinux.lsetfilecon(path, context) == 0 except OSError: err = sys.exc_info()[1] if err.errno == errno.EOPNOTSUPP: # Operation not supported if context != '__default__': self.logger.debug("POSIX: Failed to set SELinux context " "for %s: %s" % (path, err)) return False return True err = sys.exc_info()[1] self.logger.error("POSIX: Failed to set or restore SELinux " "context for %s: %s" % (path, err)) return False def _norm_gid(self, gid): """ This takes a group name or gid and returns the corresponding gid. """ try: return int(gid) except ValueError: return int(grp.getgrnam(gid)[2]) def _norm_entry_gid(self, entry): """ Given an entry, return the GID number of the desired group """ try: return self._norm_gid(entry.get('group')) except (OSError, KeyError): err = sys.exc_info()[1] self.logger.error('POSIX: GID normalization failed for %s on %s: ' '%s' % (entry.get('group'), entry.get('name'), err)) return 0 def _norm_uid(self, uid): """ This takes a username or uid and returns the corresponding uid. 
""" try: return int(uid) except ValueError: return int(pwd.getpwnam(uid)[2]) def _norm_entry_uid(self, entry): """ Given an entry, return the UID number of the desired owner """ try: return self._norm_uid(entry.get("owner")) except (OSError, KeyError): err = sys.exc_info()[1] self.logger.error('POSIX: UID normalization failed for %s on %s: ' '%s' % (entry.get('owner'), entry.get('name'), err)) return 0 def _norm_acl_perms(self, perms): """ takes a representation of an ACL permset and returns a digit representing the permissions entailed by it. representations can either be a single octal digit, a string of up to three 'r', 'w', 'x', or '-' characters, or a posix1e.Permset object""" if perms is None: return 0 elif hasattr(perms, 'test'): # Permset object return sum([p for p in ACL_MAP.values() if perms.test(p)]) try: # single octal digit rv = int(perms) if rv >= 0 and rv < 8: return rv else: self.logger.error("POSIX: Permissions digit out of range in " "ACL: %s" % perms) return 0 except ValueError: # couldn't be converted to an int; process as a string if len(perms) > 3: self.logger.error("POSIX: Permissions string too long in ACL: " "%s" % perms) return 0 rv = 0 for char in perms: if char == '-': continue elif char not in ACL_MAP: self.logger.warning("POSIX: Unknown permissions character " "in ACL: %s" % char) elif rv & ACL_MAP[char]: self.logger.warning("POSIX: Duplicate permissions " "character in ACL: %s" % perms) else: rv |= ACL_MAP[char] return rv def _acl2string(self, aclkey, perms): """ Get a string representation of the given ACL. aclkey must be a tuple of (, , ) """ atype, scope, qualifier = aclkey if not qualifier: qualifier = '' acl_str = [] if atype == 'default': acl_str.append(atype) if scope == posix1e.ACL_USER or scope == posix1e.ACL_USER_OBJ: acl_str.append("user") elif scope == posix1e.ACL_GROUP or scope == posix1e.ACL_GROUP_OBJ: acl_str.append("group") elif scope == posix1e.ACL_OTHER: acl_str.append("other") acl_str.append(qualifier) acl_str.append(self._acl_perm2string(perms)) return ":".join(acl_str) def _acl_perm2string(self, perm): """ Turn an octal permissions integer into a string suitable for use with ACLs """ rv = [] for char in 'rwx': if ACL_MAP[char] & perm: rv.append(char) else: rv.append('-') return ''.join(rv) def _gather_data(self, path): """ Get data on the existing state of -- e.g., whether or not it exists, owner, group, permissions, etc. 
""" try: ondisk = os.lstat(path) except OSError: self.logger.debug("POSIX: %s does not exist" % path) return (False, None, None, None, None, None) try: owner = str(ondisk[stat.ST_UID]) except OSError: err = sys.exc_info()[1] self.logger.debug("POSIX: Could not get current owner of %s: %s" % (path, err)) owner = None except KeyError: self.logger.error('POSIX: User resolution failed for %s' % path) owner = None try: group = str(ondisk[stat.ST_GID]) except (OSError, KeyError): err = sys.exc_info()[1] self.logger.debug("POSIX: Could not get current group of %s: %s" % (path, err)) group = None except KeyError: self.logger.error('POSIX: Group resolution failed for %s' % path) group = None try: mode = oct_mode(ondisk[stat.ST_MODE])[-4:] except (OSError, KeyError, TypeError): err = sys.exc_info()[1] self.logger.debug("POSIX: Could not get current permissions of " "%s: %s" % (path, err)) mode = None if HAS_SELINUX: try: secontext = selinux.lgetfilecon(path)[1].split(":")[2] except (OSError, KeyError): err = sys.exc_info()[1] self.logger.debug("POSIX: Could not get current SELinux " "context of %s: %s" % (path, err)) secontext = None else: secontext = None if HAS_ACLS and not stat.S_ISLNK(ondisk[stat.ST_MODE]): acls = self._list_file_acls(path) else: acls = None return (ondisk, owner, group, mode, secontext, acls) def _verify_metadata(self, entry, path=None): # pylint: disable=R0912 """ generic method to verify mode, owner, group, secontext, acls, and mtime """ # allow setting an alternate path for recursive permissions checking if path is None: path = entry.get('name') attrib = dict() ondisk, attrib['current_owner'], attrib['current_group'], \ attrib['current_mode'], attrib['current_secontext'] = \ self._gather_data(path)[0:5] if not ondisk: entry.set('current_exists', 'false') return False # we conditionally verify every bit of metadata only if it's # specified on the entry. consequently, canVerify() and # fully_specified() are preconditions of _verify_metadata(), # since they will ensure that everything that needs to be # specified actually is. this lets us gracefully handle # symlink and hardlink entries, which have SELinux contexts # but not other permissions, optional secontext and mtime # attrs, and so on. wanted_owner, wanted_group, wanted_mode, mtime = None, None, None, -1 if entry.get('mtime', '-1') != '-1': mtime = str(ondisk[stat.ST_MTIME]) if entry.get("owner"): wanted_owner = str(self._norm_entry_uid(entry)) if entry.get("group"): wanted_group = str(self._norm_entry_gid(entry)) if entry.get("mode"): while len(entry.get('mode', '')) < 4: entry.set('mode', '0' + entry.get('mode', '')) wanted_mode = int(entry.get('mode'), 8) errors = [] if wanted_owner and attrib['current_owner'] != wanted_owner: errors.append("Owner for path %s is incorrect. " "Current owner is %s but should be %s" % (path, attrib['current_owner'], entry.get('owner'))) if wanted_group and attrib['current_group'] != wanted_group: errors.append("Group for path %s is incorrect. " "Current group is %s but should be %s" % (path, attrib['current_group'], entry.get('group'))) if (wanted_mode and oct_mode(int(attrib['current_mode'], 8)) != oct_mode(wanted_mode)): errors.append("Permissions for path %s are incorrect. " "Current permissions are %s but should be %s" % (path, attrib['current_mode'], entry.get('mode'))) if entry.get('mtime'): attrib['current_mtime'] = mtime if mtime != entry.get('mtime', '-1'): errors.append("mtime for path %s is incorrect. 
" "Current mtime is %s but should be %s" % (path, mtime, entry.get('mtime'))) if HAS_SELINUX: wanted_secontext = None if entry.get("secontext") == "__default__": try: wanted_secontext = \ selinux.matchpathcon( path, ondisk[stat.ST_MODE])[1].split(":")[2] except OSError: errors.append("%s has no default SELinux context" % entry.get("name")) elif entry.get("secontext"): wanted_secontext = entry.get("secontext").split(":")[2] if (wanted_secontext and attrib['current_secontext'] != wanted_secontext): errors.append("SELinux context for path %s is incorrect. " "Current context is %s but should be %s" % (path, attrib['current_secontext'], wanted_secontext)) if errors: for error in errors: self.logger.debug("POSIX: " + error) entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) if path == entry.get("name"): for attr, val in attrib.items(): if val is not None: entry.set(attr, str(val)) return self._verify_acls(entry, path=path) and len(errors) == 0 def _list_entry_acls(self, entry): """ Given an entry, get a dict of POSIX ACLs described in that entry. """ wanted = dict() for acl in entry.findall("ACL"): if acl.get("scope") == "user": if acl.get("user"): scope = posix1e.ACL_USER else: scope = posix1e.ACL_USER_OBJ elif acl.get("scope") == "group": if acl.get("group"): scope = posix1e.ACL_GROUP else: scope = posix1e.ACL_GROUP_OBJ elif acl.get("scope") == "other": scope = posix1e.ACL_OTHER else: self.logger.error("POSIX: Unknown ACL scope %s" % acl.get("scope")) continue if acl.get('perms') is None: self.logger.error("POSIX: No permissions set for ACL: %s" % Bcfg2.Client.XML.tostring(acl)) continue qual = acl.get(acl.get("scope")) if not qual: qual = '' wanted[(acl.get("type"), scope, qual)] = \ self._norm_acl_perms(acl.get('perms')) return wanted def _list_file_acls(self, path): """ Given a path, get a dict of existing POSIX ACLs on that path. The dict keys are a tuple of (, , . values are the permissions of the described ACL. """ def _process_acl(acl, atype): """ Given an ACL object, process it appropriately and add it to the return value """ try: qual = '' if acl.tag_type == posix1e.ACL_USER: qual = pwd.getpwuid(acl.qualifier)[0] elif acl.tag_type == posix1e.ACL_GROUP: qual = grp.getgrgid(acl.qualifier)[0] elif atype == "access" or acl.tag_type == posix1e.ACL_MASK: return except (OSError, KeyError): err = sys.exc_info()[1] self.logger.error("POSIX: Lookup of %s %s failed: %s" % (atype, acl.qualifier, err)) qual = acl.qualifier existing[(atype, acl.tag_type, qual)] = \ self._norm_acl_perms(acl.permset) existing = dict() try: for acl in posix1e.ACL(file=path): _process_acl(acl, "access") except IOError: err = sys.exc_info()[1] if err.errno == 95: # fs is mounted noacl self.logger.debug("POSIX: Filesystem mounted without ACL " "support: %s" % path) else: self.logger.error("POSIX: Error getting current ACLS on %s: %s" % (path, err)) return existing if os.path.isdir(path): for acl in posix1e.ACL(filedef=path): _process_acl(acl, "default") return existing def _verify_acls(self, entry, path=None): # pylint: disable=R0912 """ verify POSIX ACLs on the given entry. 
return True if all ACLS are correct, false otherwise """ def _verify_acl(aclkey, perms): """ Given ACL data, process it appropriately and add it to missing or wrong lists if appropriate """ if aclkey not in existing: missing.append(self._acl2string(aclkey, perms)) elif existing[aclkey] != perms: wrong.append((self._acl2string(aclkey, perms), self._acl2string(aclkey, existing[aclkey]))) if path == entry.get("name"): atype, scope, qual = aclkey aclentry = Bcfg2.Client.XML.Element("ACL", type=atype, perms=str(perms)) if (scope == posix1e.ACL_USER or scope == posix1e.ACL_USER_OBJ): aclentry.set("scope", "user") elif (scope == posix1e.ACL_GROUP or scope == posix1e.ACL_GROUP_OBJ): aclentry.set("scope", "group") elif scope == posix1e.ACL_OTHER: aclentry.set("scope", "other") else: self.logger.debug("POSIX: Unknown ACL scope %s on %s" % (scope, path)) return if scope != posix1e.ACL_OTHER: aclentry.set(aclentry.get("scope"), qual) entry.append(aclentry) if not HAS_ACLS: if entry.findall("ACL"): self.logger.debug("POSIX: ACLs listed for %s but no pylibacl " "library installed" % entry.get('name')) return True if path is None: path = entry.get("name") # create lists of normalized representations of the ACLs we want # and the ACLs we have. this will make them easier to compare # than trying to mine that data out of the ACL objects and XML # objects and compare it at the same time. wanted = self._list_entry_acls(entry) existing = self._list_file_acls(path) missing = [] extra = [] wrong = [] for aclkey, perms in wanted.items(): _verify_acl(aclkey, perms) for aclkey, perms in existing.items(): if aclkey not in wanted: extra.append(self._acl2string(aclkey, perms)) msg = [] if missing: msg.append("%s ACLs are missing: %s" % (len(missing), ", ".join(missing))) if wrong: msg.append("%s ACLs are wrong: %s" % (len(wrong), "; ".join(["%s should be %s" % (e, w) for w, e in wrong]))) if extra: msg.append("%s extra ACLs: %s" % (len(extra), ", ".join(extra))) if msg: msg.insert(0, "POSIX: ACLs for %s are incorrect." % path) self.logger.debug(msg[0]) for line in msg[1:]: self.logger.debug(" " + line) entry.set('qtext', "\n".join([entry.get("qtext", '')] + msg)) return False return True def _makedirs(self, entry, path=None): """ os.makedirs helpfully creates all parent directories for us, but it sets permissions according to umask, which is probably wrong. we need to find out which directories were created and try to set permissions on those (http://trac.mcs.anl.gov/projects/bcfg2/ticket/1125 and http://trac.mcs.anl.gov/projects/bcfg2/ticket/1134) """ created = [] if path is None: path = entry.get("name") cur = path while cur and cur != '/': if not os.path.exists(cur): created.append(cur) cur = os.path.dirname(cur) rv = True try: os.makedirs(path) except OSError: err = sys.exc_info()[1] self.logger.error('POSIX: Failed to create directory %s: %s' % (path, err)) rv = False # set auto-created directories to mode 755 and use best effort for # permissions. If you need something else, you should specify it in # your config. 
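        # (The deep-copied entry below likewise gets the execute bit OR'd into
        # any ACL permsets, so that auto-created parent directories remain
        # traversable.)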
tmpentry = copy.deepcopy(entry) tmpentry.set('mode', '0755') for acl in tmpentry.findall('ACL'): acl.set('perms', oct_mode(self._norm_acl_perms(acl.get('perms')) | ACL_MAP['x'])) for cpath in created: self._set_perms(tmpentry, path=cpath) return rv class POSIXLinkTool(POSIXTool): """ Base handler for link (symbolic and hard) entries """ __req__ = ['name', 'to'] __linktype__ = None def verify(self, entry, modlist): rv = True try: if not self._verify(entry): msg = "%s %s is incorrect" % (self.__linktype__.title(), entry.get('name')) self.logger.debug("POSIX: " + msg) entry.set('qtext', "\n".join([entry.get('qtext', ''), msg])) rv = False except OSError: self.logger.debug("POSIX: %s %s does not exist" % (entry.tag, entry.get("name"))) entry.set('current_exists', 'false') return False return POSIXTool.verify(self, entry, modlist) and rv def _verify(self, entry): """ perform actual verification of the link entry """ raise NotImplementedError def install(self, entry): ondisk = self._exists(entry, remove=True) if ondisk: self.logger.info("POSIX: %s %s cleanup failed" % (self.__linktype__.title(), entry.get('name'))) try: self._link(entry) rv = True except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to create %s %s to %s: %s" % (self.__linktype__, entry.get('name'), entry.get('to'), err)) rv = False return POSIXTool.install(self, entry) and rv def _link(self, entry): """ create the link """ raise NotImplementedError src/lib/Bcfg2/Client/Tools/POSIXUsers.py000066400000000000000000000335241303523157100201610ustar00rootroot00000000000000""" A tool to handle creating users and groups with useradd/mod/del and groupadd/mod/del """ import pwd import grp import Bcfg2.Options import Bcfg2.Client.XML import Bcfg2.Client.Tools from Bcfg2.Utils import PackedDigitRange def uid_range_type(val): """ Option type to unpack a list of numerical ranges """ return PackedDigitRange(*Bcfg2.Options.Types.comma_list(val)) class POSIXUsers(Bcfg2.Client.Tools.Tool): """ A tool to handle creating users and groups with useradd/mod/del and groupadd/mod/del """ options = Bcfg2.Client.Tools.Tool.options + [ Bcfg2.Options.Option( cf=('POSIXUsers', 'uid_whitelist'), default=[], type=uid_range_type, help="UID ranges the POSIXUsers tool will manage"), Bcfg2.Options.Option( cf=('POSIXUsers', 'gid_whitelist'), default=[], type=uid_range_type, help="GID ranges the POSIXUsers tool will manage"), Bcfg2.Options.Option( cf=('POSIXUsers', 'uid_blacklist'), default=[], type=uid_range_type, help="UID ranges the POSIXUsers tool will not manage"), Bcfg2.Options.Option( cf=('POSIXUsers', 'gid_blacklist'), default=[], type=uid_range_type, help="GID ranges the POSIXUsers tool will not manage")] __execs__ = ['/usr/sbin/useradd', '/usr/sbin/usermod', '/usr/sbin/userdel', '/usr/sbin/groupadd', '/usr/sbin/groupmod', '/usr/sbin/groupdel'] __handles__ = [('POSIXUser', None), ('POSIXGroup', None)] __req__ = dict(POSIXUser=['name'], POSIXGroup=['name']) #: A mapping of XML entry attributes to the indexes of #: corresponding values in the get{pw|gr}all data structures attr_mapping = dict(POSIXUser=dict(name=0, uid=2, gecos=4, home=5, shell=6), POSIXGroup=dict(name=0, gid=2)) #: A mapping that describes the attribute name of the id of a given #: user or group id_mapping = dict(POSIXUser="uid", POSIXGroup="gid") def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) self.set_defaults = dict(POSIXUser=self.populate_user_entry, POSIXGroup=lambda g: g) self._existing = None self._whitelist = 
dict(POSIXUser=Bcfg2.Options.setup.uid_whitelist, POSIXGroup=Bcfg2.Options.setup.gid_whitelist) self._blacklist = dict(POSIXUser=Bcfg2.Options.setup.uid_blacklist, POSIXGroup=Bcfg2.Options.setup.gid_blacklist) @property def existing(self): """ Get a dict of existing users and groups """ if self._existing is None: self._existing = dict(POSIXUser=dict([(u[0], u) for u in pwd.getpwall()]), POSIXGroup=dict([(g[0], g) for g in grp.getgrall()])) return self._existing def _in_managed_range(self, tag, eid): """ Check if the given uid or gid is in the appropriate managed range. This means that either a) a whitelist is defined, and the uid/gid is in that whitelist; or b) no whitelist is defined, and the uid/gid is not in the blacklist. """ if not self._whitelist[tag]: return eid not in self._blacklist[tag] else: return eid in self._whitelist[tag] def canInstall(self, entry): if not Bcfg2.Client.Tools.Tool.canInstall(self, entry): return False eid = entry.get(self.id_mapping[entry.tag]) if eid is not None and not self._in_managed_range(entry.tag, eid): if self._whitelist[entry.tag] is not None: err = "not in whitelist" else: # blacklisted err = "in blacklist" self.logger.debug("%s: %s %s %s: %s" % (self.primarykey(entry), err, self.id_mapping[entry.tag], eid, self._blacklist[entry.tag])) return False return True def Inventory(self, structures=None): if not structures: structures = self.config.getchildren() # we calculate a list of all POSIXUser and POSIXGroup entries, # and then add POSIXGroup entries that are required to create # the primary group for each user to the structures. this is # sneaky and possibly evil, but it works great. groups = [] for struct in structures: groups.extend([e.get("name") for e in struct.findall("POSIXGroup")]) for struct in structures: for entry in struct.findall("POSIXUser"): group = self.set_defaults[entry.tag](entry).get('group') if group and group not in groups: self.logger.debug("POSIXUsers: Adding POSIXGroup entry " "'%s' for user '%s'" % (group, entry.get("name"))) struct.append(Bcfg2.Client.XML.Element("POSIXGroup", name=group)) return Bcfg2.Client.Tools.Tool.Inventory(self, structures) Inventory.__doc__ = Bcfg2.Client.Tools.Tool.Inventory.__doc__ def FindExtra(self): extra = [] for handles in self.__handles__: tag = handles[0] specified = [] for entry in self.getSupportedEntries(): if entry.tag == tag: specified.append(entry.get("name")) for name, data in self.existing[tag].items(): eid = data[self.attr_mapping[tag][self.id_mapping[tag]]] if name not in specified and self._in_managed_range(tag, eid): extra.append(Bcfg2.Client.XML.Element(tag, name=name)) return extra def populate_user_entry(self, entry): """ Given a POSIXUser entry, set all of the 'missing' attributes with their defaults """ defaults = dict(group=entry.get('name'), gecos=entry.get('name'), shell='/bin/bash') if entry.get('name') == 'root': defaults['home'] = '/root' else: defaults['home'] = '/home/%s' % entry.get('name') for key, val in defaults.items(): if entry.get(key) is None: entry.set(key, val) if entry.get('group') in self.existing['POSIXGroup']: entry.set('gid', str(self.existing['POSIXGroup'][entry.get('group')][2])) return entry def user_supplementary_groups(self, entry): """ Get a list of supplmentary groups that the user in the given entry is a member of """ return [g for g in self.existing['POSIXGroup'].values() if entry.get("name") in g[3] and self._in_managed_range('POSIXGroup', g[2])] def VerifyPOSIXUser(self, entry, _): """ Verify a POSIXUser entry """ rv = 
self._verify(self.populate_user_entry(entry)) if entry.get("current_exists", "true") == "true": # verify supplemental groups actual = [g[0] for g in self.user_supplementary_groups(entry)] expected = [e.get("group", e.text).strip() for e in entry.findall("MemberOf")] if set(expected) != set(actual): entry.set('qtext', "\n".join([entry.get('qtext', '')] + ["%s %s has incorrect supplemental group " "membership. Currently: %s. Should be: %s" % (entry.tag, entry.get("name"), actual, expected)])) rv = False if Bcfg2.Options.setup.interactive and not rv: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), entry.tag, entry.get('name'))) return rv def VerifyPOSIXGroup(self, entry, _): """ Verify a POSIXGroup entry """ rv = self._verify(entry) if Bcfg2.Options.setup.interactive and not rv: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), entry.tag, entry.get('name'))) return rv def _verify(self, entry): """ Perform most of the actual work of verification """ errors = [] if entry.get("name") not in self.existing[entry.tag]: entry.set('current_exists', 'false') errors.append("%s %s does not exist" % (entry.tag, entry.get("name"))) else: for attr, idx in self.attr_mapping[entry.tag].items(): val = str(self.existing[entry.tag][entry.get("name")][idx]) entry.set("current_%s" % attr, val.decode(Bcfg2.Options.setup.encoding)) if attr in ["uid", "gid"]: if entry.get(attr) is None: # no uid/gid specified, so we let the tool # automatically determine one -- i.e., it always # verifies continue entval = entry.get(attr) if not isinstance(entval, str): entval = entval.encode('utf-8') if val != entval: errors.append("%s for %s %s is incorrect. Current %s is " "%s, but should be %s" % (attr.title(), entry.tag, entry.get("name"), attr, val, entry.get(attr))) if errors: for error in errors: self.logger.debug("%s: %s" % (self.name, error)) entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) return len(errors) == 0 def Install(self, entries): states = dict() for entry in entries: # install groups first, so that all groups exist for # users that might need them if entry.tag == 'POSIXGroup': states[entry] = self._install(entry) for entry in entries: if entry.tag == 'POSIXUser': states[entry] = self._install(entry) self._existing = None return states def _install(self, entry): """ add or modify a user or group using the appropriate command """ if entry.get("name") not in self.existing[entry.tag]: action = "add" else: action = "mod" rv = self.cmd.run(self._get_cmd(action, self.set_defaults[entry.tag](entry))) if rv.success: self.modified.append(entry) else: self.logger.error("POSIXUsers: Error creating %s %s: %s" % (entry.tag, entry.get("name"), rv.error)) return rv.success def _get_cmd(self, action, entry): """ Get a command to perform the appropriate action (add, mod, del) on the given entry. The command is always the same; we set all attributes on a given user or group when modifying it rather than checking which ones need to be changed. 
This makes things fail as a unit (e.g., if a user is logged in, you can't change its home dir, but you could change its GECOS, but the whole operation fails), but it also makes this function a lot, lot easier and simpler.""" cmd = ["/usr/sbin/%s%s" % (entry.tag[5:].lower(), action)] if action != 'del': if entry.tag == 'POSIXGroup': if entry.get('gid'): cmd.extend(['-g', entry.get('gid')]) elif entry.tag == 'POSIXUser': if entry.get('uid'): cmd.extend(['-u', entry.get('uid')]) cmd.extend(['-g', entry.get('group')]) extras = [e.get("group", e.text).strip() for e in entry.findall("MemberOf")] if extras: cmd.extend(['-G', ",".join(extras)]) cmd.extend(['-d', entry.get('home')]) cmd.extend(['-s', entry.get('shell')]) cmd.extend(['-c', entry.get('gecos')]) cmd.append(entry.get('name')) return cmd def Remove(self, entries): for entry in entries: # remove users first, so that all users have been removed # from groups before we remove them if entry.tag == 'POSIXUser': self._remove(entry) for entry in entries: if entry.tag == 'POSIXGroup': try: grp.getgrnam(entry.get("name")) self._remove(entry) except KeyError: # at least some versions of userdel automatically # remove the primary group for a user if the group # name is the same as the username, and no other # users are in the group self.logger.info("POSIXUsers: Group %s does not exist. " "It may have already been removed when " "its users were deleted" % entry.get("name")) self._existing = None self.extra = self.FindExtra() def _remove(self, entry): """ Remove an entry """ rv = self.cmd.run(self._get_cmd("del", entry)) if not rv.success: self.logger.error("POSIXUsers: Error deleting %s %s: %s" % (entry.tag, entry.get("name"), rv.error)) return rv.success src/lib/Bcfg2/Client/Tools/Pacman.py000066400000000000000000000053271303523157100174340ustar00rootroot00000000000000"""This is the bcfg2 support for pacman""" import sys import Bcfg2.Client.Tools class Pacman(Bcfg2.Client.Tools.PkgTool): '''Arch Linux package support''' name = 'Pacman' __execs__ = ["/usr/bin/pacman"] __handles__ = [('Package', 'pacman')] __req__ = {'Package': ['name', 'version']} pkgtype = 'pacman' pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar") def RefreshPackages(self): '''Refresh memory hashes of packages''' self.installed = {} for pkg in self.cmd.run("/usr/bin/pacman -Q").stdout.splitlines(): pkgname = pkg.split(' ')[0].strip() version = pkg.split(' ')[1].strip() self.installed[pkgname] = version def VerifyPackage(self, entry, _): '''Verify Package status for entry''' self.logger.debug("VerifyPackage: %s : %s" % (entry.get('name'), entry.get('version'))) if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % entry.attrib['name']) return False if entry.attrib['name'] in self.installed: if entry.attrib['version'] == 'auto': return True elif self.installed[entry.attrib['name']] == \ entry.attrib['version']: # FIXME: need to figure out if pacman # allows you to verify packages return True else: entry.set('current_version', self.installed[entry.get('name')]) self.logger.debug("attribname: %s" % (entry.attrib['name'])) return False entry.set('current_exists', 'false') self.logger.debug("attribname: %s" % (entry.attrib['name'])) return False def Remove(self, packages): '''Remove extra packages''' names = [pkg.get('name') for pkg in packages] self.logger.info("Removing packages: %s" % " ".join(names)) self.cmd.run("%s --noconfirm --noprogressbar -R %s" % (self.pkgtool, " ".join(names))) self.RefreshPackages() self.extra = 
self.FindExtra() def Install(self, packages): ''' Pacman Install ''' pkgline = "" for pkg in packages: pkgline += " " + pkg.get('name') self.logger.info("packages : " + pkgline) try: self.logger.debug("Running : %s -S %s" % (self.pkgtool, pkgline)) self.cmd.run("%s -S %s" % (self.pkgtool, pkgline)) except: # pylint: disable=W0702 err = sys.exc_info()[1] self.logger.error("Error occurred during installation: %s" % err) src/lib/Bcfg2/Client/Tools/Pkgng.py000066400000000000000000000215451303523157100173030ustar00rootroot00000000000000"""This is the Bcfg2 support for pkg.""" import os import Bcfg2.Options import Bcfg2.Client.Tools class Pkgng(Bcfg2.Client.Tools.Tool): """Support for pkgng packages on FreeBSD.""" options = Bcfg2.Client.Tools.Tool.options + [ Bcfg2.Options.PathOption( cf=('Pkgng', 'path'), default='/usr/sbin/pkg', dest='pkg_path', help='Pkgng tool path')] name = 'Pkgng' __execs__ = [] __handles__ = [('Package', 'pkgng'), ('Path', 'ignore')] __req__ = {'Package': ['name', 'version'], 'Path': ['type']} def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) self.pkg = Bcfg2.Options.setup.pkg_path self.__execs__ = [self.pkg] self.pkgcmd = self.pkg + ' install -fy' if not Bcfg2.Options.setup.debug: self.pkgcmd += ' -q' self.pkgcmd += ' %s' self.ignores = [entry.get('name') for struct in config for entry in struct if entry.tag == 'Path' and entry.get('type') == 'ignore'] self.__important__ = self.__important__ + \ [entry.get('name') for struct in config for entry in struct if (entry.tag == 'Path' and entry.get('name').startswith('/etc/pkg/'))] self.nonexistent = [entry.get('name') for struct in config for entry in struct if entry.tag == 'Path' and entry.get('type') == 'nonexistent'] self.actions = {} self.pkg_cache = {} try: self._load_pkg_cache() except OSError: raise Bcfg2.Client.Tools.ToolInstantiationError def _load_pkg_cache(self): """Cache the version of all currently installed packages.""" self.pkg_cache = {} output = self.cmd.run([self.pkg, 'query', '-a', '%n %v']).stdout for line in output.splitlines(): parts = line.split(' ') name = ' '.join(parts[:-1]) self.pkg_cache[name] = parts[-1] def FindExtra(self): """Find extra packages.""" packages = [entry.get('name') for entry in self.getSupportedEntries()] extras = [(name, value) for (name, value) in self.pkg_cache.items() if name not in packages] return [Bcfg2.Client.XML.Element('Package', name=name, type='pkgng', current_version=version) for (name, version) in extras] def VerifyChecksums(self, entry, modlist): """Verify the checksum of the files, owned by a package.""" output = self.cmd.run([self.pkg, 'check', '-s', entry.get('name')]).stdout.splitlines() files = [] for item in output: if "checksum mismatch" in item: files.append(item.split()[-1]) elif "No such file or directory" in item: continue else: self.logger.error("Got Unsupported pattern %s " "from pkg check" % item) files = list(set(files) - set(self.ignores)) # We check if there is file in the checksum to do if files: # if files are found there we try to be sure our modlist is sane # with erroneous symlinks modlist = [os.path.realpath(filename) for filename in modlist] bad = [filename for filename in files if filename not in modlist] if bad: self.logger.debug("It is suggested that you either manage " "these files, revert the changes, or ignore " "false failures:") self.logger.info("Package %s failed validation. Bad files " "are:" % entry.get('name')) self.logger.info(bad) entry.set('qtext', "Reinstall Package %s-%s to fix failing files? 
" "(y/N) " % (entry.get('name'), entry.get('version'))) return False return True def _get_candidate_versions(self, name): """ Get versions of the specified package name available for installation from the configured remote repositories. """ output = self.cmd.run([self.pkg, 'search', '-U', '-Qversion', '-q', '-Sname', '-e', name]).stdout.splitlines() versions = [] for line in output: versions.append(line) if len(versions) == 0: return None return sorted(versions) def VerifyPackage(self, entry, modlist, checksums=True): """Verify package for entry.""" if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % (entry.attrib['name'])) return False pkgname = entry.get('name') if pkgname not in self.pkg_cache: self.logger.info("Package %s not installed" % (entry.get('name'))) entry.set('current_exists', 'false') return False installed_version = self.pkg_cache[pkgname] candidate_versions = self._get_candidate_versions(pkgname) if candidate_versions is not None: candidate_version = candidate_versions[0] else: self.logger.error("Package %s is installed but no candidate" "version was found." % (entry.get('name'))) return False if entry.get('version').startswith('auto'): desired_version = candidate_version entry.set('version', "auto: %s" % desired_version) elif entry.get('version').startswith('any'): desired_version = installed_version entry.set('version', "any: %s" % desired_version) else: desired_version = entry.get('version') if desired_version != installed_version: entry.set('current_version', installed_version) entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % (entry.get('name'), entry.get('current_version'), desired_version)) return False else: # version matches if (not Bcfg2.Options.setup.quick and entry.get('verify', 'true') == 'true' and checksums): pkgsums = self.VerifyChecksums(entry, modlist) return pkgsums return True def Remove(self, packages): """Deal with extra configuration detected.""" pkgnames = " ".join([pkg.get('name') for pkg in packages]) if len(packages) > 0: self.logger.info('Removing packages:') self.logger.info(pkgnames) self.cmd.run([self.pkg, 'delete', '-y', pkgnames]) self._load_pkg_cache() self.modified += packages self.extra = self.FindExtra() def Install(self, packages): ipkgs = [] bad_pkgs = [] for pkg in packages: versions = self._get_candidate_versions(pkg.get('name')) if versions is None: self.logger.error("pkg has no information about package %s" % (pkg.get('name'))) continue if pkg.get('version').startswith('auto') or \ pkg.get('version').startswith('any'): ipkgs.append("%s-%s" % (pkg.get('name'), versions[0])) continue if pkg.get('version') in versions: ipkgs.append("%s-%s" % (pkg.get('name'), pkg.get('version'))) continue else: self.logger.error("Package %s: desired version %s not in %s" % (pkg.get('name'), pkg.get('version'), versions)) bad_pkgs.append(pkg.get('name')) if bad_pkgs: self.logger.error("Cannot find correct versions of packages:") self.logger.error(bad_pkgs) if not ipkgs: return dict() if not self.cmd.run(self.pkgcmd % (" ".join(ipkgs))): self.logger.error("pkg command failed") self._load_pkg_cache() self.extra = self.FindExtra() mark = [] states = dict() for package in packages: states[package] = self.VerifyPackage(package, [], checksums=False) if states[package]: self.modified.append(package) if package.get('origin') == 'Packages': mark.append(package.get('name')) if mark: self.cmd.run([self.pkg, 'set', '-A1', '-y'] + mark) return states def VerifyPath(self, _entry, _): """Do nothing here since we only 
verify Path type=ignore.""" return True src/lib/Bcfg2/Client/Tools/Portage.py000066400000000000000000000104321303523157100176270ustar00rootroot00000000000000"""This is the Bcfg2 tool for the Gentoo Portage system.""" import re import Bcfg2.Client.Tools class Portage(Bcfg2.Client.Tools.PkgTool): """The Gentoo toolset implements package and service operations and inherits the rest from Tools.Tool.""" options = Bcfg2.Client.Tools.PkgTool.options + [ Bcfg2.Options.BooleanOption( cf=('Portage', 'binpkgonly'), help='Portage binary packages only')] __execs__ = ['/usr/bin/emerge', '/usr/bin/equery'] __handles__ = [('Package', 'ebuild')] __req__ = {'Package': ['name', 'version']} pkgtype = 'ebuild' # requires a working PORTAGE_BINHOST in make.conf _binpkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', ['name', 'version'])) pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version'])) def __init__(self, config): self._initialised = False Bcfg2.Client.Tools.PkgTool.__init__(self, config) self._initialised = True self.__important__ = self.__important__ + ['/etc/make.conf'] self._pkg_pattern = re.compile(r'(.*)-(\d.*)') self._ebuild_pattern = re.compile('(ebuild|binary)') self.installed = {} if Bcfg2.Options.setup.binpkgonly: self.pkgtool = self._binpkgtool self.RefreshPackages() def RefreshPackages(self): """Refresh memory hashes of packages.""" if not self._initialised: return self.logger.info('Getting list of installed packages') self.installed = {} for pkg in self.cmd.run(["equery", "-q", "list", "*"]).stdout.splitlines(): if self._pkg_pattern.match(pkg): name = self._pkg_pattern.match(pkg).group(1) version = self._pkg_pattern.match(pkg).group(2) self.installed[name] = version else: self.logger.info("Failed to parse pkg name %s" % pkg) def VerifyPackage(self, entry, modlist): """Verify package for entry.""" if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % (entry.get('name'))) return False if not (entry.get('name') in self.installed): # Can't verify package that isn't installed entry.set('current_exists', 'false') return False # get the installed version version = self.installed[entry.get('name')] entry.set('current_version', version) if not Bcfg2.Options.setup.quick: if ('verify' not in entry.attrib or entry.get('verify').lower() == 'true'): # Check the package if: # - Not running in quick mode # - No verify option is specified in the literal configuration # OR # - Verify option is specified and is true self.logger.debug('Running equery check on %s' % entry.get('name')) for line in self.cmd.run( ["/usr/bin/equery", "-N", "check", '=%s-%s' % (entry.get('name'), entry.get('version'))]).stdout.splitlines(): if '!!!' in line and line.split()[1] not in modlist: return False # By now the package must be in one of the following states: # - Not require checking # - Have no files modified at all # - Have modified files in the modlist only if self.installed[entry.get('name')] == version: # Specified package version is installed # Specified package version may be any in literal configuration return True # Something got skipped. 
Indicates a bug return False def Remove(self, packages): """Deal with extra configuration detected.""" pkgnames = " ".join([pkg.get('name') for pkg in packages]) if len(packages) > 0: self.logger.info('Removing packages:') self.logger.info(pkgnames) self.cmd.run("emerge --unmerge --quiet %s" % " ".join(pkgnames.split(' '))) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/RPM.py000066400000000000000000002650201303523157100166710ustar00rootroot00000000000000"""Bcfg2 Support for RPMS""" import os import rpm import Bcfg2.Client.Tools import grp import optparse import pwd import stat import sys try: import hashlib py24compat = False except ImportError: # FIXME: Remove when client python dep is 2.5 or greater py24compat = True import md5 # Determine what prelink tools we have available. # The isprelink module is a python extension that examines the ELF headers # to see if the file has been prelinked. If it is not present a lot of files # are unnecessarily run through the prelink command. try: from isprelink import * isprelink_imported = True except ImportError: isprelink_imported = False # If the prelink command is installed on the system then we need to do # prelink -y on files. if os.access('/usr/sbin/prelink', os.X_OK): prelink_exists = True else: prelink_exists = False # If we don't have isprelink then we will use the prelink configuration file to # filter what we have to put through prelink -y. import re blacklist = [] whitelist = [] try: f = open('/etc/prelink.conf', mode='r') for line in f: if line.startswith('#'): continue option, pattern = line.split() if pattern.startswith('*.'): pattern = pattern.replace('*.', '\.') pattern += '$' elif pattern.startswith('/'): pattern = '^' + pattern if option == '-b': blacklist.append(pattern) elif option == '-l': whitelist.append(pattern) f.close() except IOError: pass blacklist_re = re.compile('|'.join(blacklist)) whitelist_re = re.compile('|'.join(whitelist)) # Flags that are not defined in rpm-python. # They are defined in lib/rpmcli.h # Bit(s) for verifyFile() attributes. # RPMVERIFY_NONE = 0 RPMVERIFY_MD5 = 1 # 1 << 0 # from %verify(md5) RPMVERIFY_FILESIZE = 2 # 1 << 1 # from %verify(size) RPMVERIFY_LINKTO = 4 # 1 << 2 # from %verify(link) RPMVERIFY_USER = 8 # 1 << 3 # from %verify(user) RPMVERIFY_GROUP = 16 # 1 << 4 # from %verify(group) RPMVERIFY_MTIME = 32 # 1 << 5 # from %verify(mtime) RPMVERIFY_MODE = 64 # 1 << 6 # from %verify(mode) RPMVERIFY_RDEV = 128 # 1 << 7 # from %verify(rdev) RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # from --nocontexts RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # readlink failed RPMVERIFY_READFAIL = 536870912 # (1 << 29) # file read failed RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # lstat failed RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # lgetfilecon failed RPMVERIFY_FAILURES = \ (RPMVERIFY_LSTATFAIL | RPMVERIFY_READFAIL | RPMVERIFY_READLINKFAIL | RPMVERIFY_LGETFILECONFAIL) # Bit(s) to control rpm_verify() operation. 
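# (The RPMVERIFY_* bits above are the per-file attribute/result flags reported
# by verifyFile(); the VERIFY_* bits below select which of those checks
# rpm_verify() actually performs.)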
# VERIFY_DEFAULT = 0, # /*!< */ VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */ VERIFY_SIZE = 1 << 1 # /*!< from --nosize */ VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */ VERIFY_USER = 1 << 3 # /*!< from --nouser */ VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */ VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */ VERIFY_MODE = 1 << 6 # /*!< from --nomode */ VERIFY_RDEV = 1 << 7 # /*!< from --nodev */ # /* bits 8-14 unused, reserved for rpmVerifyAttrs */ VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */ VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */ VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */ VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */ VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */ VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */ VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */ VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */ VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */ VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */ VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */ VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */ VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */ # /* bits 28-31 used in rpmVerifyAttrs */ # Comes from C cource. lib/rpmcli.h VERIFY_ATTRS = \ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS) VERIFY_ALL = \ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST | VERIFY_SIGNATURE | VERIFY_HDRCHK) # Some masks for what checks to NOT do on these file types. # The C code actiually resets these up for every file. DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | RPMVERIFY_LINKTO) # These file types all have the same mask, but hopefully this will make the # code more readable. FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP) REG_FLAGS = ~(RPMVERIFY_LINKTO) def s_isdev(mode): """ Check to see if a file is a device. """ return stat.S_ISBLK(mode) | stat.S_ISCHR(mode) def rpmpackagelist(rts): """ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver. Requires rpmtransactionset() to be run first to get a ts. Returns a list of pkgspec dicts. e.g. [{'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' }, {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' }] """ return [ {'name': header[rpm.RPMTAG_NAME], 'epoch': header[rpm.RPMTAG_EPOCH], 'version': header[rpm.RPMTAG_VERSION], 'release': header[rpm.RPMTAG_RELEASE], 'arch': header[rpm.RPMTAG_ARCH], 'gpgkeyid': header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]} for header in rts.dbMatch()] def getindexbykeyword(index_ts, **kwargs): """ Return list of indexs from the rpmdb matching keywords ex: getHeadersByKeyword(name='foo', version='1', release='1') Can be passed any structure that can be indexed by the pkgspec keyswords as other keys are filtered out. 
""" lst = [] name = kwargs.get('name') if name: index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name) else: index_mi = index_ts.dbMatch() if 'epoch' in kwargs: if kwargs['epoch'] is not None and kwargs['epoch'] != 'None': kwargs['epoch'] = int(kwargs['epoch']) else: del(kwargs['epoch']) keywords = [key for key in list(kwargs.keys()) if key in ('name', 'epoch', 'version', 'release', 'arch')] keywords_len = len(keywords) for hdr in index_mi: match = 0 for keyword in keywords: if hdr[keyword] == kwargs[keyword]: match += 1 if match == keywords_len: lst.append(index_mi.instance()) del index_mi return lst def getheadersbykeyword(header_ts, **kwargs): """ Borrowed parts of this from from Yum. Need to fix it though. Epoch is not handled right. Return list of headers from the rpmdb matching keywords ex: getHeadersByKeyword(name='foo', version='1', release='1') Can be passed any structure that can be indexed by the pkgspec keyswords as other keys are filtered out. """ lst = [] name = kwargs.get('name') if name: header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name) else: header_mi = header_ts.dbMatch() if 'epoch' in kwargs: if kwargs['epoch'] is not None and kwargs['epoch'] != 'None': kwargs['epoch'] = int(kwargs['epoch']) else: del(kwargs['epoch']) keywords = [key for key in list(kwargs.keys()) if key in ('name', 'epoch', 'version', 'release', 'arch')] keywords_len = len(keywords) for hdr in header_mi: match = 0 for keyword in keywords: if hdr[keyword] == kwargs[keyword]: match += 1 if match == keywords_len: lst.append(hdr) del header_mi return lst def prelink_md5_check(filename): """ Checks if a file is prelinked. If it is run it through prelink -y to get the unprelinked md5 and file size. Return 0 if the file was not prelinked, otherwise return the file size. Always return the md5. """ prelink = False try: plf = open(filename, "rb") except IOError: return False, 0 if prelink_exists: if isprelink_imported: plfd = plf.fileno() if isprelink(plfd): plf.close() cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ % (re.escape(filename)) plf = os.popen(cmd, 'rb') prelink = True elif (whitelist_re.search(filename) and not blacklist_re.search(filename)): plf.close() cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ % (re.escape(filename)) plf = os.popen(cmd, 'rb') prelink = True fsize = 0 if py24compat: chksum = md5.new() else: chksum = hashlib.md5() while 1: data = plf.read() if not data: break fsize += len(data) chksum.update(data) plf.close() file_md5 = chksum.hexdigest() if prelink: return file_md5, fsize else: return file_md5, 0 def prelink_size_check(filename): """ This check is only done if the prelink_md5_check() is not done first. Checks if a file is prelinked. If it is run it through prelink -y to get the unprelinked file size. Return 0 if the file was not prelinked, otherwise return the file size. """ fsize = 0 try: plf = open(filename, "rb") except IOError: return False if prelink_exists: if isprelink_imported: plfd = plf.fileno() if isprelink(plfd): plf.close() cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ % (re.escape(filename)) plf = os.popen(cmd, 'rb') while 1: data = plf.read() if not data: break fsize += len(data) elif (whitelist_re.search(filename) and not blacklist_re.search(filename)): plf.close() cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ % (re.escape(filename)) plf = os.popen(cmd, 'rb') while 1: data = plf.read() if not data: break fsize += len(data) plf.close() return fsize def debug_verify_flags(vflags): """ Decodes the verify flags bits. 
""" if vflags & RPMVERIFY_MD5: print('RPMVERIFY_MD5') if vflags & RPMVERIFY_FILESIZE: print('RPMVERIFY_FILESIZE') if vflags & RPMVERIFY_LINKTO: print('RPMVERIFY_LINKTO') if vflags & RPMVERIFY_USER: print('RPMVERIFY_USER') if vflags & RPMVERIFY_GROUP: print('RPMVERIFY_GROUP') if vflags & RPMVERIFY_MTIME: print('RPMVERIFY_MTIME') if vflags & RPMVERIFY_MODE: print('RPMVERIFY_MODE') if vflags & RPMVERIFY_RDEV: print('RPMVERIFY_RDEV') if vflags & RPMVERIFY_CONTEXTS: print('RPMVERIFY_CONTEXTS') if vflags & RPMVERIFY_READLINKFAIL: print('RPMVERIFY_READLINKFAIL') if vflags & RPMVERIFY_READFAIL: print('RPMVERIFY_READFAIL') if vflags & RPMVERIFY_LSTATFAIL: print('RPMVERIFY_LSTATFAIL') if vflags & RPMVERIFY_LGETFILECONFAIL: print('RPMVERIFY_LGETFILECONFAIL') def debug_file_flags(fflags): """ Decodes the file flags bits. """ if fflags & rpm.RPMFILE_CONFIG: print('rpm.RPMFILE_CONFIG') if fflags & rpm.RPMFILE_DOC: print('rpm.RPMFILE_DOC') if fflags & rpm.RPMFILE_ICON: print('rpm.RPMFILE_ICON') if fflags & rpm.RPMFILE_MISSINGOK: print('rpm.RPMFILE_MISSINGOK') if fflags & rpm.RPMFILE_NOREPLACE: print('rpm.RPMFILE_NOREPLACE') if fflags & rpm.RPMFILE_GHOST: print('rpm.RPMFILE_GHOST') if fflags & rpm.RPMFILE_LICENSE: print('rpm.RPMFILE_LICENSE') if fflags & rpm.RPMFILE_README: print('rpm.RPMFILE_README') if fflags & rpm.RPMFILE_EXCLUDE: print('rpm.RPMFILE_EXLUDE') if fflags & rpm.RPMFILE_UNPATCHED: print('rpm.RPMFILE_UNPATCHED') if fflags & rpm.RPMFILE_PUBKEY: print('rpm.RPMFILE_PUBKEY') def rpm_verify_file(fileinfo, rpmlinktos, omitmask): """ Verify all the files in a package. Returns a list of error flags, the file type and file name. The list entries are strings that are the same as the labels for the bitwise flags used in the C code. """ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, vflags, fuser, fgroup, fmd5) = fileinfo # 1. rpmtsRootDir stuff. What does it do and where to I get it from? file_results = [] flags = vflags # Check to see if the file was installed - if not pretend all is ok. # This is what the rpm C code does! if fstate != rpm.RPMFILE_STATE_NORMAL: return file_results # Get the installed files stats try: lstat = os.lstat(fname) except OSError: if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)): file_results.append('RPMVERIFY_LSTATFAIL') #file_results.append(fname) return file_results # 5. Contexts? SELinux stuff? # Setup what checks to do. This is straight out of the C code. if stat.S_ISDIR(lstat.st_mode): flags &= DIR_FLAGS elif stat.S_ISLNK(lstat.st_mode): flags &= LINK_FLAGS elif stat.S_ISFIFO(lstat.st_mode): flags &= FIFO_FLAGS elif stat.S_ISCHR(lstat.st_mode): flags &= CHR_FLAGS elif stat.S_ISBLK(lstat.st_mode): flags &= BLK_FLAGS else: flags &= REG_FLAGS if (fflags & rpm.RPMFILE_GHOST): flags &= GHOST_FLAGS flags &= ~(omitmask | RPMVERIFY_FAILURES) # 8. SELinux stuff. prelink_size = 0 if flags & RPMVERIFY_MD5: prelink_md5, prelink_size = prelink_md5_check(fname) if prelink_md5 is False: file_results.append('RPMVERIFY_MD5') file_results.append('RPMVERIFY_READFAIL') elif prelink_md5 != fmd5: file_results.append('RPMVERIFY_MD5') if flags & RPMVERIFY_LINKTO: linkto = os.readlink(fname) if not linkto: file_results.append('RPMVERIFY_READLINKFAIL') file_results.append('RPMVERIFY_LINKTO') else: if len(rpmlinktos) == 0 or linkto != rpmlinktos: file_results.append('RPMVERIFY_LINKTO') if flags & RPMVERIFY_FILESIZE: if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done. 
prelink_size = prelink_size_check(fname) if (prelink_size != 0): # This is a prelinked file. if (prelink_size != fsize): file_results.append('RPMVERIFY_FILESIZE') elif lstat.st_size != fsize: # It wasn't a prelinked file. file_results.append('RPMVERIFY_FILESIZE') if flags & RPMVERIFY_MODE: metamode = fmode filemode = lstat.st_mode # Comparing the type of %ghost files is meaningless, but perms are ok. if fflags & rpm.RPMFILE_GHOST: metamode &= ~0xf000 filemode &= ~0xf000 if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \ (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)): file_results.append('RPMVERIFY_MODE') if flags & RPMVERIFY_RDEV: if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)): file_results.append('RPMVERIFY_RDEV') elif (s_isdev(fmode) & s_isdev(lstat.st_mode)): st_rdev = lstat.st_rdev if frdev != st_rdev: file_results.append('RPMVERIFY_RDEV') if flags & RPMVERIFY_MTIME: if lstat.st_mtime != fmtime: file_results.append('RPMVERIFY_MTIME') if flags & RPMVERIFY_USER: try: user = pwd.getpwuid(lstat.st_uid)[0] except KeyError: user = None if not user or not fuser or (user != fuser): file_results.append('RPMVERIFY_USER') if flags & RPMVERIFY_GROUP: try: group = grp.getgrgid(lstat.st_gid)[0] except KeyError: group = None if not group or not fgroup or (group != fgroup): file_results.append('RPMVERIFY_GROUP') return file_results def rpm_verify_dependencies(header): """ Check package dependencies. Header is an rpm.hdr. Don't like opening another ts to do this, but it was the only way I could find of clearing the ts out. Have asked on the rpm-maint list on how to do this the right way (28 Feb 2007). ts.check() returns: ((name, version, release), (reqname, reqversion), \ flags, suggest, sense) """ _ts1 = rpmtransactionset() _ts1.addInstall(header, 'Dep Check', 'i') dep_errors = _ts1.check() _ts1.closeDB() return dep_errors def rpm_verify_package(vp_ts, header, verify_options): """ Verify a single package specified by header. Header is an rpm.hdr. If errors are found it returns a dictionary of errors. """ # Set some transaction level flags. vsflags = 0 if 'nodigest' in verify_options: vsflags |= rpm._RPMVSF_NODIGESTS if 'nosignature' in verify_options: vsflags |= rpm._RPMVSF_NOSIGNATURES ovsflags = vp_ts.setVSFlags(vsflags) # Map from the Python options to the rpm bitwise flags. omitmask = 0 if 'nolinkto' in verify_options: omitmask |= VERIFY_LINKTO if 'nomd5' in verify_options: omitmask |= VERIFY_MD5 if 'nosize' in verify_options: omitmask |= VERIFY_SIZE if 'nouser' in verify_options: omitmask |= VERIFY_USER if 'nogroup' in verify_options: omitmask |= VERIFY_GROUP if 'nomtime' in verify_options: omitmask |= VERIFY_MTIME if 'nomode' in verify_options: omitmask |= VERIFY_MODE if 'nordev' in verify_options: omitmask |= VERIFY_RDEV omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) package_results = {} # Check Signatures and Digests. # No idea what this might return. Need to break something to see. # Setting the vsflags above determines what gets checked in the header. hdr_stat = vp_ts.hdrCheck(header.unload()) if hdr_stat: package_results['hdr'] = hdr_stat # Check Package Depencies. if 'nodeps' not in verify_options: dep_stat = rpm_verify_dependencies(header) if dep_stat: package_results['deps'] = dep_stat # Check all the package files. if 'nofiles' not in verify_options: vp_fi = header.fiFromHeader() for fileinfo in vp_fi: # Do not bother doing anything with ghost files. # This is what RPM does. 
if fileinfo[4] & rpm.RPMFILE_GHOST: continue # This is only needed because of an inconsistency in the # rpm.fi interface. linktos = vp_fi.FLink() file_stat = rpm_verify_file(fileinfo, linktos, omitmask) #if len(file_stat) > 0 or options.verbose: if len(file_stat) > 0: fflags = fileinfo[4] if fflags & rpm.RPMFILE_CONFIG: file_stat.append('c') elif fflags & rpm.RPMFILE_DOC: file_stat.append('d') elif fflags & rpm.RPMFILE_GHOST: file_stat.append('g') elif fflags & rpm.RPMFILE_LICENSE: file_stat.append('l') elif fflags & rpm.RPMFILE_PUBKEY: file_stat.append('P') elif fflags & rpm.RPMFILE_README: file_stat.append('r') else: file_stat.append(' ') file_stat.append(fileinfo[0]) # The filename. package_results.setdefault('files', []).append(file_stat) # Run the verify script if there is one. # Do we want this? #if 'noscripts' not in verify_options: # script_stat = rpmVerifyscript() # if script_stat: # package_results['script'] = script_stat # If there have been any errors, add the package nevra to the result. if len(package_results) > 0: package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], header[rpm.RPMTAG_EPOCH], header[rpm.RPMTAG_VERSION], header[rpm.RPMTAG_RELEASE], header[rpm.RPMTAG_ARCH])) else: package_results = None # Put things back the way we found them. vsflags = vp_ts.setVSFlags(ovsflags) return package_results def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]): """ Requires rpmtransactionset() to be run first to get a ts. pkgspec is a dict specifying the package e.g.: For a single package { name='foo', epoch='20', version='1', release='1', arch='x86_64'} For all packages {} Or any combination of keywords to select one or more packages to verify. options is a list of 'rpm --verify' options. Default is to check everything. e.g.: [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature', 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime', 'nomode', 'nordev' ] Returns a list. One list entry per package. Each list entry is a dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'. Entries only get added for the failures. If nothing failed, None is returned. Its all a bit messy and probably needs reviewing. [ { 'hdr': [???], 'deps: [((name, version, release), (reqname, reqversion), flags, suggest, sense), .... ] 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ], ['filename2', 'RPMVERFIY_LSTATFAIL']] 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] } { 'hdr': [???], 'deps: [((name, version, release), (reqname, reqversion), flags, suggest, sense), .... ] 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER" ], ['filename2', 'RPMVERFIY_LSTATFAIL']] 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ] """ verify_results = [] headers = getheadersbykeyword(verify_ts, **verify_pkgspec) for header in headers: result = rpm_verify_package(verify_ts, header, verify_options) if result: verify_results.append(result) return verify_results def rpmtransactionset(): """ A simple wrapper for rpm.TransactionSet() to keep everthiing together. Might use it to set some ts level flags later. """ ts = rpm.TransactionSet() return ts class Rpmtscallback(object): """ Callback for ts.run(). Used for adding, upgrading and removing packages. Starting with all possible reasons codes, but bcfg2 will probably only make use of a few of them. Mostly just printing stuff at the moment to understand how the callback is used. 
""" def __init__(self): self.fdnos = {} def callback(self, reason, amount, total, key, client_data): """ Generic rpmts call back. """ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE: pass elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE: pass elif reason == rpm.RPMCALLBACK_INST_START: pass elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \ reason == rpm.RPMCALLBACK_INST_PROGRESS: pass # rpm.RPMCALLBACK_INST_PROGRESS' elif reason == rpm.RPMCALLBACK_TRANS_START: pass elif reason == rpm.RPMCALLBACK_TRANS_STOP: pass elif reason == rpm.RPMCALLBACK_REPACKAGE_START: pass elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS: pass elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP: pass elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS: pass elif reason == rpm.RPMCALLBACK_UNINST_START: pass elif reason == rpm.RPMCALLBACK_UNINST_STOP: pass # How do we get at this? # RPM.modified += key elif reason == rpm.RPMCALLBACK_UNPACK_ERROR: pass elif reason == rpm.RPMCALLBACK_CPIO_ERROR: pass elif reason == rpm.RPMCALLBACK_UNKNOWN: pass else: print('ERROR - Fell through callBack') def rpm_erase(erase_pkgspecs, erase_flags): """ pkgspecs is a list of pkgspec dicts specifying packages e.g.: For a single package { name='foo', epoch='20', version='1', release='1', arch='x86_64'} """ erase_ts_flags = 0 if 'noscripts' in erase_flags: erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS if 'notriggers' in erase_flags: erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS if 'repackage' in erase_flags: erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE erase_ts = rpmtransactionset() erase_ts.setFlags(erase_ts_flags) for pkgspec in erase_pkgspecs: idx_list = getindexbykeyword(erase_ts, **pkgspec) if len(idx_list) > 1 and not 'allmatches' in erase_flags: #pass print('ERROR - Multiple package match for erase', pkgspec) else: for idx in idx_list: erase_ts.addErase(idx) #for te in erase_ts: erase_problems = [] if 'nodeps' not in erase_flags: erase_problems = erase_ts.check() if erase_problems == []: erase_ts.order() erase_callback = Rpmtscallback() erase_ts.run(erase_callback.callback, 'Erase') #else: erase_ts.closeDB() del erase_ts return erase_problems def display_verify_file(file_results): ''' Display file results similar to rpm --verify. ''' filename = file_results[-1] filetype = file_results[-2] result_string = '' if 'RPMVERIFY_LSTATFAIL' in file_results: result_string = 'missing ' else: if 'RPMVERIFY_FILESIZE' in file_results: result_string = result_string + 'S' else: result_string = result_string + '.' if 'RPMVERIFY_MODE' in file_results: result_string = result_string + 'M' else: result_string = result_string + '.' if 'RPMVERIFY_MD5' in file_results: if 'RPMVERIFY_READFAIL' in file_results: result_string = result_string + '?' else: result_string = result_string + '5' else: result_string = result_string + '.' if 'RPMVERIFY_RDEV' in file_results: result_string = result_string + 'D' else: result_string = result_string + '.' if 'RPMVERIFY_LINKTO' in file_results: if 'RPMVERIFY_READLINKFAIL' in file_results: result_string = result_string + '?' else: result_string = result_string + 'L' else: result_string = result_string + '.' if 'RPMVERIFY_USER' in file_results: result_string = result_string + 'U' else: result_string = result_string + '.' if 'RPMVERIFY_GROUP' in file_results: result_string = result_string + 'G' else: result_string = result_string + '.' if 'RPMVERIFY_MTIME' in file_results: result_string = result_string + 'T' else: result_string = result_string + '.' 
print(result_string + ' ' + filetype + ' ' + filename) sys.stdout.flush() #============================================================================= # Some options and output to assist with development and testing. # These are not intended for normal use. if __name__ == "__main__": p = optparse.OptionParser() p.add_option('--name', action='store', default=None, help='''Package name to verify. ****************************************** NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES. ****************************************** The specified operation will be carried out on all instances of packages that match the package specification (name, epoch, version, release, arch).''') p.add_option('--epoch', action='store', default=None, help='''Package epoch.''') p.add_option('--version', action='store', default=None, help='''Package version.''') p.add_option('--release', action='store', default=None, help='''Package release.''') p.add_option('--arch', action='store', default=None, help='''Package arch.''') p.add_option('--erase', '-e', action='store_true', default=None, help= '''**************************************************** REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED. ****************************************************''') p.add_option('--list', '-l', action='store_true', help='''List package identity info. rpm -qa ish equivalent intended for use in RefreshPackages().''') p.add_option('--verify', action='store_true', help='''Verify Package(s). Output is only produced after all packages has been verified. Be patient.''') p.add_option('--verbose', '-v', action='store_true', help='''Verbose output for --verify option. Output is the same as rpm -v --verify.''') p.add_option('--nodeps', action='store_true', default=False, help='Do not do dependency testing.') p.add_option('--nodigest', action='store_true', help='Do not check package digests.') p.add_option('--nofiles', action='store_true', help='Do not do file checks.') p.add_option('--noscripts', action='store_true', help='Do not run verification scripts.') p.add_option('--nosignature', action='store_true', help='Do not do package signature verification.') p.add_option('--nolinkto', action='store_true', help='Do not do symlink tests.') p.add_option('--nomd5', action='store_true', help='''Do not do MD5 checksums on files. Note that this does not work for prelink files yet.''') p.add_option('--nosize', action='store_true', help='''Do not do file size tests. Note that this does not work for prelink files yet.''') p.add_option('--nouser', action='store_true', help='Do not check file user ownership.') p.add_option('--nogroup', action='store_true', help='Do not check file group ownership.') p.add_option('--nomtime', action='store_true', help='Do not check file modification times.') p.add_option('--nomode', action='store_true', help='Do not check file modes (permissions).') p.add_option('--nordev', action='store_true', help='Do not check device node.') p.add_option('--notriggers', action='store_true', help='Do not do not generate triggers on erase.') p.add_option('--repackage', action='store_true', help='''Do repackage on erase.i Packages are put in /var/spool/repackage.''') p.add_option('--allmatches', action='store_true', help= '''Remove all package instances that match the pkgspec. 
*************************************************** NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC THAT MEANS ALL PACKAGES!!!! ***************************************************''') options, arguments = p.parse_args() pkgspec = {} rpm_options = [] if options.nodeps: rpm_options.append('nodeps') if options.nodigest: rpm_options.append('nodigest') if options.nofiles: rpm_options.append('nofiles') if options.noscripts: rpm_options.append('noscripts') if options.nosignature: rpm_options.append('nosignature') if options.nolinkto: rpm_options.append('nolinkto') if options.nomd5: rpm_options.append('nomd5') if options.nosize: rpm_options.append('nosize') if options.nouser: rpm_options.append('nouser') if options.nogroup: rpm_options.append('nogroup') if options.nomtime: rpm_options.append('nomtime') if options.nomode: rpm_options.append('nomode') if options.nordev: rpm_options.append('nordev') if options.repackage: rpm_options.append('repackage') if options.allmatches: rpm_options.append('allmatches') main_ts = rpmtransactionset() cmdline_pkgspec = {} if options.name != 'all': if options.name: cmdline_pkgspec['name'] = str(options.name) if options.epoch: cmdline_pkgspec['epoch'] = str(options.epoch) if options.version: cmdline_pkgspec['version'] = str(options.version) if options.release: cmdline_pkgspec['release'] = str(options.release) if options.arch: cmdline_pkgspec['arch'] = str(options.arch) if options.verify: results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options) for r in results: files = r.get('files', '') for f in files: display_verify_file(f) elif options.list: for p in rpmpackagelist(main_ts): print(p) elif options.erase: if options.name: rpm_erase([cmdline_pkgspec], rpm_options) else: print('You must specify the "--name" option') class RPM(Bcfg2.Client.Tools.PkgTool): """Support for RPM packages.""" options = Bcfg2.Client.Tools.PkgTool.options + [ Bcfg2.Options.Option( cf=('RPM', 'installonlypackages'), dest="rpm_installonly", type=Bcfg2.Options.Types.comma_list, default=['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], help='RPM install-only packages'), Bcfg2.Options.BooleanOption( cf=('RPM', 'pkg_checks'), default=True, dest="rpm_pkg_checks", help="Perform RPM package checks"), Bcfg2.Options.BooleanOption( cf=('RPM', 'pkg_verify'), default=True, dest="rpm_pkg_verify", help="Perform RPM package verify"), Bcfg2.Options.BooleanOption( cf=('RPM', 'install_missing'), default=True, dest="rpm_install_missing", help="Install missing packages"), Bcfg2.Options.Option( cf=('RPM', 'erase_flags'), default=["allmatches"], dest="rpm_erase_flags", type=Bcfg2.Options.Types.comma_list, help="RPM erase flags"), Bcfg2.Options.BooleanOption( cf=('RPM', 'fix_version'), default=True, dest="rpm_fix_version", help="Fix (upgrade or downgrade) packages with the wrong version"), Bcfg2.Options.BooleanOption( cf=('RPM', 'reinstall_broken'), default=True, dest="rpm_reinstall_broken", help="Reinstall packages that fail to verify"), Bcfg2.Options.Option( cf=('RPM', 'verify_flags'), default=[], dest="rpm_verify_flags", type=Bcfg2.Options.Types.comma_list, help="RPM verify flags")] __execs__ = ['/bin/rpm', '/var/lib/rpm'] __handles__ = [('Package', 'rpm')] __req__ = {'Package': ['name', 'version']} __ireq__ = {'Package': ['url']} __new_req__ = {'Package': ['name'], 'Instance': ['version', 'release', 'arch']} 
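# Editor's note (comment added for clarity; not in the original source): the
# __req__/__new_req__/__gpg_req__/__new_gpg_req__ dicts name the attributes an
# entry must carry before canVerify() will accept it, while the corresponding
# *_ireq__ dicts name the attributes canInstall() requires before an install is
# attempted. The __new_* variants apply to the newer Package-with-Instance
# entry format, and the __gpg_* variants to gpg-pubkey pseudo-packages.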
__new_ireq__ = {'Package': ['uri'], 'Instance': ['simplefile']} __gpg_req__ = {'Package': ['name', 'version']} __gpg_ireq__ = {'Package': ['name', 'version']} __new_gpg_req__ = {'Package': ['name'], 'Instance': ['version', 'release']} __new_gpg_ireq__ = {'Package': ['name'], 'Instance': ['version', 'release']} pkgtype = 'rpm' pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"])) def __init__(self, config): Bcfg2.Client.Tools.PkgTool.__init__(self, config) # create a global ignore list used when ignoring particular # files during package verification self.ignores = [entry.get('name') for struct in config for entry in struct if entry.get('type') == 'ignore'] self.instance_status = {} self.extra_instances = [] self.modlists = {} self.gpg_keyids = self.getinstalledgpg() self.installOnlyPkgs = Bcfg2.Options.setup.rpm_installonly if 'gpg-pubkey' not in self.installOnlyPkgs: self.installOnlyPkgs.append('gpg-pubkey') self.verify_flags = Bcfg2.Options.setup.rpm_verify_flags if '' in self.verify_flags: self.verify_flags.remove('') self.logger.debug('%s: installOnlyPackages = %s' % (self.name, self.installOnlyPkgs)) self.logger.debug('%s: erase_flags = %s' % (self.name, Bcfg2.Options.setup.rpm_erase_flags)) self.logger.debug('%s: pkg_checks = %s' % (self.name, Bcfg2.Options.setup.rpm_pkg_checks)) self.logger.debug('%s: pkg_verify = %s' % (self.name, Bcfg2.Options.setup.rpm_pkg_verify)) self.logger.debug('%s: install_missing = %s' % (self.name, Bcfg2.Options.setup.rpm_install_missing)) self.logger.debug('%s: fix_version = %s' % (self.name, Bcfg2.Options.setup.rpm_fix_version)) self.logger.debug('%s: reinstall_broken = %s' % (self.name, Bcfg2.Options.setup.rpm_reinstall_broken)) self.logger.debug('%s: verify_flags = %s' % (self.name, self.verify_flags)) # Force a re- prelink of all packages if prelink exists. # Many, if not most package verifies can be caused by out of # date prelinking. if (os.path.isfile('/usr/sbin/prelink') and not Bcfg2.Options.setup.dry_run): rv = self.cmd.run('/usr/sbin/prelink -a -mR') if rv.success: self.logger.debug('Pre-emptive prelink succeeded') else: # FIXME : this is dumb - what if the output is huge? self.logger.error('Pre-emptive prelink failed: %s' % rv.error) def RefreshPackages(self): """ Creates self.installed{} which is a dict of installed packages. The dict items are lists of nevra dicts. This loosely matches the config from the server and what rpmtools uses to specify pacakges. e.g. self.installed['foo'] = [ {'name':'foo', 'epoch':None, 'version':'1', 'release':2, 'arch':'i386'}, {'name':'foo', 'epoch':None, 'version':'1', 'release':2, 'arch':'x86_64'} ] """ self.installed = {} refresh_ts = rpmtransactionset() # Don't bother with signature checks at this stage. The GPG keys might # not be installed. refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES) for nevra in rpmpackagelist(refresh_ts): self.installed.setdefault(nevra['name'], []).append(nevra) if Bcfg2.Options.setup.debug: print("The following package instances are installed:") for name, instances in list(self.installed.items()): self.logger.debug(" " + name) for inst in instances: self.logger.debug(" %s" % self.str_evra(inst)) refresh_ts.closeDB() del refresh_ts def VerifyPackage(self, entry, modlist, pinned_version=None): """ Verify Package status for entry. Performs the following: - Checks for the presence of required Package Instances. - Compares the evra 'version' info against self.installed{}. - RPM level package verify (rpm --verify). 
- Checks for the presence of unrequired package instances. Produces the following dict and list for RPM.Install() to use: For installs/upgrades/fixes of required instances: instance_status = { : { 'installed': True|False, 'version_fail': True|False, 'verify_fail': True|False, 'pkg': , 'modlist': [ , ... ], 'verify' : [ ] }, ...... } For deletions of unrequired instances: extra_instances = [ , ..... ] Constructs the text prompts for interactive mode. """ instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package'] if instances == []: # We have an old style no Instance entry. Convert it to new style. instance = Bcfg2.Client.XML.SubElement(entry, 'Package') for attrib in list(entry.attrib.keys()): instance.attrib[attrib] = entry.attrib[attrib] if (Bcfg2.Options.setup.rpm_pkg_checks and entry.get('pkg_checks', 'true').lower() == 'true'): if 'any' in [entry.get('version'), pinned_version]: version, release = 'any', 'any' elif entry.get('version') == 'auto': if pinned_version is not None: version, release = pinned_version.split('-') else: return False else: version, release = entry.get('version').split('-') instance.set('version', version) instance.set('release', release) if entry.get('verify', 'true') == 'false': instance.set('verify', 'false') instances = [instance] self.logger.debug("Verifying package instances for %s" % entry.get('name')) package_fail = False qtext_versions = '' if entry.get('name') in self.installed: # There is at least one instance installed. if (Bcfg2.Options.setup.rpm_pkg_checks and entry.get('pkg_checks', 'true').lower() == 'true'): rpmTs = rpm.TransactionSet() rpmHeader = None for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')): if rpmHeader is None or \ rpm.versionCompare(h, rpmHeader) > 0: rpmHeader = h rpmProvides = [h['provides'] for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name'))] rpmIntersection = set(rpmHeader['provides']) & \ set(self.installOnlyPkgs) if len(rpmIntersection) > 0: # Packages that should only be installed or removed. # e.g. kernels. self.logger.debug(" Install only package.") for inst in instances: self.instance_status.setdefault(inst, {})['installed']\ = False self.instance_status[inst]['version_fail'] = False if inst.tag == 'Package' and \ len(self.installed[entry.get('name')]) > 1: self.logger.error("WARNING: Multiple instances of " "package %s are installed." 
% (entry.get('name'))) for pkg in self.installed[entry.get('name')]: if inst.get('version') == 'any' or \ self.pkg_vr_equal(inst, pkg) or \ self.inst_evra_equal(inst, pkg): if inst.get('version') == 'any': self.logger.error("got any version") self.logger.debug(" %s" % self.str_evra(inst)) self.instance_status[inst]['installed'] = True if (Bcfg2.Options.setup.rpm_pkg_verify and inst.get('pkg_verify', 'true').lower() == 'true'): flags = inst.get('verify_flags', '').split(',') + \ self.verify_flags if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ entry.get('name') != 'gpg-pubkey': flags += ['nosignature', 'nodigest'] self.logger.debug('WARNING: Package ' '%s %s requires GPG ' 'Public key with ID ' '%s' % (pkg.get('name'), self.str_evra(pkg), pkg.get('gpgkeyid', ''))) self.logger.debug(' Disabling ' 'signature check.') if Bcfg2.Options.setup.quick: if prelink_exists: flags += ['nomd5', 'nosize'] else: flags += ['nomd5'] self.logger.debug(" verify_flags = " "%s" % flags) if inst.get('verify', 'true') == 'false': self.instance_status[inst]['verify'] =\ None else: vp_ts = rpmtransactionset() self.instance_status[inst]['verify'] =\ rpm_verify(vp_ts, pkg, flags) vp_ts.closeDB() del vp_ts if not self.instance_status[inst]['installed']: self.logger.info(" Package %s %s not " "installed." % (entry.get('name'), self.str_evra(inst))) qtext_versions = qtext_versions + 'I(%s) ' % \ self.str_evra(inst) entry.set('current_exists', 'false') else: # Normal Packages that can be upgraded. for inst in instances: self.instance_status.setdefault(inst, {})['installed']\ = False self.instance_status[inst]['version_fail'] = False # only installed packages with the same architecture # are relevant. if inst.get('arch', None) is None: arch_match = self.installed[entry.get('name')] else: arch_match = [pkg for pkg in self.installed[entry.get('name')] if pkg.get('arch', None) == inst.get('arch', None)] if len(arch_match) > 1: self.logger.error("Multiple instances of package " "%s installed with the same " "achitecture." % (entry.get('name'))) elif len(arch_match) == 1: # There is only one installed like there should be. # Check that it is the right version. for pkg in arch_match: if inst.get('version') == 'any' or \ self.pkg_vr_equal(inst, pkg) or \ self.inst_evra_equal(inst, pkg): self.logger.debug(" %s" % self.str_evra(inst)) self.instance_status[inst]['installed'] = \ True if (Bcfg2.Options.setup.rpm_pkg_verify and inst.get( 'pkg_verify', 'true').lower() == 'true'): flags = inst.get('verify_flags', '').split(',') + \ self.verify_flags if pkg.get('gpgkeyid', '')[-8:] not in\ self.gpg_keyids and 'nosignature'\ not in flags: flags += ['nosignature', 'nodigest'] self.logger.info( 'WARNING: Package %s %s ' 'requires GPG Public key with ' 'ID %s' % (pkg.get('name'), self.str_evra(pkg), pkg.get('gpgkeyid', ''))) self.logger.info( ' Disabling signature ' 'check.') if Bcfg2.Options.setup.quick: if prelink_exists: flags += ['nomd5', 'nosize'] else: flags += ['nomd5'] self.logger.debug( " verify_flags = %s" % flags) if inst.get('verify', 'true') == \ 'false': self.instance_status[inst]['verify'] = None else: vp_ts = rpmtransactionset() self.instance_status[inst]['verify'] = rpm_verify(vp_ts, pkg, flags) vp_ts.closeDB() del vp_ts else: # Wrong version installed. self.instance_status[inst]['version_fail']\ = True self.logger.info(" Wrong version " "installed. 
Want %s, but " "have %s" % (self.str_evra(inst), self.str_evra(pkg))) qtext_versions = qtext_versions + \ 'U(%s -> %s) ' % (self.str_evra(pkg), self.str_evra(inst)) elif len(arch_match) == 0: # This instance is not installed. self.instance_status[inst]['installed'] = False self.logger.info(" %s is not installed." % self.str_evra(inst)) qtext_versions = qtext_versions + \ 'I(%s) ' % self.str_evra(inst) # Check the rpm verify results. for inst in instances: instance_fail = False # Dump the rpm verify results. #****Write something to format this nicely.***** if (Bcfg2.Options.setup.debug and self.instance_status[inst].get('verify', None)): self.logger.debug(self.instance_status[inst]['verify']) self.instance_status[inst]['verify_fail'] = False if self.instance_status[inst].get('verify', None): if len(self.instance_status[inst].get('verify')) > 1: self.logger.info("WARNING: Verification of more " "than one package instance.") for result in self.instance_status[inst]['verify']: # Check header results if result.get('hdr', None): instance_fail = True self.instance_status[inst]['verify_fail'] = \ True # Check dependency results if result.get('deps', None): instance_fail = True self.instance_status[inst]['verify_fail'] = \ True # check the rpm verify file results against # the modlist and entry and per Instance Ignores. ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ [ig.get('name') for ig in inst.findall('Ignore')] + \ self.ignores for file_result in result.get('files', []): if file_result[-1] not in modlist + ignores: instance_fail = True self.instance_status[inst]['verify_fail'] \ = True else: self.logger.debug(" Modlist/Ignore " "match: %s" % (file_result[-1])) if instance_fail: self.logger.debug("*** Instance %s failed RPM " "verification ***" % self.str_evra(inst)) qtext_versions = qtext_versions + \ 'R(%s) ' % self.str_evra(inst) self.modlists[entry] = modlist # Attach status structure for reporting. inst.set('verify_status', str(self.instance_status[inst])) version_fail = self.instance_status[inst].get( 'version_fail', False) verify_fail = self.instance_status[inst].get( 'verify_fail', False) if not self.instance_status[inst]['installed'] or \ version_fail or verify_fail: package_fail = True self.instance_status[inst]['pkg'] = entry self.modlists[entry] = modlist # Find Installed Instances that are not in the Config. extra_installed = self.FindExtraInstances( entry, self.installed[entry.get('name')]) if extra_installed is not None: package_fail = True self.extra_instances.append(extra_installed) for inst in extra_installed.findall('Instance'): qtext_versions = qtext_versions + \ 'D(%s) ' % self.str_evra(inst) self.logger.debug("Found Extra Instances %s" % qtext_versions) if package_fail: self.logger.info(" Package %s failed verification." % (entry.get('name'))) qtext = 'Install/Upgrade/delete Package %s instance(s) - '\ '%s (y/N) ' % (entry.get('name'), qtext_versions) entry.set('qtext', qtext) bcfg2_versions = '' for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']: bcfg2_versions = bcfg2_versions + \ '(%s) ' % self.str_evra(bcfg2_inst) if bcfg2_versions != '': entry.set('version', bcfg2_versions) installed_versions = '' for installed_inst in self.installed[entry.get('name')]: installed_versions = installed_versions + \ '(%s) ' % self.str_evra(installed_inst) entry.set('current_version', installed_versions) return False else: # There are no Instances of this package installed. 
self.logger.debug("Package %s has no instances installed" % (entry.get('name'))) entry.set('current_exists', 'false') bcfg2_versions = '' for inst in instances: qtext_versions = qtext_versions + \ 'I(%s) ' % self.str_evra(inst) self.instance_status.setdefault(inst, {})['installed'] = False self.modlists[entry] = modlist self.instance_status[inst]['pkg'] = entry if inst.tag == 'Instance': bcfg2_versions = bcfg2_versions + \ '(%s) ' % self.str_evra(inst) if bcfg2_versions != '': entry.set('version', bcfg2_versions) entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % (entry.get('name'), qtext_versions)) return False return True def Remove(self, packages): """ Remove specified entries. packages is a list of Package Entries with Instances generated by FindExtra(). """ self.logger.debug('Running RPM.Remove()') pkgspec_list = [] for pkg in packages: for inst in pkg: if pkg.get('name') != 'gpg-pubkey': pkgspec = {'name': pkg.get('name'), 'epoch': inst.get('epoch', None), 'version': inst.get('version'), 'release': inst.get('release'), 'arch': inst.get('arch')} pkgspec_list.append(pkgspec) else: pkgspec = {'name': pkg.get('name'), 'version': inst.get('version'), 'release': inst.get('release')} self.logger.info("WARNING: gpg-pubkey package not in " "configuration %s %s" % (pkgspec.get('name'), self.str_evra(pkgspec))) self.logger.info(" This package will be deleted " "in a future version of the RPM driver.") #pkgspec_list.append(pkg_spec) erase_results = rpm_erase(pkgspec_list, Bcfg2.Options.setup.rpm_erase_flags) if erase_results == []: self.modified += packages for pkg in pkgspec_list: self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg))) else: self.logger.info("Bulk erase failed with errors:") self.logger.debug("Erase results = %s" % erase_results) self.logger.info("Attempting individual erase for each package.") pkgspec_list = [] for pkg in packages: pkg_modified = False for inst in pkg: if pkg.get('name') != 'gpg-pubkey': pkgspec = {'name': pkg.get('name'), 'epoch': inst.get('epoch', None), 'version': inst.get('version'), 'release': inst.get('release'), 'arch': inst.get('arch')} pkgspec_list.append(pkgspec) else: pkgspec = {'name': pkg.get('name'), 'version': inst.get('version'), 'release': inst.get('release')} self.logger.info("WARNING: gpg-pubkey package not in " "configuration %s %s" % (pkgspec.get('name'), self.str_evra(pkgspec))) self.logger.info(" This package will be " "deleted in a future version of the " "RPM driver.") continue # don't delete the gpg-pubkey packages erase_results = rpm_erase( [pkgspec], Bcfg2.Options.setup.rpm_erase_flags) if erase_results == []: pkg_modified = True self.logger.info("Deleted %s %s" % (pkgspec.get('name'), self.str_evra(pkgspec))) else: self.logger.error("unable to delete %s %s" % (pkgspec.get('name'), self.str_evra(pkgspec))) self.logger.debug("Failure = %s" % erase_results) if pkg_modified: self.modified.append(pkg) self.RefreshPackages() self.extra = self.FindExtra() def FixInstance(self, instance, inst_status): """ Control if a reinstall of a package happens or not based on the results from RPM.VerifyPackage(). Return True to reinstall, False to not reintstall. 
""" fix = False if not inst_status.get('installed', False): if (instance.get('install_missing', 'true').lower() == "true" and Bcfg2.Options.setup.rpm_install_missing): fix = True else: self.logger.debug('Installed Action for %s %s is to not ' 'install' % (inst_status.get('pkg').get('name'), self.str_evra(instance))) elif inst_status.get('version_fail', False): if (instance.get('fix_version', 'true').lower() == "true" and Bcfg2.Options.setup.rpm_fix_version): fix = True else: self.logger.debug('Version Fail Action for %s %s is to ' 'not upgrade' % (inst_status.get('pkg').get('name'), self.str_evra(instance))) elif inst_status.get('verify_fail', False): if (instance.get('reinstall_broken', 'true').lower() == "true" and Bcfg2.Options.setup.rpm_reinstall_broken): for inst in inst_status.get('verify'): # This needs to be a for loop rather than a straight get() # because the underlying routines handle multiple packages # and return a list of results. self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra')) if inst.get("hdr", False): fix = True elif inst.get('files', False): # Parse rpm verify file results for file_result in inst.get('files', []): self.logger.debug('reinstall_check: file: %s' % file_result) if file_result[-2] != 'c': fix = True break # Shouldn't really need this, but included for clarity. elif inst.get("deps", False): fix = False else: self.logger.debug('Verify Fail Action for %s %s is to not ' 'reinstall' % (inst_status.get('pkg').get('name'), self.str_evra(instance))) return fix def Install(self, packages): """ Try and fix everything that RPM.VerifyPackages() found wrong for each Package Entry. This can result in individual RPMs being installed (for the first time), reinstalled, deleted, downgraded or upgraded. packages is a list of Package Elements that has states[] == False The following effects occur: - states{} is conditionally updated for each package. - self.installed{} is rebuilt, possibly multiple times. - self.instance_statusi{} is conditionally updated for each instance of a package. - Each package will be added to self.modified[] if its states{} entry is set to True. """ self.logger.info('Runing RPM.Install()') states = dict() install_only_pkgs = [] gpg_keys = [] upgrade_pkgs = [] # Remove extra instances. # Can not reverify because we don't have a package entry. if len(self.extra_instances) > 0: if (Bcfg2.Options.setup.remove in ['all', 'packages'] and not Bcfg2.Options.setup.dry_run): self.Remove(self.extra_instances) else: self.logger.info("The following extra package instances will " "be removed by the '-r' option:") for pkg in self.extra_instances: for inst in pkg: self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst))) # Figure out which instances of the packages actually need something # doing to them and place in the appropriate work 'queue'. 
for pkg in packages: for inst in [instn for instn in pkg if instn.tag in ['Instance', 'Package']]: if self.FixInstance(inst, self.instance_status[inst]): if pkg.get('name') == 'gpg-pubkey': gpg_keys.append(inst) elif pkg.get('name') in self.installOnlyPkgs: install_only_pkgs.append(inst) else: upgrade_pkgs.append(inst) # Fix installOnlyPackages if len(install_only_pkgs) > 0: self.logger.info("Attempting to install 'install only packages'") install_args = " ".join(os.path.join( self.instance_status[inst].get('pkg').get('uri'), inst.get('simplefile')) for inst in install_only_pkgs) if self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs " "%s" % install_args): # The rpm command succeeded. All packages installed. self.logger.info("Single Pass for InstallOnlyPkgs Succeded") self.RefreshPackages() else: # The rpm command failed. No packages installed. # Try installing instances individually. self.logger.error("Single Pass for InstallOnlyPackages Failed") installed_instances = [] for inst in install_only_pkgs: pkguri = self.instance_status[inst].get('pkg').get('uri') pkgname = self.instance_status[inst].get('pkg').get('name') install_args = os.path.join(pkguri, inst.get('simplefile')) if self.cmd.run("rpm --install --quiet --oldpackage " "--replacepkgs %s" % install_args): installed_instances.append(inst) else: self.logger.debug("InstallOnlyPackage %s %s would not " "install." % (pkgname, self.str_evra(inst))) install_pkg_set = set([self.instance_status[inst].get('pkg') for inst in install_only_pkgs]) self.RefreshPackages() # Install GPG keys. if len(gpg_keys) > 0: for inst in gpg_keys: self.logger.info("Installing GPG keys.") pkguri = self.instance_status[inst].get('pkg').get('uri') pkgname = self.instance_status[inst].get('pkg').get('name') key_arg = os.path.join(pkguri, inst.get('simplefile')) if not self.cmd.run("rpm --import %s" % key_arg): self.logger.debug("Unable to install %s-%s" % (pkgname, self.str_evra(inst))) else: self.logger.debug("Installed %s-%s-%s" % (pkgname, inst.get('version'), inst.get('release'))) self.RefreshPackages() self.gpg_keyids = self.getinstalledgpg() pkg = self.instance_status[gpg_keys[0]].get('pkg') states[pkg] = self.VerifyPackage(pkg, []) # Fix upgradeable packages. if len(upgrade_pkgs) > 0: self.logger.info("Attempting to upgrade packages") upgrade_args = " ".join([os.path.join( self.instance_status[inst].get('pkg').get('uri'), inst.get('simplefile')) for inst in upgrade_pkgs]) if self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs " "%s" % upgrade_args): # The rpm command succeeded. All packages upgraded. self.logger.info("Single Pass for Upgraded Packages Succeded") upgrade_pkg_set = set([self.instance_status[inst].get('pkg') for inst in upgrade_pkgs]) self.RefreshPackages() else: # The rpm command failed. No packages upgraded. # Try upgrading instances individually. self.logger.error("Single Pass for Upgrading Packages Failed") upgraded_instances = [] for inst in upgrade_pkgs: upgrade_args = os.path.join( self.instance_status[inst].get('pkg').get('uri'), inst.get('simplefile')) #self.logger.debug("rpm --upgrade --quiet --oldpackage " # "--replacepkgs %s" % upgrade_args) if self.cmd.run("rpm --upgrade --quiet --oldpackage " "--replacepkgs %s" % upgrade_args): upgraded_instances.append(inst) else: self.logger.debug( "Package %s %s would not upgrade." 
% (self.instance_status[inst].get('pkg').get('name'), self.str_evra(inst))) upgrade_pkg_set = set([self.instance_status[inst].get('pkg') for inst in upgrade_pkgs]) self.RefreshPackages() if not Bcfg2.Options.setup.kevlar: for pkg_entry in packages: self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name'))) states[pkg_entry] = self.VerifyPackage( pkg_entry, self.modlists.get(pkg_entry, [])) self.modified.extend(ent for ent in packages if states[ent]) return states def _log_incomplete_entry_install(self, etag, ename): self.logger.error("Incomplete information for entry %s:%s; " "cannot install" % (etag, ename)) return def canInstall(self, entry): """Test if entry has enough information to be installed.""" if not self.handlesEntry(entry): return False if 'failure' in entry.attrib: self.logger.error("Cannot install entry %s:%s with bind failure" % (entry.tag, entry.get('name'))) return False instances = entry.findall('Instance') # If the entry wasn't verifiable, then we really don't want to try # and fix something that we don't know is broken. if not self.canVerify(entry): self.logger.debug("WARNING: Package %s was not verifiable, not " "passing to Install()" % entry.get('name')) return False if not instances: # Old non Instance format, unmodified. if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. # check that the Package level has # what we need for verification. if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_install(entry.tag, entry.get('name')) return False else: if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_install(entry.tag, entry.get('name')) return False else: if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. # check that the Package level has # what we need for verification. if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_install(entry.tag, entry.get('name')) return False # check that the Instance level has # what we need for verification. for inst in instances: if [attr for attr in self.__new_gpg_ireq__[inst.tag] if attr not in inst.attrib]: self._log_incomplete_entry_install(inst.tag, entry.get('name')) return False else: # New format with Instances. # check that the Package level has # what we need for verification. if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_install(entry.tag, entry.get('name')) self.logger.error(" Required attributes that " "may not be present are %s" % (self.__new_ireq__[entry.tag])) return False # check that the Instance level has # what we need for verification. for inst in instances: if inst.tag == 'Instance': if [attr for attr in self.__new_ireq__[inst.tag] if attr not in inst.attrib]: self._log_incomplete_entry_install( inst.tag, entry.get('name')) self.logger.error(" Required attributes " "that may not be present are %s" % (self.__new_ireq__[inst.tag])) return False return True def _log_incomplete_entry_verify(self, etag, ename): self.logger.error("Incomplete information for entry %s:%s; " "cannot verify" % (etag, ename)) return def canVerify(self, entry): """ Test if entry has enough information to be verified. Three types of entries are checked. 
Old style Package, New style Package with Instances, and gpg-pubkey packages. Also the old style entries get modified after the first VerifyPackage() run, so there needs to be a second test. """ if not self.handlesEntry(entry): return False if 'failure' in entry.attrib: self.logger.error("Entry %s:%s reports bind failure: %s" % (entry.tag, entry.get('name'), entry.get('failure'))) return False # we don't want to do any checks so # we don't care what the entry has in it. if (not Bcfg2.Options.setup.rpm_pkg_checks or entry.get('pkg_checks', 'true').lower() == 'false'): return True instances = entry.findall('Instance') if not instances: # Old non Instance format, unmodified. if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really packages, so we have to do # something a little different. # check that the Package level has # what we need for verification. if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_verify(entry.tag, entry.get('name')) return False elif entry.tag == 'Path' and entry.get('type') == 'ignore': # ignored Paths are only relevant during failed package # verification pass else: if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_verify(entry.tag, entry.get('name')) return False else: if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really packages, so we have to do # something a little different. # check that the Package level has # what we need for verification. if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_verify(entry.tag, entry.get('name')) return False # check that the Instance level has # what we need for verification. for inst in instances: if [attr for attr in self.__new_gpg_req__[inst.tag] if attr not in inst.attrib]: self._log_incomplete_entry_verify(inst.tag, inst.get('name')) return False else: # new format with Instances, or old style modified. # check that the Package level has # what we need for verification. if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]: self._log_incomplete_entry_verify(entry.tag, entry.get('name')) return False # check that the Instance level has # what we need for verification. for inst in instances: if inst.tag == 'Instance': if [attr for attr in self.__new_req__[inst.tag] if attr not in inst.attrib]: self._log_incomplete_entry_verify(inst.tag, inst.get('name')) return False return True def _get_tmp_entry(self, extra_entry, inst): tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', version=inst.get('version'), release=inst.get('release')) if inst.get('epoch', None) is not None: tmp_entry.set('epoch', str(inst.get('epoch'))) if inst.get('arch', None) is not None: tmp_entry.set('arch', inst.get('arch')) return def FindExtra(self): """Find extra packages.""" packages = [entry.get('name') for entry in self.getSupportedEntries()] extras = [] for (name, instances) in list(self.installed.items()): if name not in packages: extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) for installed_inst in instances: if Bcfg2.Options.setup.extra: self.logger.info("Extra Package %s %s." % (name, self.str_evra(installed_inst))) self._get_tmp_entry(extra_entry, installed_inst) extras.append(extra_entry) return extras def FindExtraInstances(self, pkg_entry, installed_entry): """ Check for installed instances that are not in the config. 
Return a Package Entry with Instances to remove, or None if there are no Instances to remove. """ name = pkg_entry.get('name') extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package'] if name in self.installOnlyPkgs: for installed_inst in installed_entry: not_found = True for inst in instances: if self.pkg_vr_equal(inst, installed_inst) or \ self.inst_evra_equal(inst, installed_inst): not_found = False break if not_found: # Extra package. self.logger.info("Extra InstallOnlyPackage %s %s." % (name, self.str_evra(installed_inst))) self._get_tmp_entry(extra_entry, installed_inst) else: # Normal package, only check arch. for installed_inst in installed_entry: not_found = True for inst in instances: if (installed_inst.get('arch', None) == inst.get('arch', None) or inst.tag == 'Package'): not_found = False break if not_found: self.logger.info("Extra Normal Package Instance %s %s" % (name, self.str_evra(installed_inst))) self._get_tmp_entry(extra_entry, installed_inst) if len(extra_entry) == 0: extra_entry = None return extra_entry def str_evra(self, instance): """Convert evra dict entries to a string.""" if instance.get('epoch', '*') in ['*', None]: return '%s-%s.%s' % (instance.get('version', '*'), instance.get('release', '*'), instance.get('arch', '*')) else: return '%s:%s-%s.%s' % (instance.get('epoch', '*'), instance.get('version', '*'), instance.get('release', '*'), instance.get('arch', '*')) def pkg_vr_equal(self, config_entry, installed_entry): ''' Compare old style entry to installed entry. Which means ignore the epoch and arch. ''' if (config_entry.tag == 'Package' and config_entry.get('version') == installed_entry.get('version') and config_entry.get('release') == installed_entry.get('release')): return True else: return False def inst_evra_equal(self, config_entry, installed_entry): """Compare new style instance to installed entry.""" if config_entry.get('epoch', None) is not None: epoch = int(config_entry.get('epoch')) else: epoch = None if (config_entry.tag == 'Instance' and (epoch == installed_entry.get('epoch', 0) or (epoch == 0 and installed_entry.get('epoch', 0) is None) or (epoch is None and installed_entry.get('epoch', 0) == 0)) and config_entry.get('version') == installed_entry.get('version') and config_entry.get('release') == installed_entry.get('release') and config_entry.get('arch', None) == installed_entry.get('arch', None)): return True else: return False def getinstalledgpg(self): """ Create a list of installed GPG key IDs. The pgp-pubkey package version is the least significant 4 bytes (big-endian) of the key ID which is good enough for our purposes. 
""" init_ts = rpmtransactionset() init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES) gpg_hdrs = getheadersbykeyword(init_ts, **{'name': 'gpg-pubkey'}) keyids = [header[rpm.RPMTAG_VERSION] for header in gpg_hdrs] keyids.append('None') init_ts.closeDB() del init_ts return keyids def VerifyPath(self, entry, _): """ We don't do anything here since all Paths are processed in __init__ """ return True src/lib/Bcfg2/Client/Tools/RcUpdate.py000066400000000000000000000113061303523157100177360ustar00rootroot00000000000000"""This is rc-update support.""" import os import Bcfg2.Client.Tools import Bcfg2.Client.XML class RcUpdate(Bcfg2.Client.Tools.SvcTool): """RcUpdate support for Bcfg2.""" name = 'RcUpdate' __execs__ = ['/sbin/rc-update', '/bin/rc-status'] __handles__ = [('Service', 'rc-update')] __req__ = {'Service': ['name', 'status']} def get_enabled_svcs(self): """ Return a list of all enabled services. """ return [line.split()[0] for line in self.cmd.run(['/bin/rc-status', '-s']).stdout.splitlines() if 'started' in line] def get_default_svcs(self): """Return a list of services in the 'default' runlevel.""" return [line.split()[0] for line in self.cmd.run(['/sbin/rc-update', 'show']).stdout.splitlines() if 'default' in line] def verify_bootstatus(self, entry, bootstatus): """Verify bootstatus for entry.""" # get a list of all started services allsrv = self.get_default_svcs() # set current_bootstatus attribute if entry.get('name') in allsrv: entry.set('current_bootstatus', 'on') else: entry.set('current_bootstatus', 'off') if bootstatus == 'on': return entry.get('name') in allsrv else: return entry.get('name') not in allsrv def VerifyService(self, entry, _): """ Verify Service status for entry. Assumes we run in the "default" runlevel. """ entry.set('target_status', entry.get('status')) # for reporting bootstatus = self.get_bootstatus(entry) if bootstatus is None: return True current_bootstatus = self.verify_bootstatus(entry, bootstatus) # check if init script exists try: os.stat('/etc/init.d/%s' % entry.get('name')) except OSError: self.logger.debug('Init script for service %s does not exist' % entry.get('name')) return False if entry.get('status') == 'ignore': # 'ignore' should verify current_svcstatus = True svcstatus = True else: svcstatus = self.check_service(entry) if entry.get('status') == 'on': if svcstatus: current_svcstatus = True else: current_svcstatus = False elif entry.get('status') == 'off': if svcstatus: current_svcstatus = False else: current_svcstatus = True if svcstatus: entry.set('current_status', 'on') else: entry.set('current_status', 'off') return current_bootstatus and current_svcstatus def InstallService(self, entry): """Install Service entry.""" self.logger.info('Installing Service %s' % entry.get('name')) bootstatus = self.get_bootstatus(entry) if bootstatus is not None: if bootstatus == 'on': # make sure service is enabled on boot bootcmd = '/sbin/rc-update add %s default' elif bootstatus == 'off': # make sure service is disabled on boot bootcmd = '/sbin/rc-update del %s default' bootcmdrv = self.cmd.run(bootcmd % entry.get('name')).success if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv buildmode = Bcfg2.Options.setup.service_mode == 'build' if (entry.get('status') == 'on' and not buildmode) and \ entry.get('current_status') == 'off': svccmdrv = self.start_service(entry) elif (entry.get('status') == 'off' or buildmode) and \ entry.get('current_status') == 'on': svccmdrv = 
self.stop_service(entry) else: svccmdrv = True # ignore status attribute return bootcmdrv and svccmdrv else: # when bootstatus is 'None', status == 'ignore' return True def FindExtra(self): """Locate extra rc-update services.""" allsrv = self.get_enabled_svcs() self.logger.debug('Found active services:') self.logger.debug(allsrv) specified = [srv.get('name') for srv in self.getSupportedEntries()] return [Bcfg2.Client.XML.Element('Service', type='rc-update', name=name) for name in allsrv if name not in specified] src/lib/Bcfg2/Client/Tools/SELinux.py000066400000000000000000001032431303523157100175600ustar00rootroot00000000000000""" Classes for SELinux entry support """ import os import re import sys import copy import glob import struct import socket import logging import selinux import seobject import Bcfg2.Client.XML import Bcfg2.Client.Tools from Bcfg2.Client.Tools.POSIX.File import POSIXFile from Bcfg2.Compat import long # pylint: disable=W0622 def pack128(int_val): """ pack a 128-bit integer in big-endian format """ max_word_size = 2 ** 32 - 1 if int_val <= max_word_size: return struct.pack('>L', int_val) words = [] for i in range(4): # pylint: disable=W0612 word = int_val & max_word_size words.append(int(word)) int_val >>= 32 words.reverse() return struct.pack('>4I', *words) def netmask_itoa(netmask, proto="ipv4"): """ convert an integer netmask (e.g., /16) to dotted-quad notation (255.255.0.0) or IPv6 prefix notation (ffff::) """ if proto == "ipv4": size = 32 family = socket.AF_INET else: # ipv6 size = 128 family = socket.AF_INET6 try: netmask = int(netmask) except ValueError: return netmask if netmask > size: raise ValueError("Netmask too large: %s" % netmask) res = long(0) for i in range(netmask): res |= 1 << (size - i - 1) netmask = socket.inet_ntop(family, pack128(res)) return netmask class SELinux(Bcfg2.Client.Tools.Tool): """ SELinux entry support """ name = 'SELinux' __handles__ = [('SEBoolean', None), ('SEFcontext', None), ('SEInterface', None), ('SELogin', None), ('SEModule', None), ('SENode', None), ('SEPermissive', None), ('SEPort', None), ('SEUser', None)] __req__ = dict(SEBoolean=['name', 'value'], SEFcontext=['name', 'selinuxtype'], SEInterface=['name', 'selinuxtype'], SELogin=['name', 'selinuxuser'], SEModule=['name'], SENode=['name', 'selinuxtype', 'proto'], SEPermissive=['name'], SEPort=['name', 'selinuxtype'], SEUser=['name', 'roles', 'prefix']) def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) self.handlers = {} for handler in self.__handles__: etype = handler[0] self.handlers[etype] = \ globals()["SELinux%sHandler" % etype.title()](self, config) self.txn = False self.post_txn_queue = [] def __getattr__(self, attr): if attr.startswith("VerifySE"): return self.GenericSEVerify elif attr.startswith("InstallSE"): return self.GenericSEInstall # there's no need for an else here, because python checks for # an attribute in the "normal" ways first. i.e., if self.txn # is used, __getattr__() is never called because txn exists as # a "normal" attribute of this object. 
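# A minimal standalone sketch (hypothetical class and method names, not
# upstream code) of the __getattr__ dispatch this comment describes: any
# VerifySE*/InstallSE* lookup that is not a real attribute is routed to a
# single generic handler.
class _DispatchSketch(object):
    def generic_verify(self, entry):
        return 'verify %s' % entry

    def generic_install(self, entry):
        return 'install %s' % entry

    def __getattr__(self, attr):
        # only reached when normal attribute lookup has already failed
        if attr.startswith('VerifySE'):
            return self.generic_verify
        if attr.startswith('InstallSE'):
            return self.generic_install
        raise AttributeError(attr)

# _DispatchSketch().VerifySEBoolean('foo') returns 'verify foo'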
See # http://docs.python.org/2/reference/datamodel.html#object.__getattr__ # for details def FindExtra(self): extra = [] for handler in self.handlers.values(): extra.extend(handler.FindExtra()) return extra def canInstall(self, entry): return (Bcfg2.Client.Tools.Tool.canInstall(self, entry) and self.handlers[entry.tag].canInstall(entry)) def primarykey(self, entry): """ return a string that should be unique amongst all entries in the specification """ return self.handlers[entry.tag].primarykey(entry) def Install(self, entries): # start a transaction semanage = seobject.semanageRecords("") if hasattr(semanage, "start"): self.logger.debug("Starting SELinux transaction") semanage.start() self.txn = True else: self.logger.debug("SELinux transactions not supported; this may " "slow things down considerably") states = Bcfg2.Client.Tools.Tool.Install(self, entries) if hasattr(semanage, "finish"): self.logger.debug("Committing SELinux transaction") semanage.finish() self.txn = False for func, arg, kwargs in self.post_txn_queue: states[arg] = func(*arg, **kwargs) return states def GenericSEInstall(self, entry): """Dispatch install to the proper method according to entry tag""" return self.handlers[entry.tag].Install(entry) def GenericSEVerify(self, entry, _): """Dispatch verify to the proper method according to entry tag""" rv = self.handlers[entry.tag].Verify(entry) if entry.get('qtext') and Bcfg2.Options.setup.interactive: entry.set('qtext', '%s\nInstall %s: (y/N) ' % (entry.get('qtext'), self.handlers[entry.tag].tostring(entry))) return rv def Remove(self, entries): """Dispatch verify to the proper removal method according to entry tag""" # sort by type types = list() for entry in entries: if entry.tag not in types: types.append(entry.tag) for etype in types: self.handlers[etype].Remove([e for e in entries if e.tag == etype]) class SELinuxEntryHandler(object): """ Generic handler for all SELinux entries """ etype = None key_format = ("name",) value_format = () str_format = '%(name)s' custom_re = re.compile(r' (?P\S+)$') custom_format = None def __init__(self, tool, config): self.tool = tool self.logger = logging.getLogger(self.__class__.__name__) self.config = config self._records = None self._all = None if not self.custom_format: self.custom_format = self.key_format @property def records(self): """ return the records object for this entry type """ if self._records is None: self._records = getattr(seobject, "%sRecords" % self.etype)("") return self._records @property def all_records(self): """ get a dict of all defined records for this entry type """ if self._all is None: self._all = self.records.get_all() return self._all @property def custom_records(self): """ try to get a dict of all customized records for this entry type, if the records object supports the customized() method """ if hasattr(self.records, "customized") and self.custom_re: rv = dict() for key in self.custom_keys: if key in self.all_records: rv[key] = self.all_records[key] else: self.logger.warning("SELinux %s %s customized, but no " "record found. This may indicate an " "error in your SELinux policy." % (self.etype, key)) return rv else: # ValueError is really a pretty dumb exception to raise, # but that's what the seobject customized() method raises # if it's defined but not implemented. yeah, i know, wtf. 
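# A minimal sketch (not upstream code) of the pattern FindExtra() further
# down builds on: prefer the customized record set when seobject implements
# customized(), otherwise fall back to the full record dump.  ValueError is
# the "not supported" signal because that is what the stub raises.
def _records_for_findextra(handler):
    """Return handler.custom_records if supported, else all_records."""
    try:
        return handler.custom_records
    except ValueError:
        return handler.all_records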
raise ValueError("custom_records") @property def custom_keys(self): """ get a list of keys for selinux records of this entry type that have been customized """ keys = [] for cmd in self.records.customized(): match = self.custom_re.search(cmd) if match: if (len(self.custom_format) == 1 and self.custom_format[0] == "name"): keys.append(match.group("name")) else: keys.append(tuple([match.group(k) for k in self.custom_format])) return keys def tostring(self, entry): """ transform an XML SELinux entry into a human-readable string """ return self.str_format % entry.attrib def keytostring(self, key): """ transform a SELinux record key into a human-readable string """ return self.str_format % self._key2attrs(key) def _key(self, entry): """ Generate an SELinux record key from an XML SELinux entry """ if len(self.key_format) == 1 and self.key_format[0] == "name": return entry.get("name") else: rv = [] for key in self.key_format: rv.append(entry.get(key)) return tuple(rv) def _key2attrs(self, key): """ Generate an XML attribute dict from an SELinux record key """ if isinstance(key, tuple): rv = dict((self.key_format[i], key[i]) for i in range(len(self.key_format)) if self.key_format[i]) else: rv = dict(name=key) if self.value_format: vals = self.all_records[key] rv.update(dict((self.value_format[i], vals[i]) for i in range(len(self.value_format)) if self.value_format[i])) return rv def key2entry(self, key): """ Generate an XML entry from an SELinux record key """ attrs = self._key2attrs(key) return Bcfg2.Client.XML.Element("SE%s" % self.etype.title(), **attrs) def _args(self, entry, method): """ Get the argument list for invoking _modify or _add, or _delete methods """ if hasattr(self, "_%sargs" % method): return getattr(self, "_%sargs" % method)(entry) elif hasattr(self, "_defaultargs"): # default args return self._defaultargs(entry) # pylint: disable=E1101 else: raise NotImplementedError def _deleteargs(self, entry): """ Get the argument list for invoking delete methods """ return (self._key(entry)) def canInstall(self, entry): """ return True if this entry is complete and can be installed """ return bool(self._key(entry)) def primarykey(self, entry): """ return a string that should be unique amongst all entries in the specification. 
some entry types are not universally disambiguated by tag:type:name alone """ return ":".join([entry.tag, entry.get("name")]) def exists(self, entry): """ return True if the entry already exists in the record list """ if self._key(entry) not in self.all_records: self.logger.debug("SELinux %s %s does not exist" % (self.etype, self.tostring(entry))) return False return True def Verify(self, entry): """ verify that the entry is correct on the client system """ if not self.exists(entry): entry.set('current_exists', 'false') return False errors = [] current_attrs = self._key2attrs(self._key(entry)) desired_attrs = entry.attrib for attr in self.value_format: if not attr: continue if current_attrs[attr] != desired_attrs[attr]: entry.set('current_%s' % attr, current_attrs[attr]) errors.append("%s %s has wrong %s: %s, should be %s" % (entry.tag, entry.get('name'), attr, current_attrs[attr], desired_attrs[attr])) if errors: for error in errors: self.logger.debug(error) entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) return False else: return True def Install(self, entry, method=None): """ install the entry on the client system """ if not method: if self.exists(entry): method = "modify" else: method = "add" self.logger.debug("%s SELinux %s %s" % (method.title(), self.etype, self.tostring(entry))) try: getattr(self.records, method)(*self._args(entry, method)) self._all = None return True except ValueError: err = sys.exc_info()[1] self.logger.info("Failed to %s SELinux %s %s: %s" % (method, self.etype, self.tostring(entry), err)) return False def Remove(self, entries): """ remove the entry from the client system """ for entry in entries: try: self.records.delete(*self._args(entry, "delete")) self._all = None except ValueError: err = sys.exc_info()[1] self.logger.info("Failed to remove SELinux %s %s: %s" % (self.etype, self.tostring(entry), err)) def FindExtra(self): """ find extra entries of this entry type """ specified = [self._key(e) for e in self.tool.getSupportedEntries() if e.tag == "SE%s" % self.etype.title()] try: records = self.custom_records except ValueError: records = self.all_records return [self.key2entry(key) for key in records.keys() if key not in specified] class SELinuxSebooleanHandler(SELinuxEntryHandler): """ handle SELinux boolean entries """ etype = "boolean" value_format = ("value",) @property def all_records(self): # older versions of selinux return a single 0/1 value for each # bool, while newer versions return a list of three 0/1 values # representing various states. we don't care about the latter # two values, but it's easier to coerce the older format into # the newer format as far as interoperation with the rest of # SELinuxEntryHandler goes rv = SELinuxEntryHandler.all_records.fget(self) if rv.values()[0] in [0, 1]: for key, val in rv.items(): rv[key] = [val, val, val] return rv def _key2attrs(self, key): rv = SELinuxEntryHandler._key2attrs(self, key) status = self.all_records[key][0] if status: rv['value'] = "on" else: rv['value'] = "off" return rv def _defaultargs(self, entry): """ argument list for adding, modifying and deleting entries """ # the only values recognized by both new and old versions of # selinux are the strings "0" and "1". 
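# A minimal sketch of the normalisation this comment describes: whatever the
# entry says, the value handed to seobject is the string "1" or "0", the one
# spelling that old and new libselinux versions agree on.
def _boolean_arg(value):
    """Map an SEBoolean entry value ("on"/"off") to "1"/"0"."""
    if value.lower() == 'on':
        return '1'
    return '0'

# _boolean_arg('On') -> '1';  _boolean_arg('off') -> '0'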
old selinux accepts # ints or bools as well, new selinux accepts "on"/"off" if entry.get("value").lower() == "on": value = "1" else: value = "0" return (entry.get("name"), value) def canInstall(self, entry): if entry.get("value").lower() not in ["on", "off"]: self.logger.debug("SELinux %s %s has a bad value: %s" % (self.etype, self.tostring(entry), entry.get("value"))) return False return (self.exists(entry) and SELinuxEntryHandler.canInstall(self, entry)) class SELinuxSeportHandler(SELinuxEntryHandler): """ handle SELinux port entries """ etype = "port" value_format = ('selinuxtype', None) custom_re = re.compile(r'-p (?Ptcp|udp).*? ' r'(?P\d+)(?:-(?P\d+))?$') @property def custom_keys(self): keys = [] for cmd in self.records.customized(): match = self.custom_re.search(cmd) if match: if match.group('end'): keys.append((int(match.group('start')), int(match.group('end')), match.group('proto'))) else: keys.append((int(match.group('start')), int(match.group('start')), match.group('proto'))) return keys @property def all_records(self): if self._all is None: # older versions of selinux use (startport, endport) as # they key for the ports.get_all() dict, and (type, proto, # level) as the value; this is obviously broken, so newer # versions use (startport, endport, proto) as the key, and # (type, level) as the value. abstracting around this # sucks. ports = self.records.get_all() if len(ports.keys()[0]) == 3: self._all = ports else: # uglist list comprehension ever? self._all = dict([((k[0], k[1], v[1]), (v[0], v[2])) for k, v in ports.items()]) return self._all def _key(self, entry): try: (port, proto) = entry.get("name").split("/") except ValueError: self.logger.error("Invalid SELinux node %s: no protocol specified" % entry.get("name")) return if "-" in port: start, end = port.split("-") else: start = port end = port return (int(start), int(end), proto) def _key2attrs(self, key): if key[0] == key[1]: port = str(key[0]) else: port = "%s-%s" % (key[0], key[1]) vals = self.all_records[key] return dict(name="%s/%s" % (port, key[2]), selinuxtype=vals[0]) def _defaultargs(self, entry): """ argument list for adding and modifying entries """ (port, proto) = entry.get("name").split("/") return (port, proto, entry.get("mlsrange", ""), entry.get("selinuxtype")) def _deleteargs(self, entry): return tuple(entry.get("name").split("/")) class SELinuxSefcontextHandler(SELinuxEntryHandler): """ handle SELinux file context entries """ etype = "fcontext" key_format = ("name", "filetype") value_format = (None, None, "selinuxtype", None) filetypeargs = dict(all="", regular="--", directory="-d", symlink="-l", pipe="-p", socket="-s", block="-b", char="-c", door="-D") filetypenames = dict(all="all files", regular="regular file", directory="directory", symlink="symbolic link", pipe="named pipe", socket="socket", block="block device", char="character device", door="door") filetypeattrs = dict([v, k] for k, v in filetypenames.iteritems()) custom_re = re.compile(r'-f \'(?P[a-z ]+)\'.*? \'(?P.*)\'') @property def all_records(self): if self._all is None: # on older selinux, fcontextRecords.get_all() returns a # list of tuples of (filespec, filetype, seuser, serole, # setype, level); on newer selinux, get_all() returns a # dict of (filespec, filetype) => (seuser, serole, setype, # level). 
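# Illustrative sketch (made-up fcontext values) of the shape normalisation
# done just below: old seobject returns fcontexts as a flat list of tuples,
# while newer versions already return the (filespec, filetype) -> values
# dict that the rest of this handler expects.
def _normalize_fcontexts(fcontexts):
    """Coerce old-style list-of-tuples output into the new-style dict."""
    if isinstance(fcontexts, dict):
        return fcontexts
    return dict([(f[0:2], f[2:]) for f in fcontexts])

# _normalize_fcontexts([('/web(/.*)?', 'all files', 'system_u',
#                        'object_r', 'httpd_sys_content_t', 's0')])
#   -> {('/web(/.*)?', 'all files'):
#       ('system_u', 'object_r', 'httpd_sys_content_t', 's0')}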
fcontexts = self.records.get_all() if isinstance(fcontexts, dict): self._all = fcontexts else: self._all = dict([(f[0:2], f[2:]) for f in fcontexts]) return self._all def _key(self, entry): ftype = entry.get("filetype", "all") return (entry.get("name"), self.filetypenames.get(ftype, ftype)) def _key2attrs(self, key): rv = dict(name=key[0], filetype=self.filetypeattrs[key[1]]) vals = self.all_records[key] # in older versions of selinux, an fcontext with no selinux # type is the single value None; in newer versions, it's a # tuple whose 0th (and only) value is None. if vals and vals[0]: rv["selinuxtype"] = vals[2] else: rv["selinuxtype"] = "<>" return rv def canInstall(self, entry): return (entry.get("filetype", "all") in self.filetypeargs and SELinuxEntryHandler.canInstall(self, entry)) def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ return (entry.get("name"), entry.get("selinuxtype"), self.filetypeargs[entry.get("filetype", "all")], entry.get("mlsrange", ""), '') def primarykey(self, entry): return ":".join([entry.tag, entry.get("name"), entry.get("filetype", "all")]) class SELinuxSenodeHandler(SELinuxEntryHandler): """ handle SELinux node entries """ etype = "node" value_format = (None, None, "selinuxtype", None) str_format = '%(name)s (%(proto)s)' custom_re = re.compile(r'-M (?P\S+).*?' r'-p (?Pipv\d).*? (?P\S+)$') custom_format = ('addr', 'netmask', 'proto') def _key(self, entry): try: (addr, netmask) = entry.get("name").split("/") except ValueError: self.logger.error("Invalid SELinux node %s: no netmask specified" % entry.get("name")) return netmask = netmask_itoa(netmask, proto=entry.get("proto")) return (addr, netmask, entry.get("proto")) def _key2attrs(self, key): vals = self.all_records[key] return dict(name="%s/%s" % (key[0], key[1]), proto=key[2], selinuxtype=vals[2]) def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ (addr, netmask) = entry.get("name").split("/") return (addr, netmask, entry.get("proto"), entry.get("mlsrange", ""), entry.get("selinuxtype")) class SELinuxSeloginHandler(SELinuxEntryHandler): """ handle SELinux login entries """ etype = "login" value_format = ("selinuxuser", None) def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ return (entry.get("name"), entry.get("selinuxuser"), entry.get("mlsrange", "")) class SELinuxSeuserHandler(SELinuxEntryHandler): """ handle SELinux user entries """ etype = "user" value_format = ("prefix", None, None, "roles") def __init__(self, tool, config): SELinuxEntryHandler.__init__(self, tool, config) self.needs_prefix = False @property def records(self): if self._records is None: self._records = seobject.seluserRecords() return self._records def Install(self, entry, method=None): # in older versions of selinux, modify() is broken if you # provide a prefix _at all_, so we try to avoid giving the # prefix. however, in newer versions, prefix is _required_, # so we a) try without a prefix; b) catch TypeError, which # indicates that we had the wrong number of args (ValueError # is thrown by the bug in older versions of selinux); and c) # try with prefix. 
try: SELinuxEntryHandler.Install(self, entry, method=method) except TypeError: self.needs_prefix = True SELinuxEntryHandler.Install(self, entry, method=method) def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ # in older versions of selinux, modify() is broken if you # provide a prefix _at all_, so we try to avoid giving the # prefix. see the comment in Install() above for more # details. rv = [entry.get("name"), entry.get("roles", "").replace(" ", ",").split(","), '', entry.get("mlsrange", "")] if self.needs_prefix: rv.append(entry.get("prefix")) else: key = self._key(entry) if key in self.all_records: attrs = self._key2attrs(key) if attrs['prefix'] != entry.get("prefix"): rv.append(entry.get("prefix")) return tuple(rv) class SELinuxSeinterfaceHandler(SELinuxEntryHandler): """ handle SELinux interface entries """ etype = "interface" value_format = (None, None, "selinuxtype", None) def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ return (entry.get("name"), entry.get("mlsrange", ""), entry.get("selinuxtype")) class SELinuxSepermissiveHandler(SELinuxEntryHandler): """ handle SELinux permissive domain entries """ etype = "permissive" @property def records(self): try: return SELinuxEntryHandler.records.fget(self) except AttributeError: self.logger.info("Permissive domains not supported by this " "version of SELinux") self._records = None return self._records @property def all_records(self): if self._all is None: if self.records is None: self._all = dict() else: # permissiveRecords.get_all() returns a list, so we just # make it into a dict so that the rest of # SELinuxEntryHandler works self._all = dict([(d, d) for d in self.records.get_all()]) return self._all def _defaultargs(self, entry): """ argument list for adding, modifying, and deleting entries """ return (entry.get("name"),) class SELinuxSemoduleHandler(SELinuxEntryHandler): """ handle SELinux module entries """ etype = "module" value_format = (None, "disabled") def __init__(self, tool, config): SELinuxEntryHandler.__init__(self, tool, config) self.filetool = POSIXFile(config) try: self.setype = selinux.selinux_getpolicytype()[1] except IndexError: self.logger.error("Unable to determine SELinux policy type") self.setype = None @property def all_records(self): if self._all is None: try: # we get a list of tuples back; coerce it into a dict self._all = dict([(m[0], (m[1], m[2])) for m in self.records.get_all()]) except AttributeError: # early versions of seobject don't have moduleRecords, # so we parse the output of `semodule` >_< self._all = dict() self.logger.debug("SELinux: Getting modules from semodule") try: rv = self.tool.cmd.run(['semodule', '-l']) except OSError: # semanage failed; probably not in $PATH. 
try to # get the list of modules from the filesystem err = sys.exc_info()[1] self.logger.debug("SELinux: Failed to run semodule: %s" % err) self._all.update(self._all_records_from_filesystem()) else: if rv.success: # ran semodule successfully for line in rv.stdout.splitlines(): mod, version = line.split() self._all[mod] = (version, 1) # get other (disabled) modules from the filesystem for mod in self._all_records_from_filesystem().keys(): if mod not in self._all: self._all[mod] = ('', 0) else: self.logger.error("SELinux: Failed to run semodule: %s" % rv.error) self._all.update(self._all_records_from_filesystem()) return self._all def _all_records_from_filesystem(self): """ the seobject API doesn't support modules and semodule is broken or missing, so just list modules on the filesystem. this is terrible. """ self.logger.debug("SELinux: Getting modules from filesystem") rv = dict() for mod in glob.glob(os.path.join("/usr/share/selinux", self.setype, "*.pp")): rv[os.path.basename(mod)[:-3]] = ('', 1) return rv def _key(self, entry): name = entry.get("name").lstrip("/") if name.endswith(".pp"): return name[:-3] else: return name def _key2attrs(self, key): rv = SELinuxEntryHandler._key2attrs(self, key) status = self.all_records[key][1] if status: rv['disabled'] = "false" else: rv['disabled'] = "true" return rv def _filepath(self, entry): """ get the path to the .pp module file for this module entry """ return os.path.join("/usr/share/selinux", self.setype, entry.get("name") + '.pp') def _pathentry(self, entry): """ Get an XML Path entry based on this SELinux module entry, suitable for installing the module .pp file itself to the filesystem """ pathentry = copy.deepcopy(entry) pathentry.set("name", self._filepath(pathentry)) pathentry.set("mode", "0644") pathentry.set("owner", "root") pathentry.set("group", "root") pathentry.set("secontext", "__default__") return pathentry def Verify(self, entry): if not entry.get("disabled"): entry.set("disabled", "false") return (SELinuxEntryHandler.Verify(self, entry) and self.filetool.verify(self._pathentry(entry), [])) def canInstall(self, entry): return (entry.text and self.setype and SELinuxEntryHandler.canInstall(self, entry)) def Install(self, entry, _=None): if not self.filetool.install(self._pathentry(entry)): return False if hasattr(seobject, 'moduleRecords'): # if seobject has the moduleRecords attribute, install the # module using the seobject library return self._install_seobject(entry) else: # seobject doesn't have the moduleRecords attribute, so # install the module using `semodule` self.logger.debug("Installing %s using semodule" % entry.get("name")) self._all = None return self._install_semodule(entry) def _install_seobject(self, entry): """ Install an SELinux module using the seobject library """ try: if not SELinuxEntryHandler.Install(self, entry): return False except NameError: # some versions of selinux have a bug in seobject that # makes modify() calls fail. 
add() seems to have the same # effect as modify, but without the bug if self.exists(entry): if not SELinuxEntryHandler.Install(self, entry, method="add"): return False if entry.get("disabled", "false").lower() == "true": method = "disable" else: method = "enable" return SELinuxEntryHandler.Install(self, entry, method=method) def _install_semodule(self, entry, fromqueue=False): """ Install an SELinux module using the semodule command """ if fromqueue: self.logger.debug("Installing SELinux module %s from " "post-transaction queue" % entry.get("name")) elif self.tool.txn: # we've started a transaction, so if we run semodule -i # then it'll fail with lock errors. so we add this # installation to a queue to be run after the transaction # is closed. self.logger.debug("Waiting to install SELinux module %s until " "SELinux transaction is finished" % entry.get('name')) self.tool.post_txn_queue.append((self._install_semodule, (entry,), dict(fromqueue=True))) return False self.logger.debug("Install SELinux module %s with semodule -i %s" % (entry.get('name'), self._filepath(entry))) try: rv = self.tool.cmd.run(['semodule', '-i', self._filepath(entry)]) except OSError: err = sys.exc_info()[1] self.logger.error("Failed to install SELinux module %s with " "semodule: %s" % (entry.get("name"), err)) return False if rv.success: if entry.get("disabled", "false").lower() == "true": self.logger.warning("SELinux: Cannot disable modules with " "semodule") return False else: return True else: self.logger.error("Failed to install SELinux module %s with " "semodule: %s" % (entry.get("name"), rv.error)) return False def _addargs(self, entry): """ argument list for adding entries """ return (self._filepath(entry),) def _defaultargs(self, entry): """ argument list for modifying and deleting entries """ return (entry.get("name"),) def FindExtra(self): specified = [self._key(e) for e in self.tool.getSupportedEntries()] rv = [] for module in self._all_records_from_filesystem().keys(): if module not in specified: rv.append(self.key2entry(module)) return rv src/lib/Bcfg2/Client/Tools/SMF.py000066400000000000000000000125561303523157100166640ustar00rootroot00000000000000"""SMF support for Bcfg2""" import glob import os import Bcfg2.Client.Tools class SMF(Bcfg2.Client.Tools.SvcTool): """Support for Solaris SMF Services.""" __handles__ = [('Service', 'smf')] __execs__ = ['/usr/sbin/svcadm', '/usr/bin/svcs'] __req__ = {'Service': ['name', 'status', 'FMRI']} def get_svc_command(self, service, action): if service.get('type') == 'lrc': return Bcfg2.Client.Tools.SvcTool.get_svc_command(self, service, action) if action == 'stop': return "/usr/sbin/svcadm disable %s" % (service.get('FMRI')) elif action == 'restart': return "/usr/sbin/svcadm restart %s" % (service.get('FMRI')) elif action == 'start': return "/usr/sbin/svcadm enable %s" % (service.get('FMRI')) def GetFMRI(self, entry): """Perform FMRI resolution for service.""" if 'FMRI' not in entry.attrib: rv = self.cmd.run(["/usr/bin/svcs", "-H", "-o", "FMRI", entry.get('name')]) if rv.success: entry.set('FMRI', rv.stdout.splitlines()[0]) else: self.logger.info('Failed to locate FMRI for service %s' % entry.get('name')) return rv.success return True def VerifyService(self, entry, _): """Verify SMF Service entry.""" if not self.GetFMRI(entry): self.logger.error("smf service %s doesn't have FMRI set" % entry.get('name')) return False if entry.get('FMRI').startswith('lrc'): filename = entry.get('FMRI').split('/')[-1] # this is a legacy service gname = "/etc/rc*.d/%s" % filename files = 
glob.glob(gname.replace('_', '.')) if files: self.logger.debug("Matched %s with %s" % (entry.get("FMRI"), ":".join(files))) return entry.get('status') == 'on' else: self.logger.debug("No service matching %s" % entry.get("FMRI")) return entry.get('status') == 'off' try: srvdata = \ self.cmd.run("/usr/bin/svcs -H -o STA %s" % entry.get('FMRI')).stdout.splitlines()[0].split() except IndexError: # Occurs when no lines are returned (service not installed) return False entry.set('current_status', srvdata[0]) if entry.get('status') == 'on': return srvdata[0] == 'ON' else: return srvdata[0] in ['OFF', 'UN', 'MNT', 'DIS', 'DGD'] def InstallService(self, entry): """Install SMF Service entry.""" self.logger.info("Installing Service %s" % (entry.get('name'))) if entry.get('status') == 'off': if entry.get("FMRI").startswith('lrc'): try: loc = entry.get("FMRI")[4:].replace('_', '.') self.logger.debug("Renaming file %s to %s" % (loc, loc.replace('/S', '/DISABLED.S'))) os.rename(loc, loc.replace('/S', '/DISABLED.S')) return True except OSError: self.logger.error("Failed to rename init script %s" % loc) return False else: return self.cmd.run("/usr/sbin/svcadm disable %s" % entry.get('FMRI')).success elif entry.get('FMRI').startswith('lrc'): loc = entry.get("FMRI")[4:].replace('_', '.') try: os.stat(loc.replace('/S', '/Disabled.')) self.logger.debug("Renaming file %s to %s" % (loc.replace('/S', '/DISABLED.S'), loc)) os.rename(loc.replace('/S', '/DISABLED.S'), loc) return True except OSError: self.logger.debug("Failed to rename %s to %s" % (loc.replace('/S', '/DISABLED.S'), loc)) return False else: srvdata = \ self.cmd.run("/usr/bin/svcs -H -o STA %s" % entry.get('FMRI'))[1].splitlines()[0].split() if srvdata[0] == 'MNT': cmdarg = 'clear' else: cmdarg = 'enable' return self.cmd.run("/usr/sbin/svcadm %s -r %s" % (cmdarg, entry.get('FMRI'))).success def Remove(self, svcs): """Remove Extra SMF entries.""" # Extra service entry removal is nonsensical # Extra service entries should be reflected in config, even if disabled pass def FindExtra(self): """Find Extra SMF Services.""" allsrv = [] for srvc in self.cmd.run(["/usr/bin/svcs", "-a", "-H", "-o", "FMRI,STATE"]).stdout.splitlines(): name, version = srvc.split() if version != 'disabled': allsrv.append(name) for svc in self.getSupportedEntries(): if svc.get("FMRI") in allsrv: allsrv.remove(svc.get('FMRI')) return [Bcfg2.Client.XML.Element("Service", type='smf', name=name) for name in allsrv] src/lib/Bcfg2/Client/Tools/SYSV.py000066400000000000000000000132471303523157100170410ustar00rootroot00000000000000"""This provides bcfg2 support for Solaris SYSV packages.""" import tempfile from Bcfg2.Compat import any # pylint: disable=W0622 import Bcfg2.Client.Tools import Bcfg2.Client.XML from Bcfg2.Compat import urlretrieve # pylint: disable=C0103 noask = ''' mail= instance=overwrite partial=nocheck runlevel=nocheck idepend=nocheck rdepend=nocheck space=ask setuid=nocheck conflict=nocheck action=nocheck basedir=default ''' # pylint: enable=C0103 class SYSV(Bcfg2.Client.Tools.PkgTool): """Solaris SYSV package support.""" __execs__ = ["/usr/sbin/pkgadd", "/usr/bin/pkginfo"] __handles__ = [('Package', 'sysv')] __req__ = {'Package': ['name', 'version']} __ireq__ = {'Package': ['name', 'url', 'version']} name = 'SYSV' pkgtype = 'sysv' pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name']))) def __init__(self, config): Bcfg2.Client.Tools.PkgTool.__init__(self, config) # noaskfile needs to live beyond __init__ otherwise file is removed self.noaskfile = 
tempfile.NamedTemporaryFile() self.noaskname = self.noaskfile.name # for any pkg files downloaded self.tmpfiles = [] try: self.noaskfile.write(noask) # flush admin file contents to disk self.noaskfile.flush() self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), self.pkgtool[1]) except: # pylint: disable=W0702 self.pkgtool = (self.pkgtool[0] % "", self.pkgtool[1]) self.origpkgtool = self.pkgtool def pkgmogrify(self, packages): """ Take a list of pkg objects, check for a 'simplefile' attribute. If present, insert a _sysv_pkg_path attribute to the package and download the datastream format SYSV package to a temporary file. """ for pkg in packages: if pkg.get('simplefile'): tmpfile = tempfile.NamedTemporaryFile() self.tmpfiles.append(tmpfile) self.logger.info("Downloading %s to %s" % (pkg.get('url'), tmpfile.name)) urlretrieve(pkg.get('url'), tmpfile.name) pkg.set('_sysv_pkg_path', tmpfile.name) def _get_package_command(self, packages): """Override the default _get_package_command, replacing the attribute 'url' if '_sysv_pkg_path' if necessary in the returned command string """ if hasattr(self, 'origpkgtool'): if len(packages) == 1 and '_sysv_pkg_path' in packages[0].keys(): self.pkgtool = (self.pkgtool[0], ('%s %s', ['_sysv_pkg_path', 'name'])) else: self.pkgtool = self.origpkgtool pkgcmd = super(SYSV, self)._get_package_command(packages) self.logger.debug("Calling install command: %s" % pkgcmd) return pkgcmd def Install(self, packages): self.pkgmogrify(packages) super(SYSV, self).Install(packages) def RefreshPackages(self): """Refresh memory hashes of packages.""" self.installed = {} # Build list of packages lines = self.cmd.run("/usr/bin/pkginfo -x").stdout.splitlines() while lines: # Splitting on whitespace means that packages with spaces in # their version numbers don't work right. Found this with # IBM TSM software with package versions like # "Version 6 Release 1 Level 0.0" # Should probably be done with a regex but this works. 
version = lines.pop().split(') ')[1] pkg = lines.pop().split()[0] self.installed[pkg] = version def VerifyPackage(self, entry, modlist): """Verify Package status for entry.""" desired_version = entry.get('version') if desired_version == 'any': desired_version = self.installed.get(entry.get('name'), desired_version) if not self.cmd.run(["/usr/bin/pkginfo", "-q", "-v", desired_version, entry.get('name')]): if entry.get('name') in self.installed: self.logger.debug("Package %s version incorrect: " "have %s want %s" % (entry.get('name'), self.installed[entry.get('name')], desired_version)) else: self.logger.debug("Package %s not installed" % entry.get("name")) else: if Bcfg2.Options.setup.quick or \ entry.attrib.get('verify', 'true') == 'false': return True rv = self.cmd.run("/usr/sbin/pkgchk -n %s" % entry.get('name')) if rv.success: return True else: output = [line for line in rv.stdout.splitlines() if line[:5] == 'ERROR'] if any(name for name in output if name.split()[-1] not in modlist): self.logger.debug("Package %s content verification failed" % entry.get('name')) else: return True return False def Remove(self, packages): """Remove specified Sysv packages.""" names = [pkg.get('name') for pkg in packages] self.logger.info("Removing packages: %s" % (names)) self.cmd.run("/usr/sbin/pkgrm -a %s -n %s" % (self.noaskname, names)) self.RefreshPackages() self.extra = self.FindExtra() src/lib/Bcfg2/Client/Tools/Systemd.py000066400000000000000000000076201303523157100176630ustar00rootroot00000000000000# This is the bcfg2 support for systemd """This is systemd support.""" import glob import os import Bcfg2.Client.Tools import Bcfg2.Client.XML class Systemd(Bcfg2.Client.Tools.SvcTool): """Systemd support for Bcfg2.""" name = 'Systemd' __execs__ = ['/bin/systemctl'] __handles__ = [('Service', 'systemd')] __req__ = {'Service': ['name', 'status']} def get_svc_name(self, service): """Append .service to name if name doesn't specify a unit type.""" svc = service.get('name') if svc.endswith(('.service', '.socket', '.device', '.mount', '.automount', '.swap', '.target', '.path', '.timer', '.snapshot', '.slice', '.scope')): return svc else: return '%s.service' % svc def get_svc_command(self, service, action): return "/bin/systemctl %s %s" % (action, self.get_svc_name(service)) def VerifyService(self, entry, _): """Verify Service status for entry.""" entry.set('target_status', entry.get('status')) # for reporting bootstatus = self.get_bootstatus(entry) if bootstatus is None: # bootstatus is unspecified and status is ignore return True if self.cmd.run(self.get_svc_command(entry, 'is-enabled')): current_bootstatus = 'on' else: current_bootstatus = 'off' if entry.get('status') == 'ignore': return current_bootstatus == bootstatus cmd = self.get_svc_command(entry, 'show') + ' -p ActiveState' rv = self.cmd.run(cmd) if rv.stdout.strip() in ('ActiveState=active', 'ActiveState=activating', 'ActiveState=reloading'): current_status = 'on' else: current_status = 'off' entry.set('current_status', current_status) return (entry.get('status') == current_status and bootstatus == current_bootstatus) def InstallService(self, entry): """Install Service entry.""" self.logger.info("Installing Service %s" % (entry.get('name'))) bootstatus = self.get_bootstatus(entry) if bootstatus is None: # bootstatus is unspecified and status is ignore return True # Enable or disable the service if bootstatus == 'on': cmd = self.get_svc_command(entry, 'enable') else: cmd = self.get_svc_command(entry, 'disable') if not self.cmd.run(cmd).success: # 
Return failure immediately and do not start/stop the service. return False # Start or stop the service, depending on the current service_mode cmd = None if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs pass elif Bcfg2.Options.setup.service_mode == 'build': # 'build' means we attempt to stop all services started if entry.get('current_status') == 'on': cmd = self.get_svc_command(entry, 'stop') else: if entry.get('status') == 'on': cmd = self.get_svc_command(entry, 'start') elif entry.get('status') == 'off': cmd = self.get_svc_command(entry, 'stop') if cmd: return self.cmd.run(cmd).success else: return True def FindExtra(self): """Find Extra Systemd Service entries.""" specified = [self.get_svc_name(entry) for entry in self.getSupportedEntries()] extra = set() for fname in glob.glob("/etc/systemd/system/*.wants/*"): name = os.path.basename(fname) if name not in specified: extra.add(name) return [Bcfg2.Client.XML.Element('Service', name=name, type='systemd') for name in list(extra)] src/lib/Bcfg2/Client/Tools/Upstart.py000066400000000000000000000057321303523157100176770ustar00rootroot00000000000000"""Upstart support for Bcfg2.""" import glob import re import Bcfg2.Client.Tools import Bcfg2.Client.XML class Upstart(Bcfg2.Client.Tools.SvcTool): """Upstart service support for Bcfg2.""" name = 'Upstart' __execs__ = ['/lib/init/upstart-job', '/sbin/initctl', '/usr/sbin/service'] __handles__ = [('Service', 'upstart')] __req__ = {'Service': ['name', 'status']} svcre = re.compile("/etc/init/(?P.*).conf") def get_svc_command(self, service, action): return "/usr/sbin/service %s %s" % (service.get('name'), action) def VerifyService(self, entry, _): """Verify Service status for entry Verifying whether or not the service is enabled can be done at the file level with upstart using the contents of /etc/init/servicename.conf. All we need to do is make sure the service is running when it should be. """ if entry.get('status') == 'ignore': return True if entry.get('parameters'): params = entry.get('parameters') else: params = '' try: output = self.cmd.run('/usr/sbin/service %s status %s' % (entry.get('name'), params)).stdout.splitlines()[0] except IndexError: self.logger.error("Service %s not an Upstart service" % entry.get('name')) return False match = re.compile(r'%s( \(.*\))? 
(start|stop)/(running|waiting)' % entry.get('name')).match(output) if match is None: # service does not exist entry.set('current_status', 'off') status = False elif match.group(3) == 'running': # service is running entry.set('current_status', 'on') if entry.get('status') == 'off': status = False else: status = True else: # service is not running entry.set('current_status', 'off') if entry.get('status') == 'on': status = False else: status = True return status def InstallService(self, entry): """Install Service for entry.""" if entry.get('status') == 'on': cmd = "start" elif entry.get('status') == 'off': cmd = "stop" return self.cmd.run(self.get_svc_command(entry, cmd)).success def FindExtra(self): """Locate extra Upstart services.""" specified = [entry.get('name') for entry in self.getSupportedEntries()] extra = [] for fname in glob.glob("/etc/init/*.conf"): if self.svcre.match(fname).group('name') not in specified: extra.append(self.svcre.match(fname).group('name')) return [Bcfg2.Client.XML.Element('Service', type='upstart', name=name) for name in extra] src/lib/Bcfg2/Client/Tools/VCS.py000066400000000000000000000156511303523157100166710ustar00rootroot00000000000000"""VCS support.""" # TODO: # * add svn support # * integrate properly with reports missing = [] import errno import os import shutil import sys import stat # python-dulwich git imports try: import dulwich import dulwich.index from dulwich.errors import NotGitRepository except ImportError: missing.append('git') # subversion import try: import pysvn except ImportError: missing.append('svn') import Bcfg2.Client.Tools def cleanup_mode(mode): """Cleanup a mode value. This will return a mode that can be stored in a tree object. :param mode: Mode to clean up. """ if stat.S_ISLNK(mode): return stat.S_IFLNK elif stat.S_ISDIR(mode): return stat.S_IFDIR elif dulwich.index.S_ISGITLINK(mode): return dulwich.index.S_IFGITLINK ret = stat.S_IFREG | int('644', 8) ret |= (mode & int('111', 8)) return ret def index_entry_from_stat(stat_val, hex_sha, flags, mode=None): """Create a new index entry from a stat value. 
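# Concrete examples (gitlink handling omitted, simplified mirror only) of
# what cleanup_mode() above produces: regular files collapse to 0644/0755,
# and only the execute bits of the original mode survive.
import stat

def _demo_cleanup_mode(mode):
    """Simplified mirror of cleanup_mode() for files, dirs and symlinks."""
    if stat.S_ISLNK(mode):
        return stat.S_IFLNK
    if stat.S_ISDIR(mode):
        return stat.S_IFDIR
    return stat.S_IFREG | int('644', 8) | (mode & int('111', 8))

assert _demo_cleanup_mode(int('100664', 8)) == int('100644', 8)
assert _demo_cleanup_mode(int('100775', 8)) == int('100755', 8)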
:param stat_val: POSIX stat_result instance :param hex_sha: Hex sha of the object :param flags: Index flags """ if mode is None: mode = cleanup_mode(stat_val.st_mode) return (stat_val.st_ctime, stat_val.st_mtime, stat_val.st_dev, stat_val.st_ino, mode, stat_val.st_uid, stat_val.st_gid, stat_val.st_size, hex_sha, flags) class VCS(Bcfg2.Client.Tools.Tool): """VCS support.""" __handles__ = [('Path', 'vcs')] __req__ = {'Path': ['name', 'type', 'vcstype', 'sourceurl', 'revision']} def git_write_index(self, entry): """Write the git index""" pass def Verifygit(self, entry, _): """Verify git repositories""" try: repo = dulwich.repo.Repo(entry.get('name')) except NotGitRepository: self.logger.info("Repository %s does not exist" % entry.get('name')) return False try: expected_rev = entry.get('revision') cur_rev = repo.head() except: return False try: client, path = dulwich.client.get_transport_and_path( entry.get('sourceurl')) remote_refs = client.fetch_pack(path, (lambda x: None), None, None, None) if expected_rev in remote_refs: expected_rev = remote_refs[expected_rev] except: pass if cur_rev != expected_rev: self.logger.info("At revision %s need to go to revision %s" % (cur_rev.strip(), expected_rev.strip())) return False return True def Installgit(self, entry): """Checkout contents from a git repository""" destname = entry.get('name') if os.path.lexists(destname): # remove incorrect contents try: if os.path.isdir(destname): shutil.rmtree(destname) else: os.remove(destname) except OSError: self.logger.info('Failed to remove %s' % destname) return False dulwich.file.ensure_dir_exists(destname) destr = dulwich.repo.Repo.init(destname) determine_wants = destr.object_store.determine_wants_all cl, host_path = dulwich.client.get_transport_and_path( entry.get('sourceurl')) remote_refs = cl.fetch(host_path, destr, determine_wants=determine_wants, progress=sys.stdout.write) if entry.get('revision') in remote_refs: destr.refs['HEAD'] = remote_refs[entry.get('revision')] else: destr.refs['HEAD'] = entry.get('revision') dtree = destr['HEAD'].tree index = dulwich.index.Index(destr.index_path()) for fname, mode, sha in destr.object_store.iter_tree_contents(dtree): full_path = os.path.join(destname, fname) dulwich.file.ensure_dir_exists(os.path.dirname(full_path)) if stat.S_ISLNK(mode): src_path = destr[sha].as_raw_string() try: os.symlink(src_path, full_path) except OSError: e = sys.exc_info()[1] if e.errno == errno.EEXIST: os.unlink(full_path) os.symlink(src_path, full_path) else: raise else: file = open(full_path, 'wb') file.write(destr[sha].as_raw_string()) file.close() os.chmod(full_path, mode) st = os.lstat(full_path) index[fname] = index_entry_from_stat(st, sha, 0) index.write() return True def Verifysvn(self, entry, _): """Verify svn repositories""" # pylint: disable=E1101 headrev = pysvn.Revision(pysvn.opt_revision_kind.head) # pylint: enable=E1101 client = pysvn.Client() try: cur_rev = str(client.info(entry.get('name')).revision.number) server = client.info2(entry.get('sourceurl'), headrev, recurse=False) if server: server_rev = str(server[0][1].rev.number) except: self.logger.info("Repository %s does not exist" % entry.get('name')) return False if entry.get('revision') == 'latest' and cur_rev == server_rev: return True if cur_rev != entry.get('revision'): self.logger.info("At revision %s need to go to revision %s" % (cur_rev, entry.get('revision'))) return False return True def Installsvn(self, entry): """Checkout contents from a svn repository""" # pylint: disable=E1101 client = pysvn.Client() try: 
client.update(entry.get('name'), recurse=True) except pysvn.ClientError: self.logger.error("Failed to update repository", exc_info=1) return False return True # pylint: enable=E1101 def VerifyPath(self, entry, _): vcs = entry.get('vcstype') if vcs in missing: self.logger.error("Missing %s python libraries. Cannot verify" % vcs) return False ret = getattr(self, 'Verify%s' % vcs) return ret(entry, _) def InstallPath(self, entry): vcs = entry.get('vcstype') if vcs in missing: self.logger.error("Missing %s python libraries. " "Unable to install" % vcs) return False ret = getattr(self, 'Install%s' % vcs) return ret(entry) src/lib/Bcfg2/Client/Tools/YUM.py000066400000000000000000001366301303523157100167110ustar00rootroot00000000000000"""This provides bcfg2 support for yum.""" import copy import os.path import sys import logging import yum import yum.packages import yum.rpmtrans import yum.callbacks import yum.Errors import yum.misc import rpmUtils.arch import rpmUtils.miscutils import Bcfg2.Client.XML import Bcfg2.Client.Tools import Bcfg2.Options def build_yname(pkgname, inst): """Build yum appropriate package name.""" rv = {} if isinstance(inst, yum.packages.PackageObject): for i in ['name', 'epoch', 'version', 'release', 'arch']: rv[i] = getattr(inst, i) else: rv['name'] = pkgname if inst.get('version') != 'any': rv['version'] = inst.get('version') if inst.get('epoch', False): rv['epoch'] = inst.get('epoch') if inst.get('release', False) and inst.get('release') != 'any': rv['release'] = inst.get('release') if inst.get('arch', False) and inst.get('arch') != 'any': rv['arch'] = inst.get('arch') return rv def short_yname(nevra): """ given a nevra dict, get a dict of options to pass to functions like yum.YumBase.rpmdb.searchNevra(), which expect short names (e.g., "rel" instead of "release") """ rv = nevra.copy() if 'version' in rv: rv['ver'] = rv['version'] del rv['version'] if 'release' in rv: rv['rel'] = rv['release'] del rv['release'] return rv def nevra2string(pkg): """ convert a yum package object or nevra dict to a friendly human-readable string """ if isinstance(pkg, yum.packages.PackageObject): return str(pkg) else: ret = [] for attr, fmt in [('epoch', '%s:'), ('name', '%s'), ('version', '-%s'), ('release', '-%s'), ('arch', '.%s')]: if attr in pkg: ret.append(fmt % pkg[attr]) return "".join(ret) class RPMDisplay(yum.rpmtrans.RPMBaseCallback): """We subclass the default RPM transaction callback so that we can control Yum's verbosity and pipe it through the right logger.""" def __init__(self): yum.rpmtrans.RPMBaseCallback.__init__(self) # we want to log events to *both* the Bcfg2 logger (which goes # to stderr or syslog or wherever the user wants it to go) # *and* the yum file logger, which will go to yum.log (ticket # #1103) self.bcfg2_logger = logging.getLogger(self.__class__.__name__) self.state = None self.package = None def event(self, package, action, te_current, te_total, ts_current, ts_total): """ @param package: A yum package object or simple string of a package name @param action: A yum.constant transaction set state or in the obscure rpm repackage case it could be the string 'repackaging' @param te_current: Current number of bytes processed in the transaction element being processed @param te_total: Total number of bytes in the transaction element being processed @param ts_current: number of processes completed in whole transaction @param ts_total: total number of processes in the transaction. 
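# A standalone sketch (not upstream code) of the duplicate suppression
# performed in the body of event() just below: yum may invoke the callback
# many times per transaction element, so a line is only logged when the
# (action, package) pair actually changes.
import logging

class _DedupEventLogger(object):
    """Log an action/package pair only when it changes."""

    def __init__(self):
        self.log = logging.getLogger("DedupEventLogger")
        self.last = None

    def event(self, package, action):
        if (action, str(package)) != self.last:
            self.log.info("%s: %s", action, package)
            self.last = (action, str(package))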
""" if self.package != str(package) or action != self.state: self.bcfg2_logger.info("%s: %s" % (self.action[action], package)) self.state = action self.package = str(package) def scriptout(self, package, msgs): """Handle output from package scripts.""" if msgs: msg = "%s: %s" % (package, msgs) self.bcfg2_logger.debug(msg) def errorlog(self, msg): """Deal with error reporting.""" self.bcfg2_logger.error(msg) class YumDisplay(yum.callbacks.ProcessTransBaseCallback): """Class to handle display of what step we are in the Yum transaction such as downloading packages, etc.""" def __init__(self): yum.callbacks.ProcessTransBaseCallback.__init__(self) self.logger = logging.getLogger(self.__class__.__name__) class YUM(Bcfg2.Client.Tools.PkgTool): """Support for Yum packages.""" options = Bcfg2.Client.Tools.PkgTool.options + [ Bcfg2.Options.BooleanOption( cf=('YUM', 'pkg_checks'), default=True, dest="yum_pkg_checks", help="Perform YUM package checks"), Bcfg2.Options.BooleanOption( cf=('YUM', 'pkg_verify'), default=True, dest="yum_pkg_verify", help="Perform YUM package verify"), Bcfg2.Options.BooleanOption( cf=('YUM', 'install_missing'), default=True, dest="yum_install_missing", help="Install missing packages"), Bcfg2.Options.Option( cf=('YUM', 'erase_flags'), default=["allmatches"], dest="yum_erase_flags", type=Bcfg2.Options.Types.comma_list, help="YUM erase flags"), Bcfg2.Options.BooleanOption( cf=('YUM', 'fix_version'), default=True, dest="yum_fix_version", help="Fix (upgrade or downgrade) packages with the wrong version"), Bcfg2.Options.BooleanOption( cf=('YUM', 'reinstall_broken'), default=True, dest="yum_reinstall_broken", help="Reinstall packages that fail to verify"), Bcfg2.Options.Option( cf=('YUM', 'verify_flags'), default=[], dest="yum_verify_flags", type=Bcfg2.Options.Types.comma_list, help="YUM verify flags"), Bcfg2.Options.Option( cf=('YUM', 'disabled_plugins'), default=[], type=Bcfg2.Options.Types.comma_list, dest="yum_disabled_plugins", help="YUM disabled plugins"), Bcfg2.Options.Option( cf=('YUM', 'enabled_plugins'), default=[], type=Bcfg2.Options.Types.comma_list, dest="yum_enabled_plugins", help="YUM enabled plugins")] pkgtype = 'yum' __execs__ = [] __handles__ = [('Package', 'yum'), ('Package', 'rpm'), ('Path', 'ignore')] __req__ = {'Package': ['type'], 'Path': ['type']} conflicts = ['RPM'] def __init__(self, config): self.yumbase = self._loadYumBase() Bcfg2.Client.Tools.PkgTool.__init__(self, config) self.ignores = [] for struct in config: self.ignores.extend([entry.get('name') for entry in struct if (entry.tag == 'Path' and entry.get('type') == 'ignore')]) self.instance_status = {} self.extra_instances = [] self.modlists = {} for struct in config: self.__important__.extend( [entry.get('name') for entry in struct if (entry.tag == 'Path' and (entry.get('name').startswith('/etc/yum.d') or entry.get('name').startswith('/etc/yum.repos.d')) or entry.get('name') == '/etc/yum.conf')]) self.yum_avail = dict() self.yum_installed = dict() self.verify_cache = dict() yup = self.yumbase.doPackageLists(pkgnarrow='updates') if hasattr(self.yumbase.rpmdb, 'pkglist'): yinst = self.yumbase.rpmdb.pkglist else: yinst = self.yumbase.rpmdb.getPkgList() for dest, source in [(self.yum_avail, yup.updates), (self.yum_installed, yinst)]: for pkg in source: if dest is self.yum_avail: pname = pkg.name data = [(pkg.arch, (pkg.epoch, pkg.version, pkg.release))] else: pname = pkg[0] data = [(pkg[1], (pkg[2], pkg[3], pkg[4]))] if pname in dest: dest[pname].update(data) else: dest[pname] = dict(data) 
self.installonlypkgs = self.yumbase.conf.installonlypkgs if 'gpg-pubkey' not in self.installonlypkgs: self.installonlypkgs.append('gpg-pubkey') self.logger.debug("Yum: Install missing: %s" % Bcfg2.Options.setup.yum_install_missing) self.logger.debug("Yum: pkg_checks: %s" % Bcfg2.Options.setup.yum_pkg_checks) self.logger.debug("Yum: pkg_verify: %s" % Bcfg2.Options.setup.yum_pkg_verify) self.logger.debug("Yum: Upgrade on version fail: %s" % Bcfg2.Options.setup.yum_fix_version) self.logger.debug("Yum: Reinstall on verify fail: %s" % Bcfg2.Options.setup.yum_reinstall_broken) self.logger.debug("Yum: installonlypkgs: %s" % self.installonlypkgs) self.logger.debug("Yum: verify_flags: %s" % Bcfg2.Options.setup.yum_verify_flags) self.logger.debug("Yum: disabled_plugins: %s" % Bcfg2.Options.setup.yum_disabled_plugins) self.logger.debug("Yum: enabled_plugins: %s" % Bcfg2.Options.setup.yum_enabled_plugins) def _loadYumBase(self): ''' this may be called before PkgTool.__init__() is called on this object (when the YUM object is first instantiated; PkgTool.__init__() calls RefreshPackages(), which requires a YumBase object already exist), or after __init__() has completed, when we reload the yum config before installing packages. ''' rv = yum.YumBase() # pylint: disable=C0103 if hasattr(self, "logger"): logger = self.logger else: logger = logging.getLogger(self.name) if Bcfg2.Options.setup.debug: debuglevel = 3 elif Bcfg2.Options.setup.verbose: debuglevel = 2 else: debuglevel = 0 if len(Bcfg2.Options.setup.yum_disabled_plugins) > 0: rv.preconf.disabled_plugins = \ Bcfg2.Options.setup.yum_disabled_plugins if len(Bcfg2.Options.setup.yum_enabled_plugins) > 0: rv.preconf.enabled_plugins = \ Bcfg2.Options.setup.yum_enabled_plugins # pylint: disable=E1121,W0212 try: rv.preconf.debuglevel = debuglevel rv._getConfig() except AttributeError: rv._getConfig(self.yumbase.conf.config_file_path, debuglevel=debuglevel) # pylint: enable=E1121,W0212 try: rv.doConfigSetup() rv.doTsSetup() rv.doRpmDBSetup() except yum.Errors.RepoError: logger.error("YUM Repository error: %s" % sys.exc_info()[1]) raise Bcfg2.Client.Tools.ToolInstantiationError except Exception: logger.error("Yum error: %s" % sys.exc_info()[1]) raise Bcfg2.Client.Tools.ToolInstantiationError return rv def _fixAutoVersion(self, entry): """ handle entries with version="auto" by setting the version to the newest available """ # old style entry; synthesize Instances from current installed if (entry.get('name') not in self.yum_installed and entry.get('name') not in self.yum_avail): # new entry; fall back to default entry.set('version', 'any') else: data = copy.copy(self.yum_installed[entry.get('name')]) if entry.get('name') in self.yum_avail: # installed but out of date data.update(self.yum_avail[entry.get('name')]) for (arch, (epoch, vers, rel)) in list(data.items()): inst = Bcfg2.Client.XML.SubElement(entry, "Instance", name=entry.get('name'), version=vers, arch=arch, release=rel, epoch=epoch) if 'verify_flags' in entry.attrib: inst.set('verify_flags', entry.get('verify_flags')) if 'verify' in entry.attrib: inst.set('verify', entry.get('verify')) def _buildInstances(self, entry): """ get a list of all instances of the package from the given entry. converts from a Package entry without any Instance tags as necessary """ instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package'] # Uniquify instances. Cases where duplicates are returned. # However, the elements aren't comparable. 
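# A standalone sketch of the conversion performed just below, written
# against the stdlib ElementTree API instead of Bcfg2.Client.XML and using
# hypothetical package values: an old-style entry with no <Instance>
# children gets one synthesized child carrying the same attributes.
import xml.etree.ElementTree as ET

old_style = ET.Element('Package', name='foo', type='yum',
                       version='1.0', release='2')
if not [child for child in old_style
        if child.tag in ('Instance', 'Package')]:
    synthesized = ET.SubElement(old_style, 'Package')
    synthesized.attrib.update(old_style.attrib)
# old_style now carries a single instance-like child, which is the shape
# the verification code expects to iterate over.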
if instances == []: # We have an old style no Instance entry. Convert it to new style. instance = Bcfg2.Client.XML.SubElement(entry, 'Package') for attrib in list(entry.attrib.keys()): instance.attrib[attrib] = entry.attrib[attrib] instances = [instance] return instances def _getGPGKeysAsPackages(self): """Return a list of the GPG RPM signing keys installed on the system as a list of Package Objects.""" # GPG keys existing in the RPMDB have numbered days # and newer Yum versions will not return information about them if hasattr(self.yumbase.rpmdb, 'returnGPGPubkeyPackages'): return self.yumbase.rpmdb.returnGPGPubkeyPackages() return self.yumbase.rpmdb.searchNevra(name='gpg-pubkey') def missing_attrs(self, entry): """ Implementing from superclass to check for existence of either name or group attribute for Package entry in the case of a YUM group. """ missing = Bcfg2.Client.Tools.PkgTool.missing_attrs(self, entry) if (entry.get('name', None) is None and entry.get('group', None) is None): missing += ['name', 'group'] return missing def _verifyHelper(self, pkg_obj): """ _verifyHelper primarly deals with a yum bug where the pkg_obj.verify() method does not properly take into count multilib sharing of files. Neither does RPM proper, really....it just ignores the problem. """ def verify(pkg): """ helper to perform the verify according to the best options for whatever version of the API we're using. Disabling file checksums is a new feature yum 3.2.17-ish """ try: return pkg.verify(fast=Bcfg2.Options.setup.quick) except TypeError: # Older Yum API return pkg.verify() key = (pkg_obj.name, pkg_obj.epoch, pkg_obj.version, pkg_obj.release, pkg_obj.arch) if key in self.verify_cache: results = self.verify_cache[key] else: results = verify(pkg_obj) self.verify_cache[key] = results if not rpmUtils.arch.isMultiLibArch(): return results # Okay deal with a buggy yum multilib and verify. first find # all arches of pkg packages = self.yumbase.rpmdb.searchNevra(name=pkg_obj.name, epoch=pkg_obj.epoch, ver=pkg_obj.version, rel=pkg_obj.release) if len(packages) == 1: return results # No mathcing multilib packages # Will be the list of common fnames files = set(pkg_obj.returnFileEntries()) common = {} for pkg in packages: if pkg != pkg_obj: files = files & set(pkg.returnFileEntries()) for pkg in packages: key = (pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch) self.logger.debug("Multilib Verify: comparing %s to %s" % (pkg_obj, pkg)) if key not in self.verify_cache: self.verify_cache[key] = verify(pkg) for fname in list(self.verify_cache[key].keys()): # file problems must exist in ALL multilib packages to be real if fname in files: common[fname] = common.get(fname, 0) + 1 flag = len(packages) - 1 for fname, i in list(common.items()): if i == flag: # this fname had verify problems in all but one of the multilib # packages. That means its correct in the package that's # "on top." Therefore, this is a fake verify problem. if fname in results: del results[fname] return results def RefreshPackages(self): """ Creates self.installed{} which is a dict of installed packages. The dict items are lists of nevra dicts. This loosely matches the config from the server and what rpmtools uses to specify pacakges. e.g. 
self.installed['foo'] = [ {'name':'foo', 'epoch':None, 'version':'1', 'release':2, 'arch':'i386'}, {'name':'foo', 'epoch':None, 'version':'1', 'release':2, 'arch':'x86_64'} ] """ self.installed = {} packages = self._getGPGKeysAsPackages() + \ self.yumbase.rpmdb.returnPackages() for pkg in packages: pattrs = {} for i in ['name', 'epoch', 'version', 'release', 'arch']: if i == 'arch' and getattr(pkg, i) is None: pattrs[i] = 'noarch' elif i == 'epoch' and getattr(pkg, i) is None: pattrs[i] = '0' else: pattrs[i] = getattr(pkg, i) self.installed.setdefault(pkg.name, []).append(pattrs) # pylint: disable=R0914,R0912,R0915 def VerifyPackage(self, entry, modlist): """ Verify Package status for entry. Performs the following: * Checks for the presence of required Package Instances. * Compares the evra 'version' info against self.installed{}. * RPM level package verify (rpm --verify). * Checks for the presence of unrequired package instances. Produces the following dict and list for Yum.Install() to use: * For installs/upgrades/fixes of required instances:: instance_status = { : { 'installed': True|False, 'version_fail': True|False, 'verify_fail': True|False, 'pkg': , 'modlist': [ , ... ], 'verify' : [ ] }, ...... } * For deletions of unrequired instances:: extra_instances = [ , ..... ] Constructs the text prompts for interactive mode. """ if entry.get('version', False) == 'auto': self._fixAutoVersion(entry) if entry.get('group'): self.logger.debug("Verifying packages for group %s" % entry.get('group')) else: self.logger.debug("Verifying package instances for %s" % entry.get('name')) self.verify_cache = dict() # Used for checking multilib packages self.modlists[entry] = modlist instances = self._buildInstances(entry) pkg_cache = [] package_fail = False qtext_versions = [] virt_pkg = False pkg_checks = (Bcfg2.Options.setup.yum_pkg_checks and entry.get('pkg_checks', 'true').lower() == 'true') pkg_verify = (Bcfg2.Options.setup.yum_pkg_verify and entry.get('pkg_verify', 'true').lower() == 'true') yum_group = False if entry.get('name') == 'gpg-pubkey': all_pkg_objs = self._getGPGKeysAsPackages() pkg_verify = False # No files here to verify elif entry.get('group'): entry.set('name', 'group:%s' % entry.get('group')) yum_group = True all_pkg_objs = [] instances = [] if self.yumbase.comps.has_group(entry.get('group')): group = self.yumbase.comps.return_group(entry.get('group')) group_packages = [p for p, d in group.mandatory_packages.items() if d] group_type = entry.get('choose', 'default') if group_type in ['default', 'optional', 'all']: group_packages += [ p for p, d in group.default_packages.items() if d] if group_type in ['optional', 'all']: group_packages += [ p for p, d in group.optional_packages.items() if d] if len(group_packages) == 0: self.logger.error("No packages found for group %s" % entry.get("group")) for pkg in group_packages: # create package instances for each package in yum group instance = Bcfg2.Client.XML.SubElement(entry, 'Package') instance.attrib['name'] = pkg instance.attrib['type'] = 'yum' try: newest = \ self.yumbase.pkgSack.returnNewestByName(pkg)[0] instance.attrib['version'] = newest['version'] instance.attrib['epoch'] = newest['epoch'] instance.attrib['release'] = newest['release'] except: # pylint: disable=W0702 self.logger.info("Error finding newest package " "for %s" % pkg) instance.attrib['version'] = 'any' instances.append(instance) else: self.logger.error("Group not found: %s" % entry.get("group")) else: all_pkg_objs = \ 
self.yumbase.rpmdb.searchNevra(name=entry.get('name')) if len(all_pkg_objs) == 0 and yum_group is not True: # Some sort of virtual capability? Try to resolve it all_pkg_objs = self.yumbase.rpmdb.searchProvides(entry.get('name')) if len(all_pkg_objs) > 0: virt_pkg = True self.logger.info("%s appears to be provided by:" % entry.get('name')) for pkg in all_pkg_objs: self.logger.info(" %s" % pkg) for inst in instances: if yum_group: # the entry is not the name of the package nevra = build_yname(inst.get('name'), inst) all_pkg_objs = \ self.yumbase.rpmdb.searchNevra(name=inst.get('name')) else: nevra = build_yname(entry.get('name'), inst) if nevra in pkg_cache: continue # Ignore duplicate instances else: pkg_cache.append(nevra) self.logger.debug("Verifying: %s" % nevra2string(nevra)) # Set some defaults here stat = self.instance_status.setdefault(inst, {}) stat['installed'] = True stat['version_fail'] = False stat['verify'] = {} stat['verify_fail'] = False if yum_group: stat['pkg'] = inst else: stat['pkg'] = entry stat['modlist'] = modlist if inst.get('verify_flags'): # this splits on either space or comma verify_flags = \ inst.get('verify_flags').lower().replace(' ', ',').split(',') else: verify_flags = Bcfg2.Options.setup.yum_verify_flags if 'arch' in nevra: # If arch is specified use it to select the package pkg_objs = [p for p in all_pkg_objs if p.arch == nevra['arch']] else: pkg_objs = all_pkg_objs if len(pkg_objs) == 0: # Package (name, arch) not installed entry.set('current_exists', 'false') self.logger.debug(" %s is not installed" % nevra2string(nevra)) stat['installed'] = False package_fail = True qtext_versions.append("I(%s)" % nevra) continue if not pkg_checks: continue # Check EVR if virt_pkg: # we need to make sure that the version of the symbol # provided matches the one required in the # configuration vlist = [] for attr in ["epoch", "version", "release"]: vlist.append(nevra.get(attr)) if tuple(vlist) == (None, None, None): # we just require the package name, no particular # version, so just make a copy of all_pkg_objs since every # package that provides this symbol satisfies the # requirement pkg_objs = [po for po in all_pkg_objs] else: pkg_objs = [po for po in all_pkg_objs if po.checkPrco('provides', (nevra["name"], 'EQ', tuple(vlist)))] elif entry.get('name') == 'gpg-pubkey': if 'version' not in nevra: self.logger.warning("Skipping verify: gpg-pubkey without " "an RPM version") continue if 'release' not in nevra: self.logger.warning("Skipping verify: gpg-pubkey without " "an RPM release") continue pkg_objs = [p for p in all_pkg_objs if (p.version == nevra['version'] and p.release == nevra['release'])] else: pkg_objs = self.yumbase.rpmdb.searchNevra(**short_yname(nevra)) if len(pkg_objs) == 0: package_fail = True stat['version_fail'] = True # Just chose the first pkg for the error message current_pkg = all_pkg_objs[0] if virt_pkg: provides = \ [p for p in current_pkg.provides if p[0] == entry.get("name")][0] current_evr = provides[2] self.logger.info( " %s: Wrong version installed. " "Want %s, but %s provides %s" % (entry.get("name"), nevra2string(nevra), nevra2string(current_pkg), yum.misc.prco_tuple_to_string(provides))) else: current_evr = (current_pkg.epoch, current_pkg.version, current_pkg.release) self.logger.info(" %s: Wrong version installed. 
" "Want %s, but have %s" % (entry.get("name"), nevra2string(nevra), nevra2string(current_pkg))) wanted_evr = (nevra.get('epoch', 'any'), nevra.get('version', 'any'), nevra.get('release', 'any')) entry.set('current_version', "%s:%s-%s" % current_evr) entry.set('version', "%s:%s-%s" % wanted_evr) if rpmUtils.miscutils.compareEVR(current_evr, wanted_evr) == 1: entry.set("package_fail_action", "downgrade") else: entry.set("package_fail_action", "update") qtext_versions.append("U(%s)" % str(all_pkg_objs[0])) continue if Bcfg2.Options.setup.quick: # Passed -q on the command line continue if not (pkg_verify and inst.get('pkg_verify', 'true').lower() == 'true'): continue # XXX: We ignore GPG sig checking the package as it # has nothing to do with the individual file hash/size/etc. # GPG checking the package only eaxmines some header/rpmdb # wacky-ness, and will not properly detect a compromised rpmdb. # Yum's verify routine does not support it for that reaosn. if len(pkg_objs) > 1: self.logger.debug(" Verify Instance found many packages:") for pkg in pkg_objs: self.logger.debug(" %s" % str(pkg)) try: vrfy_result = self._verifyHelper(pkg_objs[0]) except: # pylint: disable=W0702 err = sys.exc_info()[1] # Unknown Yum exception self.logger.warning(" Verify Exception: %s" % err) package_fail = True continue # Now take out the Yum specific objects / modlists / unproblems ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ [ig.get('name') for ig in inst.findall('Ignore')] + \ self.ignores for fname, probs in list(vrfy_result.items()): if fname in modlist: self.logger.debug(" %s in modlist, skipping" % fname) continue if fname in ignores: self.logger.debug(" %s in ignore list, skipping" % fname) continue tmp = [] for prob in probs: if prob.type == 'missing' and os.path.islink(fname): continue elif 'no' + prob.type in verify_flags: continue if prob.type not in ['missingok', 'ghost']: tmp.append((prob.type, prob.message)) if tmp != []: stat['verify'][fname] = tmp if stat['verify'] != {}: stat['verify_fail'] = True package_fail = True self.logger.info("It is suggested that you either manage " "these files, revert the changes, or ignore " "false failures:") self.logger.info(" Verify Problems: %s" % stat['pkg'].get('name')) for fname, probs in list(stat['verify'].items()): if len(probs) > 1: self.logger.info(" %s" % fname) for prob in probs: self.logger.info(" %s" % prob[1]) else: self.logger.info(" %s: %s" % (fname, probs[0])) if len(all_pkg_objs) > 0: # Is this an install only package? We just look at the first one provides = set([p[0] for p in all_pkg_objs[0].provides] + [all_pkg_objs[0].name]) install_only = len(set(self.installonlypkgs) & provides) > 0 else: install_only = False if virt_pkg or \ (install_only and not Bcfg2.Options.setup.kevlar) or \ yum_group: # virtual capability supplied, we are probably dealing # with multiple packages of different names. This check # doesn't make a lot of since in this case. # install_only: Yum may clean some of these up itself. # Otherwise having multiple instances of install only packages # is considered correct self.extra_instances = None else: self.extra_instances = self.FindExtraInstances(entry, all_pkg_objs) if self.extra_instances is not None: package_fail = True return not package_fail # pylint: enable=R0914,R0912,R0915 def FindExtraInstances(self, entry, all_pkg_objs): """ Check for installed instances that are not in the config. Return a Package Entry with Instances to remove, or None if there are no Instances to remove. 
""" if len(all_pkg_objs) == 0: return None name = entry.get('name') extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) instances = self._buildInstances(entry) pkg_objs = [p for p in all_pkg_objs] # Shallow copy # Algorythm is sensitive to duplicates, check for them checked = [] for inst in instances: nevra = build_yname(name, inst) pkgs = self.yumbase.rpmdb.searchNevra(**short_yname(nevra)) if len(pkgs) > 0: if pkgs[0] in checked: continue # We've already taken care of this Instance else: checked.append(pkgs[0]) pkg_objs.remove(pkgs[0]) for pkg in pkg_objs: self.logger.debug(" Extra Instance Found: %s" % str(pkg)) Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', epoch=pkg.epoch, name=pkg.name, version=pkg.version, release=pkg.release, arch=pkg.arch) if pkg_objs == []: return None else: return extra_entry def FindExtra(self): """Find extra packages.""" packages = [e.get('name') for e in self.getSupportedEntries()] extras = [] for pkg in list(self.installed.keys()): if pkg not in packages: entry = Bcfg2.Client.XML.Element('Package', name=pkg, type=self.pkgtype) for i in self.installed[pkg]: Bcfg2.Client.XML.SubElement(entry, 'Instance', epoch=i['epoch'], version=i['version'], release=i['release'], arch=i['arch']) extras.append(entry) return extras def _installGPGKey(self, inst, key_file): """Examine the GPG keys carefully before installation. Avoid installing duplicate keys. Returns True on successful install.""" # RPM Transaction Set tset = self.yumbase.rpmdb.readOnlyTS() if not os.path.exists(key_file): self.logger.debug("GPG Key file %s not installed" % key_file) return False rawkey = open(key_file).read() gpg = yum.misc.getgpgkeyinfo(rawkey) ver = yum.misc.keyIdToRPMVer(gpg['keyid']) rel = yum.misc.keyIdToRPMVer(gpg['timestamp']) if not (ver == inst.get('version') and rel == inst.get('release')): self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s" % (key_file, inst.get('version'), inst.get('release'))) return False if not yum.misc.keyInstalled(tset, gpg['keyid'], gpg['timestamp']) == 0: result = tset.pgpImportPubkey(yum.misc.procgpgkey(rawkey)) else: self.logger.debug("gpg-pubkey-%s-%s already installed" % (inst.get('version'), inst.get('release'))) return True if result != 0: self.logger.debug( "Unable to install %s-%s" % (self.instance_status[inst].get('pkg').get('name'), nevra2string(inst))) return False else: self.logger.debug( "Installed %s-%s-%s" % (self.instance_status[inst].get('pkg').get('name'), inst.get('version'), inst.get('release'))) return True def _runYumTransaction(self): """ run the yum transaction that has already been set up """ def cleanup(): """ clean up open stuff when we hit an error """ self.yumbase.closeRpmDB() self.RefreshPackages() rpm_display = RPMDisplay() yum_display = YumDisplay() # Run the Yum Transaction try: rescode, restring = self.yumbase.buildTransaction() except yum.Errors.YumBaseError: err = sys.exc_info()[1] self.logger.error("Error building Yum transaction: %s" % err) cleanup() return self.logger.debug("Initial Yum buildTransaction() run said:") self.logger.debug(" resultcode: %s, msgs: %s" % (rescode, restring)) if rescode != 1: # Transaction built successfully, run it try: self.yumbase.processTransaction(callback=yum_display, rpmDisplay=rpm_display) self.logger.info("Single Pass for Install Succeeded") except yum.Errors.YumBaseError: err = sys.exc_info()[1] self.logger.error("Error processing Yum transaction: %s" % err) cleanup() return else: # The yum command failed. No packages installed. 
# Try installing instances individually. self.logger.error("Single Pass Install of Packages Failed") skip_broken = self.yumbase.conf.skip_broken self.yumbase.conf.skip_broken = True try: rescode, restring = self.yumbase.buildTransaction() if rescode != 1: self.yumbase.processTransaction(callback=yum_display, rpmDisplay=rpm_display) self.logger.debug( "Second pass install did not install all packages") else: self.logger.error("Second pass yum install failed.") self.logger.debug(" %s" % restring) except yum.Errors.YumBaseError: err = sys.exc_info()[1] self.logger.error("Error rerunning Yum transaction: %s" % err) self.yumbase.conf.skip_broken = skip_broken cleanup() def Install(self, packages): # pylint: disable=R0912,R0914,R0915 """ Try and fix everything that Yum.VerifyPackages() found wrong for each Package Entry. This can result in individual RPMs being installed (for the first time), deleted, downgraded or upgraded. packages is a list of Package Elements that has states[] == False The following effects occur: - states{} is conditionally updated for each package. - self.installed{} is rebuilt, possibly multiple times. - self.instance_status{} is conditionally updated for each instance of a package. - Each package will be added to self.modified[] if its states{} entry is set to True. """ self.logger.debug('Running Yum.Install()') states = dict() install_pkgs = [] gpg_keys = [] upgrade_pkgs = [] downgrade_pkgs = [] reinstall_pkgs = [] def queue_pkg(pkg, inst, queue): """ add a package to the appropriate work queue -- packages to install, packages to upgrade, etc. """ if pkg.get('name') == 'gpg-pubkey': gpg_keys.append(inst) else: queue.append(inst) # Remove extra instances. # Can not reverify because we don't have a package entry. if self.extra_instances is not None and len(self.extra_instances) > 0: if Bcfg2.Options.setup.remove in ['all', 'packages']: self.Remove(self.extra_instances) else: self.logger.info("The following extra package instances will " "be removed by the '-r' option:") for pkg in self.extra_instances: for inst in pkg: self.logger.info(" %s %s" % ((pkg.get('name'), nevra2string(inst)))) # Figure out which instances of the packages actually need something # doing to them and place in the appropriate work 'queue'. for pkg in packages: insts = [pinst for pinst in pkg if pinst.tag in ['Instance', 'Package']] if insts: for inst in insts: if inst not in self.instance_status: self.logger.warning( " Asked to install/update package never " "verified: %s" % nevra2string(build_yname(pkg.get('name'), inst))) continue status = self.instance_status[inst] if not status.get('installed', False) and \ Bcfg2.Options.setup.yum_install_missing: queue_pkg(pkg, inst, install_pkgs) elif (status.get('version_fail', False) and Bcfg2.Options.setup.yum_fix_version): if pkg.get("package_fail_action") == "downgrade": queue_pkg(pkg, inst, downgrade_pkgs) else: queue_pkg(pkg, inst, upgrade_pkgs) elif (status.get('verify_fail', False) and Bcfg2.Options.setup.yum_reinstall_broken): queue_pkg(pkg, inst, reinstall_pkgs) else: # Either there was no Install/Version/Verify # task to be done or the user disabled the actions # in the configuration. XXX Logging for the latter? pass else: msg = "Yum: Package tag found where Instance expected: %s" self.logger.warning(msg % pkg.get('name')) queue_pkg(pkg, pkg, install_pkgs) # Install GPG keys. # Alternatively specify the required keys using 'gpgkey' in the # repository definition in yum.conf. YUM will install the keys # automatically. 
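# [Editor's sketch] Standalone illustration (not part of Bcfg2) of the check
# that _installGPGKey() performs before importing a key: the key id and
# timestamp read from the key file are converted with yum.misc.keyIdToRPMVer()
# and compared to the version/release recorded on the gpg-pubkey instance.
# Error handling and the actual rpm import step are omitted here.
def _example_gpg_key_matches(key_path, want_version, want_release):
    import yum.misc
    rawkey = open(key_path).read()
    info = yum.misc.getgpgkeyinfo(rawkey)
    ver = yum.misc.keyIdToRPMVer(info['keyid'])
    rel = yum.misc.keyIdToRPMVer(info['timestamp'])
    return ver == want_version and rel == want_release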
if len(gpg_keys) > 0: self.logger.info("Installing GPG keys.") for inst in gpg_keys: if inst.get('simplefile') is None: self.logger.error("GPG key has no simplefile attribute") continue key_file = os.path.join( self.instance_status[inst].get('pkg').get('uri'), inst.get('simplefile')) self._installGPGKey(inst, key_file) self.RefreshPackages() pkg = self.instance_status[gpg_keys[0]].get('pkg') states[pkg] = self.VerifyPackage(pkg, []) # We want to reload all Yum configuration in case we've # deployed new .repo files we should consider self._loadYumBase() # Install packages. if len(install_pkgs) > 0: self.logger.info("Attempting to install packages") for inst in install_pkgs: pkg_arg = self.instance_status[inst].get('pkg').get('name') self.logger.debug("Installing %s" % pkg_arg) try: self.yumbase.install(**build_yname(pkg_arg, inst)) except yum.Errors.YumBaseError: yume = sys.exc_info()[1] self.logger.error("Error installing package %s: %s" % (pkg_arg, yume)) if len(upgrade_pkgs) > 0: self.logger.info("Attempting to upgrade packages") for inst in upgrade_pkgs: pkg_arg = self.instance_status[inst].get('pkg').get('name') self.logger.debug("Upgrading %s" % pkg_arg) try: self.yumbase.update(**build_yname(pkg_arg, inst)) except yum.Errors.YumBaseError: yume = sys.exc_info()[1] self.logger.error("Error upgrading package %s: %s" % (pkg_arg, yume)) if len(downgrade_pkgs) > 0: self.logger.info("Attempting to downgrade packages") for inst in downgrade_pkgs: pkg_arg = self.instance_status[inst].get('pkg').get('name') self.logger.debug("Downgrading %s" % pkg_arg) try: self.yumbase.downgrade(**build_yname(pkg_arg, inst)) except yum.Errors.YumBaseError: yume = sys.exc_info()[1] self.logger.error("Error downgrading package %s: %s" % (pkg_arg, yume)) if len(reinstall_pkgs) > 0: self.logger.info("Attempting to reinstall packages") for inst in reinstall_pkgs: pkg_arg = self.instance_status[inst].get('pkg').get('name') self.logger.debug("Reinstalling %s" % pkg_arg) try: self.yumbase.reinstall(**build_yname(pkg_arg, inst)) except yum.Errors.YumBaseError: yume = sys.exc_info()[1] self.logger.error("Error reinstalling package %s: %s" % (pkg_arg, yume)) self._runYumTransaction() if not Bcfg2.Options.setup.kevlar: for pkg_entry in [p for p in packages if self.canVerify(p)]: self.logger.debug("Reverifying Failed Package %s" % pkg_entry.get('name')) states[pkg_entry] = \ self.VerifyPackage(pkg_entry, self.modlists.get(pkg_entry, [])) self.modified.extend(ent for ent in packages if states[ent]) return states def Remove(self, packages): """ Remove specified entries. packages is a list of Package Entries with Instances generated by FindExtra(). 
""" self.logger.debug('Running Yum.Remove()') for pkg in packages: for inst in pkg: nevra = build_yname(pkg.get('name'), inst) if pkg.get('name') != 'gpg-pubkey': self.yumbase.remove(**nevra) self.modified.append(pkg) else: self.logger.info("WARNING: gpg-pubkey package not in " "configuration %s %s-%s" % (nevra['name'], nevra['version'], nevra['release'])) self._runYumTransaction() self.extra = self.FindExtra() def VerifyPath(self, entry, _): # pylint: disable=W0613 """Do nothing here since we only verify Path type=ignore""" return True src/lib/Bcfg2/Client/Tools/__init__.py000066400000000000000000000634761303523157100200050ustar00rootroot00000000000000"""This contains all Bcfg2 Tool modules""" import os import sys import stat import logging import Bcfg2.Options import Bcfg2.Client import Bcfg2.Client.XML from Bcfg2.Utils import Executor, ClassName class ToolInstantiationError(Exception): """ This error is raised if the toolset cannot be instantiated. """ pass class Tool(object): """ The base tool class. All tools subclass this. .. private-include: _entry_is_complete .. autoattribute:: Bcfg2.Client.Tools.Tool.__execs__ .. autoattribute:: Bcfg2.Client.Tools.Tool.__handles__ .. autoattribute:: Bcfg2.Client.Tools.Tool.__req__ .. autoattribute:: Bcfg2.Client.Tools.Tool.__important__ """ options = [ Bcfg2.Options.Option( cf=('client', 'command_timeout'), help="Timeout when running external commands other than probes", type=Bcfg2.Options.Types.timeout)] #: The name of the tool. By default this uses #: :class:`Bcfg2.Client.Tools.ClassName` to ensure that it is the #: same as the name of the class. name = ClassName() #: Full paths to all executables the tool uses. When the tool is #: instantiated it will check to ensure that all of these files #: exist and are executable. __execs__ = [] #: A list of 2-tuples of entries handled by this tool. Each #: 2-tuple should contain ``(, )``, where ```` is #: the ``type`` attribute of the entry. If this tool handles #: entries with no ``type`` attribute, specify None. __handles__ = [] #: A dict that describes the required attributes for entries #: handled by this tool. The keys are the names of tags. The #: values may either be lists of attribute names (if the same #: attributes are required by all tags of that name), or dicts #: whose keys are the ``type`` attribute and whose values are #: lists of attributes required by tags with that ``type`` #: attribute. In that case, the ``type`` attribute will also be #: required. __req__ = {} #: A list of entry names that will be treated as important and #: installed before other entries. __important__ = [] #: This tool is deprecated, and a warning will be produced if it #: is used. deprecated = False #: This tool is experimental, and a warning will be produced if it #: is used. experimental = False #: List of other tools (by name) that this tool conflicts with. #: If any of the listed tools are loaded, they will be removed at #: runtime with a warning. conflicts = [] def __init__(self, config): """ :param config: The XML configuration for this client :type config: lxml.etree._Element :raises: :exc:`Bcfg2.Client.Tools.ToolInstantiationError` """ #: A :class:`logging.Logger` object that will be used by this #: tool for logging self.logger = logging.getLogger(self.name) #: The XML configuration for this client self.config = config #: An :class:`Bcfg2.Utils.Executor` object for #: running external commands. 
self.cmd = Executor(timeout=Bcfg2.Options.setup.command_timeout) #: A list of entries that have been modified by this tool self.modified = [] #: A list of extra entries that are not listed in the #: configuration self.extra = [] #: A list of all entries handled by this tool self.handled = [] self._analyze_config() self._check_execs() def _analyze_config(self): """ Analyze the config at tool initialization-time for important and handled entries """ for struct in self.config: for entry in struct: if (entry.tag == 'Path' and entry.get('important', 'false').lower() == 'true'): self.__important__.append(entry.get('name')) self.handled = self.getSupportedEntries() def _check_execs(self): """ Check all executables used by this tool to ensure that they exist and are executable """ for filename in self.__execs__: try: mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE]) except OSError: raise ToolInstantiationError(sys.exc_info()[1]) except: raise ToolInstantiationError("%s: Failed to stat %s" % (self.name, filename)) if not mode & stat.S_IEXEC: raise ToolInstantiationError("%s: %s not executable" % (self.name, filename)) def _install_allowed(self, entry): """ Return true if the given entry is allowed to be installed by the whitelist or blacklist """ if (Bcfg2.Options.setup.decision == 'whitelist' and not Bcfg2.Client.matches_white_list( entry, Bcfg2.Options.setup.decision_list)): self.logger.info("In whitelist mode: suppressing Action: %s" % entry.get('name')) return False if (Bcfg2.Options.setup.decision == 'blacklist' and not Bcfg2.Client.passes_black_list( entry, Bcfg2.Options.setup.decision_list)): self.logger.info("In blacklist mode: suppressing Action: %s" % entry.get('name')) return False return True def BundleUpdated(self, bundle): # pylint: disable=W0613 """ Callback that is invoked when a bundle has been updated. :param bundle: The bundle that has been updated :type bundle: lxml.etree._Element :returns: dict - A dict of the state of entries suitable for updating :attr:`Bcfg2.Client.Client.states` """ return dict() def BundleNotUpdated(self, bundle): # pylint: disable=W0613 """ Callback that is invoked when a bundle has been updated. :param bundle: The bundle that has been updated :type bundle: lxml.etree._Element :returns: dict - A dict of the state of entries suitable for updating :attr:`Bcfg2.Client.Client.states` """ return dict() def Inventory(self, structures=None): """ Take an inventory of the system as it exists. This involves two steps: * Call the appropriate entry-specific Verify method for each entry this tool verifies; * Call :func:`Bcfg2.Client.Tools.Tool.FindExtra` to populate :attr:`Bcfg2.Client.Tools.Tool.extra` with extra entries. This implementation of :func:`Bcfg2.Client.Tools.Tool.Inventory` calls a ``Verify`` method to verify each entry, where ```` is the entry tag. E.g., a Path entry would be verified by calling :func:`VerifyPath`. :param structures: The list of structures (i.e., bundles) to get entries from. If this is not given, all children of :attr:`Bcfg2.Client.Tools.Tool.config` will be used. 
:type structures: list of lxml.etree._Element :returns: dict - A dict of the state of entries suitable for updating :attr:`Bcfg2.Client.Client.states` """ if not structures: structures = self.config.getchildren() mods = self.buildModlist() states = dict() for struct in structures: for entry in struct.getchildren(): if self.canVerify(entry): try: func = getattr(self, "Verify%s" % entry.tag) except AttributeError: self.logger.error("%s: Cannot verify %s entries" % (self.name, entry.tag)) continue try: states[entry] = func(entry, mods) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error("%s: Unexpected failure verifying %s" % (self.name, self.primarykey(entry)), exc_info=1) self.extra = self.FindExtra() return states def Install(self, entries): """ Install entries. 'Install' in this sense means either initially install, or update as necessary to match the specification. This implementation of :func:`Bcfg2.Client.Tools.Tool.Install` calls a ``Install`` method to install each entry, where ```` is the entry tag. E.g., a Path entry would be installed by calling :func:`InstallPath`. :param entries: The entries to install :type entries: list of lxml.etree._Element :returns: dict - A dict of the state of entries suitable for updating :attr:`Bcfg2.Client.Client.states` """ states = dict() for entry in entries: try: func = getattr(self, "Install%s" % entry.tag) except AttributeError: self.logger.error("%s: Cannot install %s entries" % (self.name, entry.tag)) continue try: states[entry] = func(entry) if states[entry]: self.modified.append(entry) except: # pylint: disable=W0702 self.logger.error("%s: Unexpected failure installing %s" % (self.name, self.primarykey(entry)), exc_info=1) return states def Remove(self, entries): """ Remove specified extra entries. :param entries: The entries to remove :type entries: list of lxml.etree._Element :returns: None """ pass def getSupportedEntries(self): """ Get all entries that are handled by this tool. :returns: list of lxml.etree._Element """ rv = [] for struct in self.config.getchildren(): rv.extend([entry for entry in struct.getchildren() if self.handlesEntry(entry)]) return rv def handlesEntry(self, entry): """ Return True if the entry is handled by this tool. :param entry: Determine if this entry is handled. :type entry: lxml.etree._Element :returns: bool """ return (entry.tag, entry.get('type')) in self.__handles__ def buildModlist(self): """ Build a list of all Path entries in the configuration. (This can be used to determine which paths might be modified from their original state, useful for verifying packages) :returns: list of lxml.etree._Element """ rv = [] for struct in self.config.getchildren(): rv.extend([entry.get('name') for entry in struct.getchildren() if entry.tag == 'Path']) return rv def missing_attrs(self, entry): """ Return a list of attributes that were expected on an entry (from :attr:`Bcfg2.Client.Tools.Tool.__req__`), but not found. :param entry: The entry to find missing attributes on :type entry: lxml.etree._Element :returns: list of strings """ required = self.__req__[entry.tag] if isinstance(required, dict): required = ["type"] try: required.extend(self.__req__[entry.tag][entry.get("type")]) except KeyError: pass return [attr for attr in required if attr not in entry.attrib or not entry.attrib[attr]] def canVerify(self, entry): """ Test if entry can be verified by calling :func:`Bcfg2.Client.Tools.Tool._entry_is_complete`. 
:param entry: The entry to evaluate :type entry: lxml.etree._Element :returns: bool - True if the entry can be verified, False otherwise. """ return self._entry_is_complete(entry, action="verify") def FindExtra(self): """ Return a list of extra entries, i.e., entries that exist on the client but are not in the configuration. :returns: list of lxml.etree._Element """ return [] def primarykey(self, entry): """ Return a string that describes the entry uniquely amongst all entries in the configuration. :param entry: The entry to describe :type entry: lxml.etree._Element :returns: string """ return "%s:%s" % (entry.tag, entry.get("name")) def canInstall(self, entry): """ Test if entry can be installed by calling :func:`Bcfg2.Client.Tools.Tool._entry_is_complete`. :param entry: The entry to evaluate :type entry: lxml.etree._Element :returns: bool - True if the entry can be installed, False otherwise. """ return self._entry_is_complete(entry, action="install") def _entry_is_complete(self, entry, action=None): """ Test if the entry is complete. This involves three things: * The entry is handled by this tool (as reported by :func:`Bcfg2.Client.Tools.Tool.handlesEntry`; * The entry does not report a bind failure; * The entry is not missing any attributes (as reported by :func:`Bcfg2.Client.Tools.Tool.missing_attrs`). :param entry: The entry to evaluate :type entry: lxml.etree._Element :param action: The action being performed on the entry (e.g., "install", "verify"). This is used to produce error messages; if not provided, generic error messages will be used. :type action: string :returns: bool - True if the entry can be verified, False otherwise. """ if not self.handlesEntry(entry): return False if 'failure' in entry.attrib: if action is None: msg = "%s: %s reports bind failure" else: msg = "%%s: Cannot %s entry %%s with bind failure" % action self.logger.error(msg % (self.name, self.primarykey(entry))) return False missing = self.missing_attrs(entry) if missing: if action is None: desc = "%s is" % self.primarykey(entry) else: desc = "Cannot %s %s due to" % (action, self.primarykey(entry)) self.logger.error("%s: %s missing required attribute(s): %s" % (self.name, desc, ", ".join(missing))) return False return True class PkgTool(Tool): """ PkgTool provides a one-pass install with fallback for use with packaging systems. PkgTool makes a number of assumptions that may need to be overridden by a subclass. For instance, it assumes that packages are installed by a shell command; that only one version of a given package can be installed; etc. Nonetheless, it offers a strong base for writing simple package tools. """ #: A tuple describing the format of the command to run to install #: a single package. The first element of the tuple is a string #: giving the format of the command, with a single '%s' for the #: name of the package or packages to be installed. The second #: element is a tuple whose first element is the format of the #: name of the package, and whose second element is a list whose #: members are the names of attributes that will be used when #: formatting the package name format string. pkgtool = ('echo %s', ('%s', ['name'])) #: The ``type`` attribute of Packages handled by this tool. pkgtype = 'echo' def __init__(self, config): Tool.__init__(self, config) #: A dict of installed packages; the keys should be package #: names and the values should be simple strings giving the #: installed version. 
self.installed = {} self.RefreshPackages() def VerifyPackage(self, entry, modlist): """ Verify the given Package entry. :param entry: The Package entry to verify :type entry: lxml.etree._Element :param modlist: A list of all Path entries in the configuration, which may be considered when verifying a package. For instance, a package should verify successfully if paths in ``modlist`` have been modified outside the package. :type modlist: list of strings :returns: bool - True if the package verifies, false otherwise. """ raise NotImplementedError def _get_package_command(self, packages): """ Get the command to install the given list of packages. :param packages: The Package entries to install :type packages: list of lxml.etree._Element :returns: string - the command to run """ pkgargs = " ".join(self.pkgtool[1][0] % tuple(pkg.get(field) for field in self.pkgtool[1][1]) for pkg in packages) return self.pkgtool[0] % pkgargs def Install(self, packages): """ Run a one-pass install where all required packages are installed with a single command, followed by single package installs in case of failure. :param entries: The entries to install :type entries: list of lxml.etree._Element :returns: dict - A dict of the state of entries suitable for updating :attr:`Bcfg2.Client.Client.states` """ self.logger.info("Trying single pass package install for pkgtype %s" % self.pkgtype) states = dict() if self.cmd.run(self._get_package_command(packages)): self.logger.info("Single Pass Succeded") # set all package states to true and flush workqueues for entry in packages: self.logger.debug('Setting state to true for %s' % self.primarykey(entry)) states[entry] = True self.RefreshPackages() else: self.logger.error("Single Pass Failed") # do single pass installs self.RefreshPackages() for pkg in packages: # handle state tracking updates if self.VerifyPackage(pkg, []): self.logger.info("Forcing state to true for pkg %s" % (pkg.get('name'))) states[pkg] = True else: self.logger.info("Installing pkg %s version %s" % (pkg.get('name'), pkg.get('version'))) if self.cmd.run(self._get_package_command([pkg])): states[pkg] = True else: states[pkg] = False self.logger.error("Failed to install package %s" % pkg.get('name')) self.RefreshPackages() self.modified.extend(entry for entry in packages if entry in states and states[entry]) return states def RefreshPackages(self): """ Refresh the internal representation of the package database (:attr:`Bcfg2.Client.Tools.PkgTool.installed`). :returns: None""" raise NotImplementedError def FindExtra(self): packages = [entry.get('name') for entry in self.getSupportedEntries()] extras = [data for data in list(self.installed.items()) if data[0] not in packages] return [Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype, current_version=version) for (name, version) in extras] FindExtra.__doc__ = Tool.FindExtra.__doc__ class SvcTool(Tool): """ Base class for tools that handle Service entries """ options = Tool.options + [ Bcfg2.Options.Option( '-s', '--service-mode', default='default', choices=['default', 'disabled', 'build'], help='Set client service mode')] def __init__(self, config): Tool.__init__(self, config) #: List of services that have been restarted self.restarted = [] __init__.__doc__ = Tool.__init__.__doc__ def get_svc_command(self, service, action): """ Return a command that can be run to start or stop a service. 
:param service: The service entry to modify :type service: lxml.etree._Element :param action: The action to take (e.g., "stop", "start") :type action: string :returns: string - The command to run """ return '/etc/init.d/%s %s' % (service.get('name'), action) def get_bootstatus(self, service): """ Return the bootstatus attribute if it exists. :param service: The service entry :type service: lxml.etree._Element :returns: string or None - Value of bootstatus if it exists. If bootstatus is unspecified and status is not *ignore*, return value of status. If bootstatus is unspecified and status is *ignore*, return None. """ if service.get('bootstatus') is not None: return service.get('bootstatus') elif service.get('status') != 'ignore': return service.get('status') return None def start_service(self, service): """ Start a service. :param service: The service entry to modify :type service: lxml.etree._Element :returns: Bcfg2.Utils.ExecutorResult - The return value from :class:`Bcfg2.Utils.Executor.run` """ self.logger.debug('Starting service %s' % service.get('name')) return self.cmd.run(self.get_svc_command(service, 'start')) def stop_service(self, service): """ Stop a service. :param service: The service entry to modify :type service: lxml.etree._Element :returns: Bcfg2.Utils.ExecutorResult - The return value from :class:`Bcfg2.Utils.Executor.run` """ self.logger.debug('Stopping service %s' % service.get('name')) return self.cmd.run(self.get_svc_command(service, 'stop')) def restart_service(self, service): """Restart a service. :param service: The service entry to modify :type service: lxml.etree._Element :returns: Bcfg2.Utils.ExecutorResult - The return value from :class:`Bcfg2.Utils.Executor.run` """ self.logger.debug('Restarting service %s' % service.get('name')) restart_target = service.get('target', 'restart') return self.cmd.run(self.get_svc_command(service, restart_target)) def check_service(self, service): """ Check the status a service. :param service: The service entry to modify :type service: lxml.etree._Element :returns: bool - True if the status command returned 0, False otherwise """ return bool(self.cmd.run(self.get_svc_command(service, 'status'))) def Remove(self, services): if Bcfg2.Options.setup.service_mode != 'disabled': for entry in services: entry.set("status", "off") self.InstallService(entry) Remove.__doc__ = Tool.Remove.__doc__ def BundleUpdated(self, bundle): if Bcfg2.Options.setup.service_mode == 'disabled': return for entry in bundle: if (not self.handlesEntry(entry) or not self._install_allowed(entry)): continue estatus = entry.get('status') restart = entry.get("restart", "true").lower() if (restart == "false" or estatus == 'ignore' or (restart == "interactive" and not Bcfg2.Options.setup.interactive)): continue success = False if estatus == 'on': if Bcfg2.Options.setup.service_mode == 'build': success = self.stop_service(entry) elif entry.get('name') not in self.restarted: if Bcfg2.Options.setup.interactive: if not Bcfg2.Client.prompt('Restart service %s? 
(y/N) ' % entry.get('name')): continue success = self.restart_service(entry) if success: self.restarted.append(entry.get('name')) else: success = self.stop_service(entry) if not success: self.logger.error("Failed to manipulate service %s" % (entry.get('name'))) return dict() BundleUpdated.__doc__ = Tool.BundleUpdated.__doc__ def Install(self, entries): install_entries = [] for entry in entries: if entry.get('install', 'true').lower() == 'false': self.logger.info("Installation is false for %s:%s, skipping" % (entry.tag, entry.get('name'))) else: install_entries.append(entry) return Tool.Install(self, install_entries) Install.__doc__ = Tool.Install.__doc__ def InstallService(self, entry): """ Install a single service entry. See :func:`Bcfg2.Client.Tools.Tool.Install`. :param entry: The Service entry to install :type entry: lxml.etree._Element :returns: bool - True if installation was successful, False otherwise """ raise NotImplementedError src/lib/Bcfg2/Client/Tools/launchd.py000066400000000000000000000135771303523157100176610ustar00rootroot00000000000000"""launchd support for Bcfg2.""" import os import Bcfg2.Client.Tools class launchd(Bcfg2.Client.Tools.Tool): # pylint: disable=C0103 """Support for Mac OS X launchd services. Currently requires the path to the plist to load/unload, and Name is acually a reverse-fqdn (or the label).""" __handles__ = [('Service', 'launchd')] __execs__ = ['/bin/launchctl', '/usr/bin/defaults'] __req__ = {'Service': ['name', 'status']} def __init__(self, config): Bcfg2.Client.Tools.Tool.__init__(self, config) # Locate plist file that provides given reverse-fqdn name: # # * ``/Library/LaunchAgents``: Per-user agents provided by the # administrator. # * ``/Library/LaunchDaemons``: System-wide daemons provided # by the administrator. # * ``/System/Library/LaunchAgents``: Mac OS X per-user # agents. # * ``/System/Library/LaunchDaemons``: Mac OS X system-wide # daemons. 
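# [Editor's sketch] Standalone illustration (not part of Bcfg2) of the
# label -> plist path mapping that __init__ builds just below: every *.plist
# in the scanned directories is queried with
# ``defaults read <path-minus-.plist> Label`` and the reported label becomes
# the key later used by FindPlist().  This sketch uses subprocess instead of
# Bcfg2's Executor; the default directory list is an assumption for
# illustration.
def _example_plist_mapping(directories=("/Library/LaunchDaemons",
                                        "/System/Library/LaunchDaemons")):
    import os
    import subprocess
    mapping = {}
    for directory in directories:
        for fname in os.listdir(directory):
            if not fname.endswith(".plist"):
                continue
            # ``defaults read`` appends .plist itself, so strip the suffix
            dpath = os.path.join(directory, fname[:-len(".plist")])
            try:
                out = subprocess.check_output(
                    ["defaults", "read", dpath, "Label"])
            except subprocess.CalledProcessError:
                continue
            lines = out.decode("utf-8", "replace").splitlines()
            if lines:
                mapping[lines[0].strip()] = dpath
    return mapping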
plist_locations = ["/Library/LaunchDaemons", "/System/Library/LaunchDaemons"] self.plist_mapping = {} for directory in plist_locations: for daemon in os.listdir(directory): if daemon.endswith(".plist"): daemon = daemon[:-6] dpath = os.path.join(directory, daemon) rv = self.cmd.run(['defaults', 'read', dpath, 'Label']) if rv.success: label = rv.stdout.splitlines()[0] self.plist_mapping[label] = dpath else: self.logger.warning("Could not get label from %s" % dpath) def FindPlist(self, entry): """ Find the location of the plist file for the given entry """ return self.plist_mapping.get(entry.get('name'), None) def os_version(self): """ Determine the OS version """ rv = self.cmd.run('sw_vers') if rv: for line in rv.stdout.splitlines(): if line.startswith("ProductVersion"): return line.split()[-1] else: return '' def VerifyService(self, entry, _): """Verify launchd service entry.""" if entry.get('status') == 'ignore': return True try: services = self.cmd.run("/bin/launchctl list").stdout.splitlines() except IndexError: # happens when no services are running (should be never) services = [] # launchctl output changed in 10.5 # It is now three columns, with the last # column being the name of the # service if int(self.os_version().split('.')[1]) >= 5: services = [s.split()[-1] for s in services] if entry.get('name') in services: # doesn't check if non-spawning services are Started return entry.get('status') == 'on' else: self.logger.debug("Launchd: Didn't find service Loaded " "(launchd running under same user as bcfg)") return entry.get('status') == 'off' try: # Perhaps add the "-w" flag to load and # unload to modify the file itself! self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry)) except IndexError: return 'on' return False def InstallService(self, entry): """Enable or disable launchd item.""" name = entry.get('name') if entry.get('status') == 'on': self.logger.error("Installing service %s" % name) self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry)) return self.cmd.run("/bin/launchctl start %s" % name).success else: self.logger.error("Uninstalling service %s" % name) self.cmd.run("/bin/launchctl stop %s" % name) return self.cmd.run("/bin/launchctl unload -w %s" % self.FindPlist(entry)).success def Remove(self, svcs): """Remove Extra launchd entries.""" pass def FindExtra(self): """Find Extra launchd services.""" try: allsrv = self.cmd.run("/bin/launchctl list").stdout.splitlines() except IndexError: allsrv = [] for entry in self.getSupportedEntries(): svc = entry.get("name") if svc in allsrv: allsrv.remove(svc) return [Bcfg2.Client.XML.Element("Service", type='launchd', name=name, status='on') for name in allsrv] def BundleUpdated(self, bundle): """Reload launchd plist.""" for entry in bundle: if not self.handlesEntry(entry): continue if not self.canInstall(entry): self.logger.error("Insufficient information to restart " "service %s" % entry.get('name')) else: name = entry.get('name') if entry.get('status') == 'on' and self.FindPlist(entry): self.logger.info("Reloading launchd service %s" % name) # stop? self.cmd.run("/bin/launchctl stop %s" % name) # what if it disappeared? how do we stop services # that are currently running but the plist disappeared?! self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry))) self.cmd.run("/bin/launchctl load -w %s" % (self.FindPlist(entry))) self.cmd.run("/bin/launchctl start %s" % name) else: # only if necessary.... 
self.cmd.run("/bin/launchctl stop %s" % name) self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry))) src/lib/Bcfg2/Client/XML.py000066400000000000000000000043711303523157100155730ustar00rootroot00000000000000'''XML lib compatibility layer for the Bcfg2 client''' # library will use lxml, then builtin xml.etree, then ElementTree # pylint: disable=E0611,W0611,W0613,C0103 try: from lxml.etree import Element, SubElement, tostring, XMLParser from lxml.etree import XMLSyntaxError as ParseError from lxml.etree import XML as _XML from Bcfg2.Compat import wraps driver = 'lxml' # libxml2 2.9.0+ doesn't parse 10M+ documents by default: # https://mail.gnome.org/archives/commits-list/2012-August/msg00645.html try: _parser = XMLParser(huge_tree=True) except TypeError: _parser = XMLParser() @wraps(_XML) def XML(val, **kwargs): """ unicode strings w/encoding declaration are not supported in recent lxml.etree, so we try to read XML, and if it fails we try encoding the string. """ kwargs.setdefault('parser', _parser) try: return _XML(val, **kwargs) except ValueError: return _XML(val.encode(), **kwargs) except ImportError: # lxml not available from xml.parsers.expat import ExpatError as ParseError try: import xml.etree.ElementTree Element = xml.etree.ElementTree.Element SubElement = xml.etree.ElementTree.SubElement XML = xml.etree.ElementTree.XML def tostring(el, encoding=None, xml_declaration=None): """ tostring implementation compatible with lxml """ return xml.etree.ElementTree.tostring(el, encoding=encoding) driver = 'etree-py' except ImportError: try: from elementtree.ElementTree import Element, SubElement, XML, \ tostring driver = 'etree' import elementtree.ElementTree Element = elementtree.ElementTree.Element SubElement = elementtree.ElementTree.SubElement XML = elementtree.ElementTree.XML def tostring(el, encoding=None, xml_declaration=None): """ tostring implementation compatible with lxml """ return elementtree.ElementTree.tostring(el) except ImportError: print("Failed to load lxml, xml.etree or elementtree.ElementTree") print("Cannot continue") raise SystemExit(1) src/lib/Bcfg2/Client/__init__.py000066400000000000000000001201741303523157100166720ustar00rootroot00000000000000"""This contains all Bcfg2 Client modules""" import os import sys import stat import time import fcntl import socket import fnmatch import logging import argparse import tempfile import copy import Bcfg2.Logger import Bcfg2.Options from Bcfg2.Client import XML from Bcfg2.Client import Proxy from Bcfg2.Client import Tools from Bcfg2.Utils import locked, Executor, safe_input from Bcfg2.version import __version__ # pylint: disable=W0622 from Bcfg2.Compat import xmlrpclib, walk_packages, any, all, cmp # pylint: enable=W0622 def cmpent(ent1, ent2): """Sort entries.""" if ent1.tag != ent2.tag: return cmp(ent1.tag, ent2.tag) else: return cmp(ent1.get('name'), ent2.get('name')) def matches_entry(entryspec, entry): """ Determine if the Decisions-style entry specification matches the entry. Both are tuples of (tag, name). The entryspec can handle the wildcard * in either position. """ if entryspec == entry: return True return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in [0, 1]) def matches_white_list(entry, whitelist): """ Return True if (, ) is in the given whitelist. """ return any(matches_entry(we, (entry.tag, entry.get('name'))) for we in whitelist) def passes_black_list(entry, blacklist): """ Return True if (, ) is not in the given blacklist. 
""" return not any(matches_entry(be, (entry.tag, entry.get('name'))) for be in blacklist) def prompt(msg): """ Helper to give a yes/no prompt to the user. Flushes input buffers, handles exceptions, etc. Returns True if the user answers in the affirmative, False otherwise. :param msg: The message to show to the user. The message is not altered in any way for display; i.e., it should contain "[y/N]" if desired, etc. :type msg: string :returns: bool - True if yes, False if no """ try: ans = safe_input(msg) return ans in ['y', 'Y'] except UnicodeEncodeError: ans = safe_input(msg.encode('utf-8')) return ans in ['y', 'Y'] except (EOFError, KeyboardInterrupt): # handle ^C raise SystemExit(1) except: print("Error while reading input: %s" % sys.exc_info()[1]) return False class ClientDriverAction(Bcfg2.Options.ComponentAction): """ Action to load client drivers """ bases = ['Bcfg2.Client.Tools'] fail_silently = True class Client(object): """ The main Bcfg2 client class """ options = Proxy.ComponentProxy.options + [ Bcfg2.Options.Common.syslog, Bcfg2.Options.Common.interactive, Bcfg2.Options.BooleanOption( "-q", "--quick", help="Disable some checksum verification"), Bcfg2.Options.Option( cf=('client', 'probe_timeout'), type=Bcfg2.Options.Types.timeout, help="Timeout when running client probes"), Bcfg2.Options.Option( "-b", "--only-bundles", default=[], type=Bcfg2.Options.Types.colon_list, help='Only configure the given bundle(s)'), Bcfg2.Options.Option( "-B", "--except-bundles", default=[], type=Bcfg2.Options.Types.colon_list, help='Configure everything except the given bundle(s)'), Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.BooleanOption( "-Q", "--bundle-quick", help='Only verify the given bundle(s)'), Bcfg2.Options.Option( '-r', '--remove', choices=['all', 'services', 'packages', 'users'], help='Force removal of additional configuration items')), Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.PathOption( '-f', '--file', type=argparse.FileType('rb'), help='Configure from a file rather than querying the server'), Bcfg2.Options.PathOption( '-c', '--cache', type=argparse.FileType('wb'), help='Store the configuration in a file')), Bcfg2.Options.BooleanOption( '--exit-on-probe-failure', default=True, cf=('client', 'exit_on_probe_failure'), help="The client should exit if a probe fails"), Bcfg2.Options.Option( '-p', '--profile', cf=('client', 'profile'), help='Assert the given profile for the host'), Bcfg2.Options.Option( '-l', '--decision', cf=('client', 'decision'), choices=['whitelist', 'blacklist', 'none'], help='Run client in server decision list mode'), Bcfg2.Options.BooleanOption( "-O", "--no-lock", help='Omit lock check'), Bcfg2.Options.PathOption( cf=('components', 'lockfile'), default='/var/lock/bcfg2.run', help='Client lock file'), Bcfg2.Options.BooleanOption( "-n", "--dry-run", help='Do not actually change the system'), Bcfg2.Options.Option( "-D", "--drivers", cf=('client', 'drivers'), type=Bcfg2.Options.Types.comma_list, default=[m[1] for m in walk_packages(path=Tools.__path__)], action=ClientDriverAction, help='Client drivers'), Bcfg2.Options.BooleanOption( "-e", "--show-extra", help='Enable extra entry output'), Bcfg2.Options.BooleanOption( "-k", "--kevlar", help='Run in bulletproof mode'), Bcfg2.Options.BooleanOption( "-i", "--only-important", help='Only configure the important entries')] def __init__(self): self.config = None self._proxy = None self.logger = logging.getLogger('bcfg2') self.cmd = Executor(Bcfg2.Options.setup.probe_timeout) self.tools = [] self.times = dict() 
self.times['initialization'] = time.time() if Bcfg2.Options.setup.bundle_quick: if (not Bcfg2.Options.setup.only_bundles and not Bcfg2.Options.setup.except_bundles): self.logger.error("-Q option requires -b or -B") raise SystemExit(1) if Bcfg2.Options.setup.remove == 'services': self.logger.error("Service removal is nonsensical; " "removed services will only be disabled") if not Bcfg2.Options.setup.server.startswith('https://'): Bcfg2.Options.setup.server = \ 'https://' + Bcfg2.Options.setup.server #: A dict of the state of each entry. Keys are the entries. #: Values are boolean: True means that the entry is good, #: False means that the entry is bad. self.states = {} self.whitelist = [] self.blacklist = [] self.removal = [] self.unhandled = [] self.logger = logging.getLogger(__name__) def _probe_failure(self, probename, msg): """ handle failure of a probe in the way the user wants us to (exit or continue) """ message = "Failed to execute probe %s: %s" % (probename, msg) if Bcfg2.Options.setup.exit_on_probe_failure: self.fatal_error(message) else: self.logger.error(message) def run_probe(self, probe): """Execute probe.""" name = probe.get('name') self.logger.info("Running probe %s" % name) ret = XML.Element("probe-data", name=name, source=probe.get('source')) try: scripthandle, scriptname = tempfile.mkstemp() if sys.hexversion >= 0x03000000: script = os.fdopen(scripthandle, 'w', encoding=Bcfg2.Options.setup.encoding) else: script = os.fdopen(scripthandle, 'w') try: script.write("#!%s\n" % (probe.attrib.get('interpreter', '/bin/sh'))) if sys.hexversion >= 0x03000000: script.write(probe.text) else: script.write(probe.text.encode('utf-8')) script.close() os.chmod(scriptname, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IWUSR) # 0755 rv = self.cmd.run(scriptname) if rv.stderr: self.logger.warning("Probe %s has error output: %s" % (name, rv.stderr)) if not rv.success: self._probe_failure(name, "Return value %s" % rv.retval) self.logger.info("Probe %s has result:" % name) self.logger.info(rv.stdout) if sys.hexversion >= 0x03000000: ret.text = rv.stdout else: ret.text = rv.stdout.decode('utf-8') finally: os.unlink(scriptname) except SystemExit: raise except: self._probe_failure(name, sys.exc_info()[1]) return ret def fatal_error(self, message): """Signal a fatal error.""" self.logger.error("Fatal error: %s" % (message)) raise SystemExit(1) @property def proxy(self): """ get an XML-RPC proxy to the server """ if self._proxy is None: self._proxy = Proxy.ComponentProxy() return self._proxy def run_probes(self): """ run probes and upload probe data """ try: probes = XML.XML(str(self.proxy.GetProbes())) except (Proxy.ProxyError, Proxy.CertificateError, socket.gaierror, socket.error): err = sys.exc_info()[1] self.fatal_error("Failed to download probes from bcfg2: %s" % err) except XML.ParseError: err = sys.exc_info()[1] self.fatal_error("Server returned invalid probe requests: %s" % err) self.times['probe_download'] = time.time() # execute probes probedata = XML.Element("ProbeData") for probe in probes.findall(".//probe"): probedata.append(self.run_probe(probe)) if len(probes.findall(".//probe")) > 0: try: # upload probe responses self.proxy.RecvProbeData( XML.tostring(probedata, xml_declaration=False).decode('utf-8')) except Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to upload probe data: %s" % err) self.times['probe_upload'] = time.time() def get_config(self): """ load the configuration, either from the cached configuration 
file (-f), or from the server """ if Bcfg2.Options.setup.file: # read config from file try: self.logger.debug("Reading cached configuration from %s" % Bcfg2.Options.setup.file.name) return Bcfg2.Options.setup.file.read() except IOError: self.fatal_error("Failed to read cached configuration from: %s" % Bcfg2.Options.setup.file.name) else: # retrieve config from server if Bcfg2.Options.setup.profile: try: self.proxy.AssertProfile(Bcfg2.Options.setup.profile) except Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to set client profile: %s" % err) try: self.proxy.DeclareVersion(__version__) except (xmlrpclib.Fault, Proxy.ProxyError, Proxy.CertificateError, socket.gaierror, socket.error): err = sys.exc_info()[1] self.fatal_error("Failed to declare version: %s" % err) self.run_probes() if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']: try: # TODO: read decision list from --decision-list Bcfg2.Options.setup.decision_list = \ self.proxy.GetDecisionList( Bcfg2.Options.setup.decision) self.logger.info("Got decision list from server:") self.logger.info(Bcfg2.Options.setup.decision_list) except Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to get decision list: %s" % err) try: rawconfig = self.proxy.GetConfig().encode('utf-8') except Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to download configuration from " "Bcfg2: %s" % err) self.times['config_download'] = time.time() if Bcfg2.Options.setup.cache: try: Bcfg2.Options.setup.cache.write(rawconfig) os.chmod(Bcfg2.Options.setup.cache.name, 384) # 0600 except IOError: self.logger.warning("Failed to write config cache file %s" % (Bcfg2.Options.setup.cache)) self.times['caching'] = time.time() return rawconfig def parse_config(self, rawconfig): """ Parse the XML configuration received from the Bcfg2 server """ try: self.config = XML.XML(rawconfig) except XML.ParseError: syntax_error = sys.exc_info()[1] self.fatal_error("The configuration could not be parsed: %s" % syntax_error) self.load_tools() # find entries not handled by any tools self.unhandled = [entry for struct in self.config for entry in struct if entry not in self.handled] if self.unhandled: self.logger.error("The following entries are not handled by any " "tool:") for entry in self.unhandled: self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'), entry.get('name'))) # find duplicates self.find_dups(self.config) pkgs = [(entry.get('name'), entry.get('origin')) for struct in self.config for entry in struct if entry.tag == 'Package'] if pkgs: self.logger.debug("The following packages are specified in bcfg2:") self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None]) self.logger.debug("The following packages are prereqs added by " "Packages:") self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) self.times['config_parse'] = time.time() def run(self): """Perform client execution phase.""" # begin configuration self.times['start'] = time.time() self.logger.info("Starting Bcfg2 client run at %s" % self.times['start']) self.parse_config(self.get_config().decode('utf-8')) if self.config.tag == 'error': self.fatal_error("Server error: %s" % (self.config.text)) if Bcfg2.Options.setup.bundle_quick: newconfig = XML.XML('') for bundle in self.config.getchildren(): name = bundle.get("name") if (name and (name in Bcfg2.Options.setup.only_bundles or name not in Bcfg2.Options.setup.except_bundles)): newconfig.append(bundle) self.config = newconfig if not Bcfg2.Options.setup.no_lock: # check lock here try: 
lockfile = open(Bcfg2.Options.setup.lockfile, 'w') if locked(lockfile.fileno()): self.fatal_error("Another instance of Bcfg2 is running. " "If you want to bypass the check, run " "with the -O/--no-lock option") except SystemExit: raise except: lockfile = None self.logger.error("Failed to open lockfile %s: %s" % (Bcfg2.Options.setup.lockfile, sys.exc_info()[1])) # execute the configuration self.Execute() if not Bcfg2.Options.setup.no_lock: # unlock here if lockfile: try: fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN) os.remove(Bcfg2.Options.setup.lockfile) except OSError: self.logger.error("Failed to unlock lockfile %s" % lockfile.name) if (not Bcfg2.Options.setup.file and not Bcfg2.Options.setup.bundle_quick): # upload statistics feedback = self.GenerateStats() try: self.proxy.RecvStats( XML.tostring(feedback, xml_declaration=False).decode('utf-8')) except Proxy.ProxyError: err = sys.exc_info()[1] self.logger.error("Failed to upload configuration statistics: " "%s" % err) raise SystemExit(2) self.logger.info("Finished Bcfg2 client run at %s" % time.time()) def load_tools(self): """ Load all applicable client tools """ for tool in Bcfg2.Options.setup.drivers: try: self.tools.append(tool(self.config)) except Tools.ToolInstantiationError: continue except: self.logger.error("Failed to instantiate tool %s" % tool, exc_info=1) for tool in self.tools[:]: for conflict in getattr(tool, 'conflicts', []): for item in self.tools: if item.name == conflict: self.tools.remove(item) self.logger.info("Loaded tool drivers:") self.logger.info([tool.name for tool in self.tools]) deprecated = [tool.name for tool in self.tools if tool.deprecated] if deprecated: self.logger.warning("Loaded deprecated tool drivers:") self.logger.warning(deprecated) experimental = [tool.name for tool in self.tools if tool.experimental] if experimental: self.logger.warning("Loaded experimental tool drivers:") self.logger.warning(experimental) def find_dups(self, config): """ Find duplicate entries and warn about them """ entries = dict() for struct in config: for entry in struct: for tool in self.tools: if tool.handlesEntry(entry): pkey = tool.primarykey(entry) if pkey in entries: entries[pkey] += 1 else: entries[pkey] = 1 multi = [e for e, c in entries.items() if c > 1] if multi: self.logger.debug("The following entries are included multiple " "times:") for entry in multi: self.logger.debug(entry) def promptFilter(self, msg, entries): """Filter a supplied list based on user input.""" ret = [] entries.sort(key=lambda e: e.tag + ":" + e.get('name')) for entry in entries[:]: if entry in self.unhandled: # don't prompt for entries that can't be installed continue if 'qtext' in entry.attrib: iprompt = entry.get('qtext') else: iprompt = msg % (entry.tag, entry.get('name')) if prompt(iprompt): ret.append(entry) return ret def __getattr__(self, name): if name in ['extra', 'handled', 'modified', '__important__']: ret = [] for tool in self.tools: ret += getattr(tool, name) return ret elif name in self.__dict__: return self.__dict__[name] raise AttributeError(name) def InstallImportant(self): """Install important entries We also process the decision mode stuff here because we want to prevent non-whitelisted/blacklisted 'important' entries from being installed prior to determining the decision mode on the client. 
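# The whitelist/blacklist handling described above reduces to matching each
# entry's (tag, name) pair against a server-supplied decision list, with
# shell-style globs allowed in the list.  A self-contained sketch of that
# matching rule; the sample patterns below are invented for illustration:
import fnmatch

def entry_matches(entryspec, entry):
    """True if every glob in entryspec matches the corresponding entry field."""
    return all(fnmatch.fnmatch(value, glob)
               for glob, value in zip(entryspec, entry))

decisions = [("Path", "/etc/ssh/*"), ("Service", "sshd")]
entry = ("Path", "/etc/ssh/sshd_config")
# whitelist mode keeps only matching entries; blacklist mode suppresses them
assert any(entry_matches(spec, entry) for spec in decisions)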
""" # Need to process decision stuff early so that dryrun mode # works with it self.whitelist = [entry for entry in self.states if not self.states[entry]] if not Bcfg2.Options.setup.file: if Bcfg2.Options.setup.decision == 'whitelist': dwl = Bcfg2.Options.setup.decision_list w_to_rem = [e for e in self.whitelist if not matches_white_list(e, dwl)] if w_to_rem: self.logger.info("In whitelist mode: " "suppressing installation of:") self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem]) self.whitelist = [x for x in self.whitelist if x not in w_to_rem] elif Bcfg2.Options.setup.decision == 'blacklist': b_to_rem = \ [e for e in self.whitelist if not passes_black_list(e, Bcfg2.Options.setup.decision_list)] if b_to_rem: self.logger.info("In blacklist mode: " "suppressing installation of:") self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem]) self.whitelist = [x for x in self.whitelist if x not in b_to_rem] # take care of important entries first if (not Bcfg2.Options.setup.dry_run or Bcfg2.Options.setup.only_important): important_installs = set() for parent in self.config.findall(".//Path/.."): name = parent.get("name") if not name or (name in Bcfg2.Options.setup.except_bundles and name not in Bcfg2.Options.setup.only_bundles): continue for cfile in parent.findall("./Path"): if (cfile.get('name') not in self.__important__ or cfile.get('type') != 'file' or cfile not in self.whitelist): continue tools = [t for t in self.tools if t.handlesEntry(cfile) and t.canVerify(cfile)] if not tools: continue if Bcfg2.Options.setup.dry_run: important_installs.add(cfile) continue if (Bcfg2.Options.setup.interactive and not self.promptFilter("Install %s: %s? (y/N):", [cfile])): self.whitelist.remove(cfile) continue try: self.states[cfile] = tools[0].InstallPath(cfile) if self.states[cfile]: tools[0].modified.append(cfile) except: # pylint: disable=W0702 self.logger.error("Unexpected tool failure", exc_info=1) cfile.set('qtext', '') if tools[0].VerifyPath(cfile, []): self.whitelist.remove(cfile) if Bcfg2.Options.setup.dry_run and len(important_installs) > 0: self.logger.info("In dryrun mode: " "suppressing entry installation for:") self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in important_installs]) def Inventory(self): """ Verify all entries, find extra entries, and build up workqueues """ # initialize all states for struct in self.config.getchildren(): for entry in struct.getchildren(): self.states[entry] = False for tool in self.tools: try: self.states.update(tool.Inventory()) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1) def Decide(self): # pylint: disable=R0912 """Set self.whitelist based on user interaction.""" iprompt = "Install %s: %s? (y/N): " rprompt = "Remove %s: %s? 
(y/N): " if Bcfg2.Options.setup.remove: if Bcfg2.Options.setup.remove == 'all': self.removal = self.extra elif Bcfg2.Options.setup.remove == 'services': self.removal = [entry for entry in self.extra if entry.tag == 'Service'] elif Bcfg2.Options.setup.remove == 'packages': self.removal = [entry for entry in self.extra if entry.tag == 'Package'] elif Bcfg2.Options.setup.remove == 'users': self.removal = [entry for entry in self.extra if entry.tag in ['POSIXUser', 'POSIXGroup']] candidates = [entry for entry in self.states if not self.states[entry]] if Bcfg2.Options.setup.dry_run: if self.whitelist: self.logger.info("In dryrun mode: " "suppressing entry installation for:") self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for entry in self.whitelist]) self.whitelist = [] if self.removal: self.logger.info("In dryrun mode: " "suppressing entry removal for:") self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for entry in self.removal]) self.removal = [] # Here is where most of the work goes # first perform bundle filtering all_bundle_names = [b.get('name') for b in self.config.findall('./Bundle')] bundles = self.config.getchildren() if Bcfg2.Options.setup.only_bundles: # warn if non-existent bundle given for bundle in Bcfg2.Options.setup.only_bundles: if bundle not in all_bundle_names: self.logger.info("Warning: Bundle %s not found" % bundle) bundles = [b for b in bundles if b.get('name') in Bcfg2.Options.setup.only_bundles] if Bcfg2.Options.setup.except_bundles: # warn if non-existent bundle given if not Bcfg2.Options.setup.bundle_quick: for bundle in Bcfg2.Options.setup.except_bundles: if bundle not in all_bundle_names: self.logger.info("Warning: Bundle %s not found" % bundle) bundles = [ b for b in bundles if b.get('name') not in Bcfg2.Options.setup.except_bundles] self.whitelist = [e for e in self.whitelist if any(e in b for b in bundles)] # first process prereq actions for bundle in bundles[:]: if bundle.tag == 'Bundle': bmodified = any((item in self.whitelist or item in self.modified) for item in bundle) else: bmodified = False actions = [a for a in bundle.findall('./Action') if (a.get('timing') in ['pre', 'both'] and (bmodified or a.get('when') == 'always'))] # now we process all "pre" and "both" actions that are either # always or the bundle has been modified if Bcfg2.Options.setup.interactive: self.promptFilter(iprompt, actions) self.DispatchInstallCalls(actions) if bundle.tag != 'Bundle': continue # need to test to fail entries in whitelist if not all(self.states[a] for a in actions): # then display bundles forced off with entries self.logger.info("%s %s failed prerequisite action" % (bundle.tag, bundle.get('name'))) bundles.remove(bundle) b_to_remv = [ent for ent in self.whitelist if ent in bundle] if b_to_remv: self.logger.info("Not installing entries from %s %s" % (bundle.tag, bundle.get('name'))) self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_remv]) for ent in b_to_remv: self.whitelist.remove(ent) self.logger.debug("Installing entries in the following bundle(s):") self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles if b.get("name"))) if Bcfg2.Options.setup.interactive: self.whitelist = self.promptFilter(iprompt, self.whitelist) self.removal = self.promptFilter(rprompt, self.removal) for entry in candidates: if entry not in self.whitelist: self.blacklist.append(entry) def DispatchInstallCalls(self, entries): """Dispatch install calls to underlying tools.""" for tool in self.tools: handled = [entry for entry in entries if 
tool.canInstall(entry)] if not handled: continue try: self.states.update(tool.Install(handled)) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error("%s.Install() call failed:" % tool.name, exc_info=1) def Install(self): """Install all entries.""" self.DispatchInstallCalls(self.whitelist) mods = self.modified mbundles = [struct for struct in self.config.findall('Bundle') if any(True for mod in mods if mod in struct)] if self.modified: # Handle Bundle interdeps if mbundles: self.logger.info("The Following Bundles have been modified:") self.logger.info([mbun.get('name') for mbun in mbundles]) tbm = [(t, b) for t in self.tools for b in mbundles] for tool, bundle in tbm: try: self.states.update(tool.Inventory(structures=[bundle])) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1) clobbered = [entry for bundle in mbundles for entry in bundle if (not self.states[entry] and entry not in self.blacklist)] if clobbered: self.logger.debug("Found clobbered entries:") self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) for entry in clobbered]) if not Bcfg2.Options.setup.interactive: self.DispatchInstallCalls(clobbered) all_bundles = self.config.findall('./Bundle') mbundles.extend(self._get_all_modified_bundles(mbundles, all_bundles)) for bundle in all_bundles: if (Bcfg2.Options.setup.only_bundles and bundle.get('name') not in Bcfg2.Options.setup.only_bundles): # prune out unspecified bundles when running with -b continue if bundle in mbundles: continue self.logger.debug("Bundle %s was not modified" % bundle.get('name')) for tool in self.tools: try: self.states.update(tool.BundleNotUpdated(bundle)) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error('%s.BundleNotUpdated(%s:%s) call failed:' % (tool.name, bundle.tag, bundle.get('name')), exc_info=1) for indep in self.config.findall('.//Independent'): for tool in self.tools: try: self.states.update(tool.BundleNotUpdated(indep)) except KeyboardInterrupt: raise except: # pylint: disable=W0702 self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:" % (tool.name, indep.tag, indep.get("name")), exc_info=1) def _get_all_modified_bundles(self, mbundles, all_bundles): """This gets all modified bundles by calling BundleUpdated until no new bundles get added to the modification list.""" new_mbundles = mbundles add_mbundles = [] while new_mbundles: for bundle in self.config.findall('./Bundle'): if (Bcfg2.Options.setup.only_bundles and bundle.get('name') not in Bcfg2.Options.setup.only_bundles): # prune out unspecified bundles when running with -b continue if bundle not in new_mbundles: continue self.logger.debug('Bundle %s was modified' % bundle.get('name')) for tool in self.tools: try: self.states.update(tool.BundleUpdated(bundle)) except: # pylint: disable=W0702 self.logger.error('%s.BundleUpdated(%s:%s) call ' 'failed:' % (tool.name, bundle.tag, bundle.get("name")), exc_info=1) mods = self.modified new_mbundles = [struct for struct in all_bundles if any(True for mod in mods if mod in struct) and struct not in mbundles + add_mbundles] add_mbundles.extend(new_mbundles) return add_mbundles def Remove(self): """Remove extra entries.""" for tool in self.tools: extras = [entry for entry in self.removal if tool.handlesEntry(entry)] if extras: try: tool.Remove(extras) except: # pylint: disable=W0702 self.logger.error("%s.Remove() failed" % tool.name, exc_info=1) def CondDisplayState(self, phase): """Conditionally 
print tracing information.""" self.logger.info('Phase: %s' % phase) self.logger.info('Correct entries: %d' % list(self.states.values()).count(True)) self.logger.info('Incorrect entries: %d' % list(self.states.values()).count(False)) if phase == 'final' and list(self.states.values()).count(False): for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" + e.get('name')): if not self.states[entry]: etype = entry.get('type') if etype: self.logger.info("%s:%s:%s" % (entry.tag, etype, entry.get('name'))) else: self.logger.info("%s:%s" % (entry.tag, entry.get('name'))) self.logger.info('Total managed entries: %d' % len(list(self.states.values()))) self.logger.info('Unmanaged entries: %d' % len(self.extra)) if phase == 'final' and Bcfg2.Options.setup.show_extra: for entry in sorted(self.extra, key=lambda e: e.tag + ":" + e.get('name')): etype = entry.get('type') if etype: self.logger.info("%s:%s:%s" % (entry.tag, etype, entry.get('name'))) else: self.logger.info("%s:%s" % (entry.tag, entry.get('name'))) if ((list(self.states.values()).count(False) == 0) and not self.extra): self.logger.info('All entries correct.') def ReInventory(self): """Recheck everything.""" if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar: self.logger.info("Rechecking system inventory") self.Inventory() def Execute(self): """Run all methods.""" self.Inventory() self.times['inventory'] = time.time() self.CondDisplayState('initial') self.InstallImportant() if not Bcfg2.Options.setup.only_important: self.Decide() self.Install() self.times['install'] = time.time() self.Remove() self.times['remove'] = time.time() if self.modified: self.ReInventory() self.times['reinventory'] = time.time() self.times['finished'] = time.time() self.CondDisplayState('final') def GenerateStats(self): """Generate XML summary of execution statistics.""" states = {} for (item, val) in list(self.states.items()): if not Bcfg2.Options.setup.only_important or \ item.get('important', 'false').lower() == 'true': states[item] = val feedback = XML.Element("upload-statistics") stats = XML.SubElement(feedback, 'Statistics', total=str(len(states)), version='2.0', revision=self.config.get('revision', '-1')) flags = XML.SubElement(stats, "Flags") XML.SubElement(flags, "Flag", name="dry_run", value=str(Bcfg2.Options.setup.dry_run)) XML.SubElement(flags, "Flag", name="only_important", value=str(Bcfg2.Options.setup.only_important)) good_entries = [key for key, val in list(states.items()) if val] good = len(good_entries) stats.set('good', str(good)) if any(not val for val in list(states.values())): stats.set('state', 'dirty') else: stats.set('state', 'clean') # List bad elements of the configuration for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), (good_entries, "Good"), ([entry for entry in states if not states[entry]], "Bad")]: container = XML.SubElement(stats, ename) for item in data: new_item = copy.deepcopy(item) new_item.set('qtext', '') container.append(new_item) new_item.text = None timeinfo = XML.Element("OpStamps") feedback.append(stats) for (event, timestamp) in list(self.times.items()): timeinfo.set(event, str(timestamp)) stats.append(timeinfo) return feedback src/lib/Bcfg2/Compat.py000066400000000000000000000212721303523157100151370ustar00rootroot00000000000000""" Compatibility imports, mostly for Py3k support, but also for Python 2.4 and such-like """ ################################################### # # # IF YOU ADD SOMETHING TO THIS FILE, YOU MUST # # DOCUMENT IT IN docs/development/compat.txt # # # 
################################################### import sys # pylint: disable=E0601,E0602,E0611,W0611,W0622,C0103 try: from email.Utils import formatdate except ImportError: from email.utils import formatdate # urllib imports try: from urllib import quote_plus from urllib import urlretrieve from urlparse import urljoin, urlparse from urllib2 import HTTPBasicAuthHandler, \ HTTPPasswordMgrWithDefaultRealm, build_opener, install_opener, \ urlopen, HTTPError, URLError except ImportError: from urllib.parse import urljoin, urlparse, quote_plus from urllib.request import HTTPBasicAuthHandler, \ HTTPPasswordMgrWithDefaultRealm, build_opener, install_opener, \ urlopen, urlretrieve from urllib.error import HTTPError, URLError try: from cStringIO import StringIO except ImportError: from io import StringIO try: import ConfigParser except ImportError: import configparser as ConfigParser try: import cPickle except ImportError: import pickle as cPickle try: from Queue import Queue, Empty, Full except ImportError: from queue import Queue, Empty, Full # xmlrpc imports try: import xmlrpclib import SimpleXMLRPCServer except ImportError: import xmlrpc.client as xmlrpclib import xmlrpc.server as SimpleXMLRPCServer # socketserver import try: import SocketServer except ImportError: import socketserver as SocketServer # httplib imports try: import httplib except ImportError: import http.client as httplib try: unicode = unicode except NameError: unicode = str def u_str(string, encoding=None): """ print to file compatibility """ if sys.hexversion >= 0x03000000: return string else: if encoding is not None: return unicode(string, encoding) else: return unicode(string) try: from functools import wraps except ImportError: def wraps(wrapped): # pylint: disable=W0613 """ implementation of functools.wraps() for python 2.4 """ return lambda f: f # base64 compat if sys.hexversion >= 0x03000000: from base64 import b64encode as _b64encode, b64decode as _b64decode @wraps(_b64encode) def b64encode(val, **kwargs): # pylint: disable=C0111 try: return _b64encode(val, **kwargs) except TypeError: return _b64encode(val.encode('UTF-8'), **kwargs).decode('UTF-8') @wraps(_b64decode) def b64decode(val, **kwargs): # pylint: disable=C0111 return _b64decode(val.encode('UTF-8'), **kwargs).decode('UTF-8') else: from base64 import b64encode, b64decode try: input = raw_input except NameError: input = input try: reduce = reduce except NameError: from functools import reduce try: from collections import MutableMapping except ImportError: from UserDict import DictMixin as MutableMapping class CmpMixin(object): """ In Py3K, :meth:`object.__cmp__` is no longer magical, so this mixin can be used to define the rich comparison operators from ``__cmp__`` -- i.e., it makes ``__cmp__`` magical again. 
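# A short, stand-alone illustration of how a __cmp__-based mixin like the
# CmpMixin described above is meant to be used: the class only supplies
# __cmp__, and the mixin derives the rich comparison operators from it.
# The small demo mixin and the Version class below are invented for this
# example; they are not part of the Bcfg2 sources.
class _CmpDemoMixin(object):
    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0


class Version(_CmpDemoMixin):
    def __init__(self, major, minor):
        self.key = (major, minor)

    def __cmp__(self, other):
        return (self.key > other.key) - (self.key < other.key)

assert Version(1, 2) < Version(1, 10)
assert Version(2, 0) == Version(2, 0)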
""" def __lt__(self, other): return self.__cmp__(other) < 0 def __gt__(self, other): return self.__cmp__(other) > 0 def __eq__(self, other): return self.__cmp__(other) == 0 def __ne__(self, other): return not self.__eq__(other) def __ge__(self, other): return self.__gt__(other) or self.__eq__(other) def __le__(self, other): return self.__lt__(other) or self.__eq__(other) try: from pkgutil import walk_packages except ImportError: try: from pkgutil import iter_modules # iter_modules was added in python 2.5; use it to get an exact # re-implementation of walk_packages if possible def walk_packages(path=None, prefix='', onerror=None): """ Implementation of walk_packages for python 2.5 """ def seen(path, seenpaths={}): # pylint: disable=W0102 """ detect if a path has been 'seen' (i.e., considered for inclusion in the generator). tracks what has been seen through the magic of python default arguments """ if path in seenpaths: return True seenpaths[path] = True for importer, name, ispkg in iter_modules(path, prefix): yield importer, name, ispkg if ispkg: try: __import__(name) except ImportError: if onerror is not None: onerror(name) except Exception: if onerror is not None: onerror(name) else: raise else: path = getattr(sys.modules[name], '__path__', []) # don't traverse path items we've seen before path = [p for p in path if not seen(p)] for item in walk_packages(path, name + '.', onerror): yield item except ImportError: import os def walk_packages(path=None, prefix='', onerror=None): """ Imperfect, incomplete implementation of :func:`pkgutil.walk_packages` for python 2.4. Differences: * Requires a full path, not a path relative to something in sys.path. Anywhere we care about that shouldn't be an issue. * The first element of each tuple is None instead of an importer object. """ if path is None: path = sys.path for mpath in path: for fname in os.listdir(mpath): fpath = os.path.join(mpath, fname) if (os.path.isfile(fpath) and fname.endswith(".py") and fname != '__init__.py'): yield None, prefix + fname[:-3], False elif os.path.isdir(fpath): mname = prefix + fname if os.path.exists(os.path.join(fpath, "__init__.py")): yield None, mname, True try: __import__(mname) except ImportError: if onerror is not None: onerror(mname) except Exception: if onerror is not None: onerror(mname) else: raise else: for item in walk_packages([fpath], prefix=mname + '.', onerror=onerror): yield item try: all = all any = any except NameError: def all(iterable): """ implementation of builtin all() for python 2.4 """ for element in iterable: if not element: return False return True def any(iterable): """ implementation of builtin any() for python 2.4 """ for element in iterable: if element: return True return False try: from hashlib import md5 except ImportError: from md5 import md5 def oct_mode(mode): """ Convert a decimal number describing a POSIX permissions mode to a string giving the octal mode. In Python 2, this is a synonym for :func:`oct`, but in Python 3 the octal format has changed to ``0o000``, which cannot be used as an octal permissions mode, so we need to strip the 'o' from the output. I.e., this function acts like the Python 2 :func:`oct` regardless of what version of Python is in use. 
:param mode: The decimal mode to convert to octal :type mode: int :returns: string """ return oct(mode).replace('o', '') try: long = long except NameError: # longs are just ints in py3k long = int try: cmp = cmp except NameError: def cmp(a, b): """ Py3k implementation of cmp() """ return (a > b) - (a < b) # ast was introduced in python 2.6 try: from ast import literal_eval except ImportError: literal_eval = eval src/lib/Bcfg2/DBSettings.py000066400000000000000000000340271303523157100157240ustar00rootroot00000000000000""" Django settings for the Bcfg2 server """ import os import sys import logging import Bcfg2.Logger import Bcfg2.Options try: import django import django.core.management import django.conf HAS_DJANGO = True except ImportError: HAS_DJANGO = False # required for reporting try: import south # pylint: disable=W0611 HAS_SOUTH = True except ImportError: HAS_SOUTH = False settings = dict( # pylint: disable=C0103 TIME_ZONE=None, TEMPLATE_DEBUG=False, DEBUG=False, ALLOWED_HOSTS=['*'], MEDIA_URL='/site_media/', MANAGERS=(('Root', 'root'),), ADMINS=(('Root', 'root'),), # Language code for this installation. All choices can be found # here: # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes # http://blogs.law.harvard.edu/tech/stories/storyReader$15 LANGUAGE_CODE='en-us', SITE_ID=1, INSTALLED_APPS=('django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'Bcfg2.Server'), MEDIA_ROOT='', STATIC_URL='/media/', # TODO - make this unique SECRET_KEY='eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7', TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader'), MIDDLEWARE_CLASSES=( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware'), ROOT_URLCONF='Bcfg2.Reporting.urls', AUTHENTICATION_BACKENDS=('django.contrib.auth.backends.ModelBackend'), LOGIN_URL='/login', SESSION_EXPIRE_AT_BROWSER_CLOSE=True, TEMPLATE_DIRS=( '/usr/share/python-support/python-django/django/contrib/admin/' 'templates/'), TEMPLATE_CONTEXT_PROCESSORS=( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request'), DATABASE_ROUTERS=['Bcfg2.DBSettings.PerApplicationRouter'], TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } }) if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] >= 6: settings['MIDDLEWARE_CLASSES'] += \ ('django.contrib.admindocs.middleware.XViewMiddleware',) elif HAS_SOUTH: settings['MIDDLEWARE_CLASSES'] += \ ('django.middleware.doc.XViewMiddleware',) if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] >= 7: settings['INSTALLED_APPS'] += ('Bcfg2.Reporting',) elif HAS_SOUTH: settings['INSTALLED_APPS'] += ('south', 'Bcfg2.Reporting') settings['SOUTH_MIGRATION_MODULES'] = { 'Reporting': 'Bcfg2.Reporting.south_migrations', 'Server': 'Bcfg2.Server.south_migrations', } if 'BCFG2_LEGACY_MODELS' in os.environ: settings['INSTALLED_APPS'] += ('Bcfg2.Server.Reports.reports',) def finalize_django_config(opts=None, silent=False): """ Perform final Django configuration """ if opts is None: opts = Bcfg2.Options.setup settings['DATABASES'] = dict( default=dict( ENGINE="django.db.backends.%s" % opts.db_engine, NAME=opts.db_name, 
USER=opts.db_user, PASSWORD=opts.db_password, HOST=opts.db_host, PORT=opts.db_port, OPTIONS=opts.db_opts, SCHEMA=opts.db_schema)) if hasattr(opts, "reporting_db_engine") and \ opts.reporting_db_engine is not None: settings['DATABASES']['Reporting'] = dict( ENGINE="django.db.backends.%s" % opts.reporting_db_engine, NAME=opts.reporting_db_name, USER=opts.reporting_db_user, PASSWORD=opts.reporting_db_password, HOST=opts.reporting_db_host, PORT=opts.reporting_db_port, OPTIONS=opts.reporting_db_opts, SCHEMA=opts.reporting_db_schema) settings['TIME_ZONE'] = opts.time_zone settings['TEMPLATE_DEBUG'] = settings['DEBUG'] = \ opts.web_debug if opts.web_debug: print("Warning: Setting web_debug to True causes extraordinary " "memory leaks. Only use this setting if you know what " "you're doing.") if opts.web_prefix: settings['MEDIA_URL'] = \ opts.web_prefix.rstrip('/') + \ settings['MEDIA_URL'] if opts.django_settings: settings.update(opts.django_settings) logger = logging.getLogger() logger.debug("Finalizing Django settings: %s" % settings) module = sys.modules[__name__] for name, value in settings.items(): setattr(module, name, value) try: django.conf.settings.configure(**settings) if django.VERSION[0] == 1 and django.VERSION[1] >= 7: django.setup() # pylint: disable=E1101 except RuntimeError: if not silent: logger.warning("Failed to finalize Django settings: %s" % sys.exc_info()[1]) def sync_databases(**kwargs): """ Synchronize all databases that we know about. """ if django.VERSION[0] == 1 and django.VERSION[1] >= 7: # Nothing needed here, it's all handled with migrate return logger = logging.getLogger() for database in settings['DATABASES']: logger.debug("Syncing database %s" % (database)) django.core.management.call_command("syncdb", database=database, **kwargs) def upgrade_to_django_migrations(database, logger): """ Get the migration state from south and move django migrations to the same state by fake applying the same migration. Note: We cannot use south directly here, because this functions runs on django-1.7 or higher, that is not supported by south. """ last_migration = None try: # get latest south migration cursor = django.db.connections[database].cursor() cursor.cursor.execute('SELECT migration FROM south_migrationhistory') applied_migrations = [name for (name,) in cursor.fetchall()] last_migration = sorted(applied_migrations).pop() except: # pylint: disable=W0702 # django.db.DatabaseError is not working here, because we are # using the low level api to interact directly with the database logger.debug("No south migration detected for database: %s." % database) if last_migration is not None: # fake-apply matching django migrations django.core.management.call_command( "migrate", 'Reporting', last_migration, database=database, fake=True) def initial_django_migration(database): """ Check if we ever executed an initial django migration. """ from django.db.migrations import loader # pylint: disable=E0611 loader = loader.MigrationLoader(django.db.connections[database]) return len(loader.applied_migrations) == 0 def migrate_databases(**kwargs): """ Do South migrations on all databases that we know about. """ logger = logging.getLogger() for database in settings['DATABASES']: logger.debug("Migrating database %s" % (database)) if django.VERSION[0] == 1 and django.VERSION[1] >= 7: if initial_django_migration(database): logger.warning( "No applied django migrations found for database %s. " "Trying to get the state from south migration in case " "you just upgraded your django version." 
                    % database)
                upgrade_to_django_migrations(database, logger)
        django.core.management.call_command("migrate", database=database,
                                            **kwargs)


def get_db_label(application):
    """ Get the name of the database for a given Django "application".
    The rule is that if a database with the same name as the application
    exists, use it.  Otherwise use the default.  Returns a string suitable
    for use as a key in the Django database settings dict """
    if application in settings['DATABASES']:
        return application
    return 'default'


class PerApplicationRouter(object):
    """ Django database router for redirecting different applications to
    their own database """

    def _db_per_app(self, model, **_):
        """ If a database with the same name as the application exists,
        use it.  Otherwise use the default """
        return get_db_label(model._meta.app_label)  # pylint: disable=W0212

    def db_for_read(self, model, **hints):
        """ Called when Django wants to find out what database to read
        from """
        return self._db_per_app(model, **hints)

    def db_for_write(self, model, **hints):
        """ Called when Django wants to find out what database to write
        to """
        return self._db_per_app(model, **hints)

    def allow_relation(self, obj1, obj2, **_):
        """ Called when Django wants to determine what relations to allow.
        Only allow relations within an app """
        # pylint: disable=W0212
        return obj1._meta.app_label == obj2._meta.app_label
        # pylint: enable=W0212

    def allow_syncdb(self, *_):
        """ Called when Django wants to determine which models to sync to a
        given database.  Take the coward's way out and sync all models to
        all databases to allow for easy migrations.

        This method is replaced with allow_migrate in django 1.7 and
        higher. """
        return True

    def allow_migrate(self, *_args, **_kwargs):
        """ Called when Django wants to determine which migrations should
        be run on a given database.  Take the coward's way out and run all
        migrations to all databases to allow for easy migrations.
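# A self-contained illustration of the lookup rule that get_db_label()
# implements above; the database names and engines here are invented for
# the example, not required values:
SAMPLE_DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3'},
    'Reporting': {'ENGINE': 'django.db.backends.postgresql_psycopg2'},
}

def label_for(application, databases=SAMPLE_DATABASES):
    """Use the same-named database if one is configured, else 'default'."""
    return application if application in databases else 'default'

assert label_for('Reporting') == 'Reporting'   # Reporting models get their own DB
assert label_for('Server') == 'default'        # everything else shares the default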
""" return True class _OptionContainer(object): """ Container for options loaded at import-time to configure databases """ parse_first = True options = [ Bcfg2.Options.Common.repository, Bcfg2.Options.PathOption( '-W', '--web-config', cf=('reporting', 'config'), default="/etc/bcfg2-web.conf", action=Bcfg2.Options.ConfigFileAction, help='Web interface configuration file'), # default database options Bcfg2.Options.Option( cf=('database', 'engine'), default='sqlite3', help='Database engine', dest='db_engine'), Bcfg2.Options.RepositoryMacroOption( cf=('database', 'name'), default='/etc/bcfg2.sqlite', help="Database name", dest="db_name"), Bcfg2.Options.Option( cf=('database', 'user'), help='Database username', dest='db_user'), Bcfg2.Options.Option( cf=('database', 'password'), help='Database password', dest='db_password'), Bcfg2.Options.Option( cf=('database', 'host'), help='Database host', dest='db_host'), Bcfg2.Options.Option( cf=('database', 'port'), help='Database port', dest='db_port'), Bcfg2.Options.Option( cf=('database', 'schema'), help='Database schema', dest='db_schema', default='public'), Bcfg2.Options.Option( cf=('database', 'options'), help='Database options', dest='db_opts', type=Bcfg2.Options.Types.literal_dict, default=dict()), # reporting database options Bcfg2.Options.Option( cf=('database', 'reporting_engine'), help='Reporting database engine', dest='reporting_db_engine'), Bcfg2.Options.Option( cf=('database', 'reporting_name'), default='/etc/reporting.sqlite', help="Reporting database name", dest="reporting_db_name"), Bcfg2.Options.Option( cf=('database', 'reporting_user'), help='Reporting database username', dest='reporting_db_user'), Bcfg2.Options.Option( cf=('database', 'reporting_password'), help='Reporting database password', dest='reporting_db_password'), Bcfg2.Options.Option( cf=('database', 'reporting_host'), help='Reporting database host', dest='reporting_db_host'), Bcfg2.Options.Option( cf=('database', 'reporting_port'), help='Reporting database port', dest='reporting_db_port'), Bcfg2.Options.Option( cf=('database', 'reporting_schema'), help='Reporting database schema', dest='reporting_db_schema', default='public'), Bcfg2.Options.Option( cf=('database', 'reporting_options'), help='Reporting database options', dest='reporting_db_opts', type=Bcfg2.Options.Types.literal_dict, default=dict()), # Django options Bcfg2.Options.Option( cf=('reporting', 'time_zone'), help='Django timezone'), Bcfg2.Options.BooleanOption( cf=('reporting', 'web_debug'), help='Django debug'), Bcfg2.Options.Option( cf=('reporting', 'web_prefix'), help='Web prefix'), Bcfg2.Options.Option( cf=('reporting', 'django_settings'), help='Additional django settings', type=Bcfg2.Options.Types.literal_dict, default=dict())] @staticmethod def component_parsed_hook(opts): """ Finalize the Django config after this component's options are parsed. """ if HAS_DJANGO: finalize_django_config(opts=opts) @staticmethod def options_parsed_hook(): """ Finalize the Django config after all options are parsed. This is added in case the DBSettings component isn't added early enough in option parsing to be parsed in the 'early' phase. Chances are good that things will break if that happens, but we do our best to be a good citizen. 
""" if HAS_DJANGO: finalize_django_config(silent=True) Bcfg2.Options.get_parser().add_component(_OptionContainer) src/lib/Bcfg2/Logger.py000066400000000000000000000263161303523157100151370ustar00rootroot00000000000000"""Bcfg2 logging support""" import copy import fcntl import logging import logging.handlers import math import socket import struct import sys import termios import Bcfg2.Options logging.raiseExceptions = 0 class TermiosFormatter(logging.Formatter): """The termios formatter displays output in a terminal-sensitive fashion. """ def __init__(self, fmt=None, datefmt=None): logging.Formatter.__init__(self, fmt, datefmt) if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): # now get termios info try: self.width = struct.unpack('hhhh', fcntl.ioctl(0, termios.TIOCGWINSZ, "\000" * 8))[1] if self.width == 0: self.width = 80 except: # pylint: disable=W0702 self.width = 80 else: # output to a pipe self.width = 32768 def format(self, record): '''format a record for display''' returns = [] line_len = self.width if isinstance(record.msg, str): if len(record.args) != 0: record.msg = record.msg % record.args for line in record.msg.split('\n'): if len(line) <= line_len: returns.append(line) else: inner_lines = \ int(math.floor(float(len(line)) / line_len)) + 1 for msgline in range(inner_lines): returns.append( line[msgline * line_len:(msgline + 1) * line_len]) elif isinstance(record.msg, list): if not record.msg: return '' record.msg.sort() msgwidth = self.width col_width = max([len(item) for item in record.msg]) columns = int(math.floor(float(msgwidth) / (col_width + 2))) lines = int(math.ceil(float(len(record.msg)) / columns)) for lineno in range(lines): indices = [idx for idx in [(colNum * lines) + lineno for colNum in range(columns)] if idx < len(record.msg)] retformat = (len(indices) * (" %%-%ds " % col_width)) returns.append(retformat % tuple([record.msg[idx] for idx in indices])) else: returns.append(str(record.msg)) if record.exc_info: returns.append(self.formatException(record.exc_info)) return '\n'.join(returns) class FragmentingSysLogHandler(logging.handlers.SysLogHandler): """ This handler fragments messages into chunks smaller than 250 characters """ def __init__(self, procname, path, facility): self.procname = procname self.unixsocket = False logging.handlers.SysLogHandler.__init__(self, path, facility) def emit(self, record): """Chunk and deliver records.""" record.name = self.procname if isinstance(record.msg, str): msgs = [] error = record.exc_info record.exc_info = None msgdata = record.msg if len(msgdata) == 0: return while msgdata: newrec = copy.copy(record) newrec.msg = msgdata[:250] msgs.append(newrec) msgdata = msgdata[250:] msgs[0].exc_info = error else: msgs = [record] for newrec in msgs: msg = '<%d>%s\000' % \ (self.encodePriority(self.facility, newrec.levelname.lower()), self.format(newrec)) try: try: encoded = msg.encode('utf-8') except UnicodeDecodeError: encoded = msg self.socket.send(encoded) except socket.error: for i in range(10): # pylint: disable=W0612 try: if isinstance(self.address, tuple): self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.socket.connect(self.address) else: self._connect_unixsocket(self.address) break except socket.error: continue try: reconn = copy.copy(record) reconn.msg = 'Reconnected to syslog' self.socket.send('<%d>%s\000' % (self.encodePriority(self.facility, logging.WARNING), self.format(reconn))) self.socket.send(msg) except: # pylint: disable=W0702 # If we still fail then drop it. 
Running # bcfg2-server as non-root can trigger permission # denied exceptions. pass def add_console_handler(level=logging.DEBUG): """ Add a logging handler that logs at a level to sys.stderr """ console = logging.StreamHandler(sys.stderr) console.setLevel(level) # tell the handler to use this format console.setFormatter(TermiosFormatter()) try: console.set_name("console") # pylint: disable=E1101 except AttributeError: console.name = "console" # pylint: disable=W0201 logging.root.addHandler(console) def add_syslog_handler(procname=None, syslog_facility='daemon', level=logging.DEBUG): """Add a logging handler that logs as procname to syslog_facility.""" if procname is None: procname = Bcfg2.Options.get_parser().prog try: try: syslog = FragmentingSysLogHandler(procname, '/dev/log', syslog_facility) except socket.error: syslog = FragmentingSysLogHandler(procname, ('localhost', 514), syslog_facility) try: syslog.set_name("syslog") # pylint: disable=E1101 except AttributeError: syslog.name = "syslog" # pylint: disable=W0201 syslog.setLevel(level) syslog.setFormatter( logging.Formatter('%(name)s[%(process)d]: %(message)s')) logging.root.addHandler(syslog) except socket.error: logging.root.error("Failed to activate syslogging") except: print("Failed to activate syslogging") def add_file_handler(level=logging.DEBUG): """Add a logging handler that logs to a file.""" filelog = logging.FileHandler(Bcfg2.Options.setup.logfile) try: filelog.set_name("file") # pylint: disable=E1101 except AttributeError: filelog.name = "file" # pylint: disable=W0201 filelog.setLevel(level) filelog.setFormatter( logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s')) logging.root.addHandler(filelog) def default_log_level(): """ Get the default log level, according to the configuration """ if Bcfg2.Options.setup.debug: return logging.DEBUG elif Bcfg2.Options.setup.verbose: return logging.INFO else: return logging.WARNING def setup_logging(): """Setup logging for Bcfg2 software.""" if hasattr(logging, 'already_setup'): return level = default_log_level() params = [] to_console = True if hasattr(Bcfg2.Options.setup, "daemon"): if Bcfg2.Options.setup.daemon: to_console = False # if a command can be daemonized, but hasn't been, then we # assume that they're running it in the foreground and thus # want some more output. clvl = min(level, logging.INFO) else: clvl = level if to_console: params.append("%s to console" % logging.getLevelName(clvl)) add_console_handler(level=clvl) if hasattr(Bcfg2.Options.setup, "syslog") and Bcfg2.Options.setup.syslog: slvl = min(level, logging.INFO) params.append("%s to syslog" % logging.getLevelName(slvl)) add_syslog_handler(level=slvl) if Bcfg2.Options.setup.logfile: params.append("%s to %s" % (logging.getLevelName(level), Bcfg2.Options.setup.logfile)) add_file_handler(level=level) logging.root.setLevel(logging.DEBUG) logging.root.debug("Configured logging: %s" % "; ".join(params)) logging.already_setup = True class Debuggable(object): """ Mixin to add a debugging interface to an object """ options = [] #: List of names of methods to be exposed as XML-RPC functions, if #: applicable to the child class __rmi__ = ['toggle_debug', 'set_debug'] #: How exposed XML-RPC functions should be dispatched to child #: processes. __child_rmi__ = __rmi__[:] def __init__(self, name=None): """ :param name: The name of the logger object to get. If none is supplied, the full name of the class (including module) will be used. 
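# setup_logging() above attaches console, syslog and file handlers to the
# root logger depending on the options that were parsed.  A compact,
# stand-alone sketch of the console-plus-file part of that arrangement;
# the log path below is an example, not a Bcfg2 default:
import logging
import sys

def demo_logging(logfile="/tmp/bcfg2-demo.log", verbose=True):
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.INFO if verbose else logging.WARNING)
    filelog = logging.FileHandler(logfile)
    filelog.setFormatter(
        logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(console)
    root.addHandler(filelog)
    return root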
:type name: string """ if name is None: name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) self.debug_flag = Bcfg2.Options.setup.debug self.logger = logging.getLogger(name) def set_debug(self, debug): """ Explicitly enable or disable debugging. :returns: bool - The new value of the debug flag """ self.debug_flag = debug return debug def toggle_debug(self): """ Turn debugging output on or off. :returns: bool - The new value of the debug flag """ return self.set_debug(not self.debug_flag) def debug_log(self, message, flag=None): """ Log a message at the debug level. :param message: The message to log :type message: string :param flag: Override the current debug flag with this value :type flag: bool :returns: None """ if (flag is None and self.debug_flag) or flag: self.logger.error(message) class _OptionContainer(object): """ Container for options loaded at import-time to configure logging """ options = [ Bcfg2.Options.BooleanOption( '-d', '--debug', help='Enable debugging output', cf=('logging', 'debug')), Bcfg2.Options.BooleanOption( '-v', '--verbose', help='Enable verbose output', cf=('logging', 'verbose')), Bcfg2.Options.PathOption( '-o', '--logfile', help='Set path of file log', cf=('logging', 'path'))] @staticmethod def options_parsed_hook(): """ initialize settings from /etc/bcfg2-web.conf or /etc/bcfg2.conf, or set up basic defaults. this lets manage.py work in all cases """ setup_logging() Bcfg2.Options.get_parser().add_component(_OptionContainer) src/lib/Bcfg2/Options/000077500000000000000000000000001303523157100147715ustar00rootroot00000000000000src/lib/Bcfg2/Options/Actions.py000066400000000000000000000156771303523157100167630ustar00rootroot00000000000000""" Custom argparse actions """ import sys import argparse from Bcfg2.Options.Parser import get_parser, OptionParserException from Bcfg2.Options.Options import _debug __all__ = ["ConfigFileAction", "ComponentAction", "PluginsAction"] class FinalizableAction(argparse.Action): """ A FinalizableAction requires some additional action to be taken when storing the value, and as a result must be finalized if the default value is used.""" def __init__(self, *args, **kwargs): argparse.Action.__init__(self, *args, **kwargs) self._final = False def finalize(self, parser, namespace): """ Finalize a default value by calling the action callable. """ if not self._final: self.__call__(parser, namespace, getattr(namespace, self.dest, self.default)) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) self._final = True class ComponentAction(FinalizableAction): """ ComponentAction automatically imports classes and modules based on the value of the option, and automatically collects options from the loaded classes and modules. It cannot be used by itself, but must be subclassed, with either :attr:`mapping` or :attr:`bases` overridden. See :class:`Bcfg2.Options.PluginsAction` for an example. ComponentActions expect to be given a list of class names. If :attr:`bases` is overridden, then it will attempt to import those classes from identically named modules within the given bases. For instance: .. 
code-block:: python class FooComponentAction(Bcfg2.Options.ComponentAction): bases = ["Bcfg2.Server.Foo"] class FooLoader(object): options = [ Bcfg2.Options.Option( "--foo", type=Bcfg2.Options.Types.comma_list, default=["One"], action=FooComponentAction)] If "--foo One,Two,Three" were given on the command line, then ``FooComponentAction`` would attempt to import ``Bcfg2.Server.Foo.One.One``, ``Bcfg2.Server.Foo.Two.Two``, and ``Bcfg2.Server.Foo.Three.Three``. (It would also call :func:`Bcfg2.Options.Parser.add_component` with each of those classes as arguments.) Note that, although ComponentActions expect lists of components (by default; this can be overridden by setting :attr:`islist`), you must still explicitly specify a ``type`` argument to the :class:`Bcfg2.Options.Option` constructor to split the value into a list. Note also that, unlike other actions, the default value of a ComponentAction option does not need to be the actual literal final value. (I.e., you don't have to import ``Bcfg2.Server.Foo.One.One`` and set it as the default in the example above; the string "One" suffices.) """ #: A list of parent modules where modules or classes should be #: imported from. bases = [] #: A mapping of `` => `` that components will be #: loaded from. This can be used to permit much more complex #: behavior than just a list of :attr:`bases`. mapping = dict() #: If ``module`` is True, then only the module will be loaded, not #: a class from the module. For instance, in the example above, #: ``FooComponentAction`` would attempt instead to import #: ``Bcfg2.Server.Foo.One``, ``Bcfg2.Server.Foo.Two``, and #: ``Bcfg2.Server.Foo.Three``. module = False #: By default, ComponentActions expect a list of components to #: load. If ``islist`` is False, then it will only expect a #: single component. islist = True #: If ``fail_silently`` is True, then failures to import modules #: or classes will not be logged. This is useful when the default #: is to import everything, some of which are expected to fail. fail_silently = False def __init__(self, *args, **kwargs): if self.mapping and not self.islist: if 'choices' not in kwargs: kwargs['choices'] = self.mapping.keys() FinalizableAction.__init__(self, *args, **kwargs) def _import(self, module, name): """ Import the given name from the given module, handling errors """ try: return getattr(__import__(module, fromlist=[name]), name) except (AttributeError, ImportError): msg = "Failed to load %s from %s: %s" % (name, module, sys.exc_info()[1]) if not self.fail_silently: print(msg) else: _debug(msg) return None def _load_component(self, name): """ Import a single class or module, adding it as a component to the parser. :param name: The name of the class or module to import, without the base prepended. :type name: string :returns: the imported class or module """ cls = None if self.mapping and name in self.mapping: cls = self.mapping[name] elif "." 
in name: cls = self._import(*name.rsplit(".", 1)) else: for base in self.bases: if self.module: mod = base else: mod = "%s.%s" % (base, name) cls = self._import(mod, name) if cls is not None: break if cls: get_parser().add_component(cls) elif not self.fail_silently: raise OptionParserException("Could not load component %s" % name) return cls def __call__(self, parser, namespace, values, option_string=None): if values is None: result = None else: if self.islist: result = [] for val in values: cls = self._load_component(val) if cls is not None: result.append(cls) else: result = self._load_component(values) FinalizableAction.__call__(self, parser, namespace, result, option_string=option_string) class ConfigFileAction(FinalizableAction): """ ConfigFileAction automatically loads and parses a supplementary config file (e.g., ``bcfg2-web.conf`` or ``bcfg2-lint.conf``). """ def __call__(self, parser, namespace, values, option_string=None): if values: parser.add_config_file(self.dest, values, reparse=False) else: _debug("No config file passed for %s" % self) FinalizableAction.__call__(self, parser, namespace, values, option_string=option_string) class PluginsAction(ComponentAction): """ :class:`Bcfg2.Options.ComponentAction` subclass for loading Bcfg2 server plugins. """ bases = ['Bcfg2.Server.Plugins'] fail_silently = True src/lib/Bcfg2/Options/Common.py000066400000000000000000000111271303523157100165750ustar00rootroot00000000000000""" Common options used in multiple different contexts. """ from Bcfg2.Utils import classproperty from Bcfg2.Options import Types from Bcfg2.Options.Actions import PluginsAction, ComponentAction from Bcfg2.Options.Parser import repository as _repository_option from Bcfg2.Options import Option, PathOption, BooleanOption __all__ = ["Common"] class ReportingTransportAction(ComponentAction): """ :class:`Bcfg2.Options.ComponentAction` that loads a single reporting transport from :mod:`Bcfg2.Reporting.Transport`. """ islist = False bases = ['Bcfg2.Reporting.Transport'] class ReportingStorageAction(ComponentAction): """ :class:`Bcfg2.Options.ComponentAction` that loads a single reporting storage driver from :mod:`Bcfg2.Reporting.Storage`. """ islist = False bases = ['Bcfg2.Reporting.Storage'] class Common(object): """ Common options used in multiple different contexts. 
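# _load_component() above builds a module path such as "<base>.<Name>" and
# then fetches the attribute "<Name>" from the imported module.  The core of
# that __import__/getattr lookup, demonstrated against a standard-library
# module so the snippet stands on its own (Bcfg2 composes the module path
# from its `bases` list instead):
def load_component(modpath, name):
    """Import modpath and return the attribute called name inside it."""
    return getattr(__import__(modpath, fromlist=[name]), name)

OrderedDict = load_component("collections", "OrderedDict")
assert OrderedDict([("a", 1)])["a"] == 1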
""" _plugins = None _filemonitor = None _reporting_storage = None _reporting_transport = None @classproperty def plugins(cls): """ Load a list of Bcfg2 server plugins """ if cls._plugins is None: cls._plugins = Option( cf=('server', 'plugins'), type=Types.comma_list, help="Server plugin list", action=PluginsAction, default=['Bundler', 'Cfg', 'Metadata', 'Pkgmgr', 'Rules', 'SSHbase']) return cls._plugins @classproperty def filemonitor(cls): """ Load a single Bcfg2 file monitor (from :attr:`Bcfg2.Server.FileMonitor.available`) """ if cls._filemonitor is None: import Bcfg2.Server.FileMonitor class FileMonitorAction(ComponentAction): """ ComponentAction for loading a single FAM backend class """ islist = False mapping = Bcfg2.Server.FileMonitor.available cls._filemonitor = Option( cf=('server', 'filemonitor'), action=FileMonitorAction, default='default', help='Server file monitoring driver') return cls._filemonitor @classproperty def reporting_storage(cls): """ Load a Reporting storage backend """ if cls._reporting_storage is None: cls._reporting_storage = Option( cf=('reporting', 'storage'), dest="reporting_storage", help='Reporting storage engine', action=ReportingStorageAction, default='DjangoORM') return cls._reporting_storage @classproperty def reporting_transport(cls): """ Load a Reporting transport backend """ if cls._reporting_transport is None: cls._reporting_transport = Option( cf=('reporting', 'transport'), dest="reporting_transport", help='Reporting transport', action=ReportingTransportAction, default='DirectStore') return cls._reporting_transport #: Set the path to the Bcfg2 repository repository = _repository_option #: Daemonize process, storing PID daemon = PathOption( '-D', '--daemon', help="Daemonize process, storing PID") #: Run interactively, prompting the user for each change interactive = BooleanOption( "-I", "--interactive", help='Run interactively, prompting the user for each change') #: Log to syslog syslog = BooleanOption( cf=('logging', 'syslog'), help="Log to syslog", default=True) #: Server location location = Option( '-S', '--server', cf=('components', 'bcfg2'), default='https://localhost:6789', metavar='', help="Server location") #: Communication password password = Option( '-x', '--password', cf=('communication', 'password'), metavar='', help="Communication Password") #: Path to SSL CA certificate ssl_ca = PathOption( cf=('communication', 'ca'), help='Path to SSL CA Cert') #: Communication protocol protocol = Option( cf=('communication', 'protocol'), default='xmlrpc/tlsv1', choices=['xmlrpc/ssl', 'xmlrpc/tlsv1'], help='Communication protocol to use.') #: Default Path paranoid setting default_paranoid = Option( cf=('mdata', 'paranoid'), dest="default_paranoid", default='true', choices=['true', 'false'], help='Default Path paranoid setting') #: Client timeout client_timeout = Option( "-t", "--timeout", type=float, default=90.0, dest="client_timeout", cf=('communication', 'timeout'), help='Set the client XML-RPC timeout') src/lib/Bcfg2/Options/OptionGroups.py000066400000000000000000000201461303523157100200160ustar00rootroot00000000000000""" Option grouping classes """ import re import copy import fnmatch from Bcfg2.Options import Option from itertools import chain __all__ = ["OptionGroup", "ExclusiveOptionGroup", "Subparser", "WildcardSectionGroup"] class _OptionContainer(list): """ Parent class of all option groups """ def list_options(self): """ Get a list of all options contained in this group, including options contained in option groups in this group, and so 
on. """ return list(chain(*[o.list_options() for o in self])) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, list.__repr__(self)) def add_to_parser(self, parser): """ Add this option group to a :class:`Bcfg2.Options.Parser` object. """ for opt in self: opt.add_to_parser(parser) class OptionGroup(_OptionContainer): """ Generic option group that is used only to organize options. This uses :meth:`argparse.ArgumentParser.add_argument_group` behind the scenes. """ def __init__(self, *items, **kwargs): r""" :param \*args: Child options :type \*args: Bcfg2.Options.Option :param title: The title of the option group :type title: string :param description: A longer description of the option group :param description: string """ _OptionContainer.__init__(self, items) self.title = kwargs.pop('title') self.description = kwargs.pop('description', None) def add_to_parser(self, parser): group = parser.add_argument_group(self.title, self.description) _OptionContainer.add_to_parser(self, group) class ExclusiveOptionGroup(_OptionContainer): """ Option group that ensures that only one argument in the group is present. This uses :meth:`argparse.ArgumentParser.add_mutually_exclusive_group` behind the scenes.""" def __init__(self, *items, **kwargs): r""" :param \*args: Child options :type \*args: Bcfg2.Options.Option :param required: Exactly one argument in the group *must* be specified. :type required: boolean """ _OptionContainer.__init__(self, items) self.required = kwargs.pop('required', False) def add_to_parser(self, parser): _OptionContainer.add_to_parser( self, parser.add_mutually_exclusive_group(required=self.required)) class Subparser(_OptionContainer): """ Option group that adds options in it to a subparser. This uses a lot of functionality tied to `argparse Sub-commands `_. The subcommand string itself is stored in the :attr:`Bcfg2.Options.setup` namespace as ``subcommand``. This is commonly used with :class:`Bcfg2.Options.Subcommand` groups. """ _subparsers = dict() def __init__(self, *items, **kwargs): r""" :param \*args: Child options :type \*args: Bcfg2.Options.Option :param name: The name of the subparser. Required. :type name: string :param help: A help message for the subparser :param help: string """ self.name = kwargs.pop('name') self.help = kwargs.pop('help', None) _OptionContainer.__init__(self, items) def __repr__(self): return "%s %s(%s)" % (self.__class__.__name__, self.name, list.__repr__(self)) def add_to_parser(self, parser): if parser not in self._subparsers: self._subparsers[parser] = parser.add_subparsers(dest='subcommand') subparser = self._subparsers[parser].add_parser(self.name, help=self.help) _OptionContainer.add_to_parser(self, subparser) class WildcardSectionGroup(_OptionContainer, Option): """WildcardSectionGroups contain options that may exist in several different sections of the config that match a glob. It works by creating options on the fly to match the sections described in the glob. For example, consider: .. code-block:: python options = [ Bcfg2.Options.WildcardSectionGroup( Bcfg2.Options.Option(cf=("myplugin:*", "number"), type=int), Bcfg2.Options.Option(cf=("myplugin:*", "description"))] If the config file contained ``[myplugin:foo]`` and ``[myplugin:bar]`` sections, then this would automagically create options for each of those. The end result would be: .. 
code-block:: python >>> Bcfg2.Options.setup Namespace(myplugin_myplugin_bar_description='Bar description', myplugin_myplugin_bar_number=2, myplugin_myplugin_foo_description='Foo description', myplugin_myplugin_foo_number=1, myplugin_sections=['myplugin:foo', 'myplugin:bar']) All options must have the same section glob. The options are stored in an automatically-generated destination given by:: <prefix><section>_<destination> ``<destination>`` is the original ``dest`` of the option. ``<section>`` is the section that it's found in. ``<prefix>`` is automatically generated from the section glob. (This can be overridden with the constructor.) Both ``<section>
    `` and ```` have had all consecutive characters disallowed in Python variable names replaced with underscores. This group stores an additional option, the sections themselves, in an option given by ``sections``. """ #: Regex to automatically get a destination for this option _dest_re = re.compile(r'(\A(_|[^A-Za-z])+)|((_|[^A-Za-z0-9])+)') def __init__(self, *items, **kwargs): r""" :param \*args: Child options :type \*args: Bcfg2.Options.Option :param prefix: The prefix to use for options generated by this option group. By default this is generated automatically from the config glob; see above for details. :type prefix: string :param dest: The destination for the list of known sections that match the glob. :param dest: string """ _OptionContainer.__init__(self, []) self._section_glob = items[0].cf[0] # get a default destination self._prefix = kwargs.get("prefix", self._dest_re.sub('_', self._section_glob)) Option.__init__(self, dest=kwargs.get('dest', self._prefix + "sections")) self.option_templates = items def list_options(self): return [self] + _OptionContainer.list_options(self) def from_config(self, cfp): sections = [] for section in cfp.sections(): if fnmatch.fnmatch(section, self._section_glob): sections.append(section) newopts = [] for opt_tmpl in self.option_templates: option = copy.deepcopy(opt_tmpl) option.cf = (section, option.cf[1]) option.dest = "%s%s_%s" % (self._prefix, self._dest_re.sub('_', section), option.dest) newopts.append(option) self.extend(newopts) for parser in self.parsers: parser.add_options(newopts) return sections def add_to_parser(self, parser): Option.add_to_parser(self, parser) _OptionContainer.add_to_parser(self, parser) def __eq__(self, other): return (_OptionContainer.__eq__(self, other) and self.option_templates == getattr(other, "option_templates", None)) def __repr__(self): if len(self) == 0: return "%s(%s)" % (self.__class__.__name__, ", ".join(".".join(o.cf) for o in self.option_templates)) else: return _OptionContainer.__repr__(self) src/lib/Bcfg2/Options/Options.py000066400000000000000000000433121303523157100170010ustar00rootroot00000000000000"""Base :class:`Bcfg2.Options.Option` object to represent an option. Unlike options in :mod:`argparse`, an Option object does not need to be associated with an option parser; it exists on its own. """ import argparse import copy import fnmatch import os import sys from Bcfg2.Options import Types from Bcfg2.Compat import ConfigParser __all__ = ["Option", "BooleanOption", "RepositoryMacroOption", "PathOption", "PositionalArgument", "_debug"] unit_test = False # pylint: disable=C0103 def _debug(msg): """ Option parsing happens before verbose/debug have been set -- they're options, after all -- so option parsing verbosity is enabled by changing this to True. The verbosity here is primarily of use to developers. """ if unit_test: print("DEBUG: %s" % msg) elif os.environ.get('BCFG2_OPTIONS_DEBUG', '0').lower() in ["true", "yes", "on", "1"]: sys.stderr.write("%s\n" % msg) #: A dict that records a mapping of argparse action name (e.g., #: "store_true") to the argparse Action class for it. See #: :func:`_get_action_class` _action_map = dict() # pylint: disable=C0103 def _get_action_class(action_name): """ Given an argparse action name (e.g., "store_true"), get the related :class:`argparse.Action` class. The mapping that stores this information in :mod:`argparse` itself is unfortunately private, so it's an implementation detail that we shouldn't depend on. 
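# --- Illustrative sketch (not part of the original source) ---
# A WildcardSectionGroup with the optional ``prefix`` and ``dest``
# overrides mentioned above.  The "myplugin:*" glob mirrors the
# docstring example; the override values are hypothetical.
import Bcfg2.Options

options = [
    Bcfg2.Options.WildcardSectionGroup(
        Bcfg2.Options.Option(cf=("myplugin:*", "number"), type=int),
        Bcfg2.Options.Option(cf=("myplugin:*", "description")),
        prefix="mp_",               # replaces the auto-generated prefix
        dest="myplugin_sections")]  # where the matching section names land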
So we just instantiate a dummy parser, add a dummy argument, and determine the class that way. """ if (isinstance(action_name, type) and issubclass(action_name, argparse.Action)): return action_name if action_name not in _action_map: action = argparse.ArgumentParser().add_argument(action_name, action=action_name) _action_map[action_name] = action.__class__ return _action_map[action_name] class Option(object): """ Representation of an option that can be specified on the command line, as an environment variable, or in a config file. Precedence is in that order; that is, an option specified on the command line takes precendence over an option given by the environment, which takes precedence over an option specified in the config file. """ #: Keyword arguments that should not be passed on to the #: :class:`argparse.ArgumentParser` constructor _local_args = ['cf', 'env', 'man'] def __init__(self, *args, **kwargs): """ See :meth:`argparse.ArgumentParser.add_argument` for a full list of accepted parameters. In addition to supporting all arguments and keyword arguments from :meth:`argparse.ArgumentParser.add_argument`, several additional keyword arguments are allowed. :param cf: A tuple giving the section and option name that this argument can be referenced as in the config file. The option name may contain the wildcard '*', in which case the value will be a dict of all options matching the glob. (To use a wildcard in the section, use a :class:`Bcfg2.Options.WildcardSectionGroup`.) :type cf: tuple :param env: An environment variable that the value of this option can be taken from. :type env: string :param man: A detailed description of the option that will be used to populate automatically-generated manpages. :type man: string """ #: The options by which this option can be called. #: (Coincidentally, this is also the list of arguments that #: will be passed to #: :meth:`argparse.ArgumentParser.add_argument` when this #: option is added to a parser.) As a result, ``args`` can be #: tested to see if this argument can be given on the command #: line at all, or if it is purely a config file option. self.args = args self._kwargs = kwargs #: The tuple giving the section and option name for this #: option in the config file self.cf = None # pylint: disable=C0103 #: The environment variable that this option can take its #: value from self.env = None #: A detailed description of this option that will be used in #: man pages. self.man = None #: A list of :class:`Bcfg2.Options.Parser` objects to which #: this option has been added. (There will be more than one #: parser if this option is added to a subparser, for #: instance.) self.parsers = [] #: A dict of :class:`Bcfg2.Options.Parser` -> #: :class:`argparse.Action` that gives the actions that #: resulted from adding this option to each parser that it was #: added to. If this option cannot be specified on the #: command line (i.e., it only takes its value from the config #: file), then this will be empty. self.actions = dict() self.type = self._kwargs.get("type") self.help = self._kwargs.get("help") self._default = self._kwargs.get("default") for kwarg in self._local_args: setattr(self, kwarg, self._kwargs.pop(kwarg, None)) if self.args: # cli option self._dest = None else: action_cls = _get_action_class(self._kwargs.get('action', 'store')) # determine the name of this option. use, in order, the # 'name' kwarg; the option name; the environment variable # name. 
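# --- Illustrative sketch (not part of the original source) ---
# One option that can come from the command line, the environment, or
# the config file, in that order of precedence.  The names used here
# ("--work-dir", BCFG2_WORK_DIR, the [example] section) are hypothetical.
from Bcfg2.Options import PathOption

work_dir = PathOption(
    "--work-dir",
    cf=("example", "work_dir"),
    env="BCFG2_WORK_DIR",
    default="/var/tmp/example",
    help="Hypothetical working directory option")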
self._dest = None if 'dest' in self._kwargs: self._dest = self._kwargs.pop('dest') elif self.env is not None: self._dest = self.env elif self.cf is not None: self._dest = self.cf[1] self._dest = self._dest.lower().replace("-", "_") kwargs = copy.copy(self._kwargs) kwargs.pop("action", None) self.actions[None] = action_cls(self._dest, self._dest, **kwargs) def __repr__(self): sources = [] if self.args: sources.extend(self.args) if self.cf: sources.append("%s.%s" % self.cf) if self.env: sources.append("$" + self.env) spec = ["sources=%s" % sources, "default=%s" % self.default, "%d parsers" % len(self.parsers)] return '%s(%s: %s)' % (self.__class__.__name__, self.dest, ", ".join(spec)) def list_options(self): """ List options contained in this option. This exists to provide a consistent interface with :class:`Bcfg2.Options.OptionGroup` """ return [self] def finalize(self, namespace): """ Finalize the default value for this option. This is used with actions (such as :class:`Bcfg2.Options.ComponentAction`) that allow you to specify a default in a different format than its final storage format; this can be called after it has been determined that the default will be used (i.e., the option is not given on the command line or in the config file) to store the appropriate default value in the appropriate format.""" for parser, action in self.actions.items(): if hasattr(action, "finalize"): if parser: _debug("Finalizing %s for %s" % (self, parser)) else: _debug("Finalizing %s" % self) action.finalize(parser, namespace) @property def _type_func(self): """get a function for converting a value to the option type. this always returns a callable, even when ``type`` is None. """ if self.type: return self.type else: return lambda x: x def from_config(self, cfp): """ Get the value of this option from the given :class:`ConfigParser.ConfigParser`. If it is not found in the config file, the default is returned. (If there is no default, None is returned.) :param cfp: The config parser to get the option value from :type cfp: ConfigParser.ConfigParser :returns: The default value """ if not self.cf: return None if '*' in self.cf[1]: if cfp.has_section(self.cf[0]): # build a list of known options in this section, and # exclude them exclude = set() for parser in self.parsers: exclude.update(o.cf[1] for o in parser.option_list if o.cf and o.cf[0] == self.cf[0]) rv = dict([(o, cfp.get(self.cf[0], o)) for o in fnmatch.filter(cfp.options(self.cf[0]), self.cf[1]) if o not in exclude]) else: rv = {} else: try: rv = self._type_func(self.get_config_value(cfp)) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): rv = None _debug("Getting value of %s from config file(s): %s" % (self, rv)) return rv def get_config_value(self, cfp): """fetch a value from the config file. This is passed the config parser. Its result is passed to the type function for this option. It can be overridden to, e.g., handle boolean options. """ return cfp.get(*self.cf) def get_environ_value(self, value): """fetch a value from the environment. This is passed the raw value from the environment variable, and its result is passed to the type function for this option. It can be overridden to, e.g., handle boolean options. """ return value def default_from_config(self, cfp): """ Set the default value of this option from the config file or from the environment. 
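# --- Illustrative sketch (not part of the original source) ---
# An option whose config-file name contains a wildcard, as handled by
# from_config() above.  Given a hypothetical section
#
#   [packages]
#   yum_foo = bar
#   yum_baz = quux
#
# the parsed value is the dict {'yum_foo': 'bar', 'yum_baz': 'quux'}
# (options already claimed by other Option objects are excluded).
from Bcfg2.Options import Option

yum_settings = Option(
    cf=("packages", "yum_*"),
    help="Hypothetical glob of yum-related settings")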
:param cfp: The config parser to get the option value from :type cfp: ConfigParser.ConfigParser """ if self.env and self.env in os.environ: self.default = self._type_func( self.get_environ_value(os.environ[self.env])) _debug("Setting the default of %s from environment: %s" % (self, self.default)) else: val = self.from_config(cfp) if val is not None: _debug("Setting the default of %s from config: %s" % (self, val)) self.default = val def _get_default(self): """ Getter for the ``default`` property """ return self._default def _set_default(self, value): """ Setter for the ``default`` property """ self._default = value for action in self.actions.values(): action.default = value #: The current default value of this option default = property(_get_default, _set_default) def _get_dest(self): """ Getter for the ``dest`` property """ return self._dest def _set_dest(self, value): """ Setter for the ``dest`` property """ self._dest = value for action in self.actions.values(): action.dest = value def early_parsing_hook(self, early_opts): # pylint: disable=C0111 """Hook called at the end of early option parsing. This can be used to save option values for macro fixup. """ pass #: The namespace destination of this option (see `dest #: `_) dest = property(_get_dest, _set_dest) def add_to_parser(self, parser): """ Add this option to the given parser. :param parser: The parser to add the option to. :type parser: Bcfg2.Options.Parser :returns: argparse.Action """ self.parsers.append(parser) if self.args: # cli option _debug("Adding %s to %s as a CLI option" % (self, parser)) action = parser.add_argument(*self.args, **self._kwargs) if not self._dest: self._dest = action.dest if self._default: action.default = self._default self.actions[parser] = action else: # else, config file-only option _debug("Adding %s to %s as a config file-only option" % (self, parser)) class RepositoryMacroOption(Option): """Option that does translation of ```` macros. Macro translation is done on the fly instead of just fixing up all values at the end of parsing because macro expansion needs to be done before path canonicalization for :class:`Bcfg2.Options.Options.PathOption`. """ repository = None def __init__(self, *args, **kwargs): self._original_type = kwargs.pop('type', lambda x: x) kwargs['type'] = self._type kwargs.setdefault('metavar', '') Option.__init__(self, *args, **kwargs) def early_parsing_hook(self, early_opts): if hasattr(early_opts, "repository"): if self.__class__.repository is None: _debug("Setting repository to %s for %s" % (early_opts.repository, self.__class__.__name__)) self.__class__.repository = early_opts.repository else: _debug("Repository is already set for %s" % self.__class__) def _get_default(self): """ Getter for the ``default`` property """ if not hasattr(self._default, "replace"): return self._default else: return self._type(self._default) default = property(_get_default, Option._set_default) def transform_value(self, value): """transform the value after macro expansion. this can be overridden to further transform the value set by the user *after* macros are expanded, but before the user's ``type`` function is applied. principally exists for PathOption to canonicalize the path. """ return value def _type(self, value): """Type function that fixes up macros.""" if self.__class__.repository is None: return value else: return self._original_type(self.transform_value( value.replace("", self.__class__.repository))) class PathOption(RepositoryMacroOption): """Shortcut for options that expect a path argument. 
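# --- Illustrative sketch (not part of the original source) ---
# A config-file-only PathOption whose default uses the repository macro
# handled by RepositoryMacroOption above.  This assumes the macro
# literal is "<repository>" (the angle-bracketed text was stripped from
# this copy of the source); the option name is hypothetical.
from Bcfg2.Options import PathOption

vcs_root = PathOption(
    cf=("server", "vcs_root"),
    default="<repository>/vcs",
    help="Hypothetical path option defaulting to a path under the "
         "-Q/--repository value")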
Uses :meth:`Bcfg2.Options.Types.path` to transform the argument into a canonical path. The type of a path option can also be overridden to return a file-like object. For example: .. code-block:: python options = [ Bcfg2.Options.PathOption( "--input", type=argparse.FileType('r'), help="The input file")] PathOptions also do translation of ```` macros. """ def transform_value(self, value): return Types.path(value) class _BooleanOptionAction(argparse.Action): """BooleanOptionAction sets a boolean value. - if None is passed, store the default - if the option_string is not None, then the option was passed on the command line, thus store the opposite of the default (this is the argparse store_true and store_false behavior) - if a boolean value is passed, use that Makes a copy of the initial default, because otherwise the default can be changed by config file settings or environment variables. For instance, if a boolean option that defaults to True was set to False in the config file, specifying the option on the CLI would then set it back to True. Defined here instead of :mod:`Bcfg2.Options.Actions` because otherwise there is a circular import Options -> Actions -> Parser -> Options. """ def __init__(self, *args, **kwargs): argparse.Action.__init__(self, *args, **kwargs) self.original = self.default def __call__(self, parser, namespace, values, option_string=None): if values is None: setattr(namespace, self.dest, self.default) elif option_string is not None: setattr(namespace, self.dest, not self.original) else: setattr(namespace, self.dest, bool(values)) class BooleanOption(Option): """ Shortcut for boolean options. The default is False, but this can easily be overridden: .. code-block:: python options = [ Bcfg2.Options.PathOption( "--dwim", default=True, help="Do What I Mean")] """ def __init__(self, *args, **kwargs): kwargs.setdefault('action', _BooleanOptionAction) kwargs.setdefault('nargs', 0) kwargs.setdefault('default', False) Option.__init__(self, *args, **kwargs) def get_environ_value(self, value): if value.lower() in ["false", "no", "off", "0"]: return False elif value.lower() in ["true", "yes", "on", "1"]: return True else: raise ValueError("Invalid boolean value %s" % value) def get_config_value(self, cfp): """fetch a value from the config file. This is passed the config parser. Its result is passed to the type function for this option. It can be overridden to, e.g., handle boolean options. """ return cfp.getboolean(*self.cf) class PositionalArgument(Option): """ Shortcut for positional arguments. """ def __init__(self, *args, **kwargs): if 'metavar' not in kwargs: kwargs['metavar'] = '<%s>' % args[0] Option.__init__(self, *args, **kwargs) src/lib/Bcfg2/Options/Parser.py000066400000000000000000000415251303523157100166060ustar00rootroot00000000000000"""The option parser.""" import argparse import os import sys from Bcfg2.version import __version__ from Bcfg2.Compat import ConfigParser from Bcfg2.Options import Option, PathOption, _debug __all__ = ["setup", "OptionParserException", "Parser", "get_parser", "new_parser"] #: The repository option. This is specified here (and imported into #: :module:`Bcfg2.Options.Common`) rather than vice-versa due to #: circular imports. repository = PathOption( # pylint: disable=C0103 '-Q', '--repository', cf=('server', 'repository'), default='/var/lib/bcfg2', help="Server repository path") #: A module-level :class:`argparse.Namespace` object that stores all #: configuration for Bcfg2. 
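# --- Illustrative sketch (not part of the original source) ---
# The boolean default-flipping behaviour implemented by
# _BooleanOptionAction above: with default=True the value stays True
# when the flag is absent, and passing the flag on the command line
# stores the opposite of the hard-coded default (config file and
# environment settings only change the default, never the flip).
# The option name and config location are hypothetical.
from Bcfg2.Options import BooleanOption

secure = BooleanOption(
    "--secure",
    cf=("communication", "secure"),
    default=True,
    help="Hypothetical boolean option that defaults to on")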
setup = argparse.Namespace(version=__version__, # pylint: disable=C0103 name="Bcfg2", uri='http://trac.mcs.anl.gov/projects/bcfg2') class OptionParserException(Exception): """ Base exception raised for generic option parser errors """ class Parser(argparse.ArgumentParser): """ The Bcfg2 option parser. Most interfaces should not need to instantiate a parser, but should instead use :func:`Bcfg2.Options.get_parser` to get the parser that already exists.""" #: Option for specifying the path to the Bcfg2 config file configfile = PathOption('-C', '--config', env="BCFG2_CONFIG_FILE", help="Path to configuration file", default="/etc/bcfg2.conf") #: Verbose version string that is printed if executed with --version _version_string = "%s %s on Python %s" % ( os.path.basename(sys.argv[0]), __version__, ".".join(str(v) for v in sys.version_info[0:3])) #: Builtin options that apply to all commands options = [configfile, Option('--version', help="Print the version and exit", action="version", version=_version_string), Option('-E', '--encoding', metavar='', default='UTF-8', help="Encoding of config files", cf=('components', 'encoding'))] #: Flag used in unit tests to disable actual config file reads unit_test = False def __init__(self, **kwargs): """ See :class:`argparse.ArgumentParser` for a full list of accepted parameters. In addition to supporting all arguments and keyword arguments from :class:`argparse.ArgumentParser`, several additional keyword arguments are allowed. :param components: A list of components to add to the parser. :type components: list :param namespace: The namespace to store options in. Default is :attr:`Bcfg2.Options.setup`. :type namespace: argparse.Namespace :param add_base_options: Whether or not to add the options in :attr:`Bcfg2.Options.Parser.options` to the parser. Setting this to False is default for subparsers. Default is True. :type add_base_options: bool """ self._cfp = ConfigParser.ConfigParser() components = kwargs.pop('components', []) #: The namespace options will be stored in. self.namespace = kwargs.pop('namespace', setup) if self.namespace is None: self.namespace = setup add_base_options = kwargs.pop('add_base_options', True) #: Flag to indicate that this is the pre-parsing 'early' run #: for important options like database settings that must be #: loaded before other components can be. self._early = kwargs.pop('early', False) if 'add_help' not in kwargs: kwargs['add_help'] = add_base_options argparse.ArgumentParser.__init__(self, **kwargs) #: Whether or not parsing has completed on all current options. self.parsed = False #: The argument list that was parsed. self.argv = None #: Components that have been added to the parser self.components = [] #: Options that have been added to the parser self.option_list = [] self._defaults_set = [] self._config_files = [] if add_base_options: self.add_component(self) if components: for component in components: self.add_component(component) def _check_duplicate_cf(self, option): """Check for a duplicate config file option.""" def add_options(self, options): """ Add an explicit list of options to the parser. 
When possible, prefer :func:`Bcfg2.Options.Parser.add_component` to add a whole component instead.""" _debug("Adding options: %s" % options) self.parsed = False for option in options: if option not in self.option_list: # check for duplicates if (hasattr(option, "env") and option.env and option.env in [o.env for o in self.option_list]): raise OptionParserException( "Duplicate environment variable option: %s" % option.env) if (hasattr(option, "cf") and option.cf and option.cf in [o.cf for o in self.option_list]): raise OptionParserException( "Duplicate config file option: %s" % (option.cf,)) self.option_list.extend(option.list_options()) option.add_to_parser(self) for opt in option.list_options(): opt.default_from_config(self._cfp) self._defaults_set.append(opt) def add_component(self, component): """ Add a component (and all of its options) to the parser. """ if component not in self.components: _debug("Adding component %s to %s" % (component, self)) self.components.append(component) if hasattr(component, "options"): self.add_options(getattr(component, "options")) def _set_defaults_from_config(self): """ Set defaults from the config file for all options that can come from the config file, but haven't yet had their default set """ _debug("Setting defaults on all options") for opt in self.option_list: if opt not in self._defaults_set: opt.default_from_config(self._cfp) self._defaults_set.append(opt) def _parse_config_options(self): """ populate the namespace with default values for any options that aren't already in the namespace (i.e., options without CLI arguments) """ _debug("Parsing config file-only options") for opt in self.option_list[:]: if not opt.args and opt.dest not in self.namespace: value = opt.default if value: for _, action in opt.actions.items(): _debug("Setting config file-only option %s to %s" % (opt, value)) action(self, self.namespace, value) else: _debug("Setting config file-only option %s to %s" % (opt, value)) setattr(self.namespace, opt.dest, value) def _finalize(self): """ Finalize the value of any options that require that additional post-processing step. (Mostly :class:`Bcfg2.Options.Actions.ComponentAction` subclasses.) """ _debug("Finalizing options") for opt in self.option_list[:]: opt.finalize(self.namespace) def _reset_namespace(self): """ Delete all options from the namespace except for a few predefined values and config file options. """ self.parsed = False _debug("Resetting namespace") for attr in dir(self.namespace): if (not attr.startswith("_") and attr not in ['uri', 'version', 'name'] and attr not in self._config_files): _debug("Deleting %s" % attr) delattr(self.namespace, attr) def _parse_early_options(self): """Parse early options. Early options are options that need to be parsed before other options for some reason. These fall into two basic cases: 1. Database options, which need to be parsed so that Django modules can be imported, since Django configuration is all done at import-time; 2. The repository (``-Q``) option, so that ```` macros in other options can be resolved. 
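# --- Illustrative sketch (not part of the original source) ---
# Registering a component with the module-level parser and reacting to
# the parsed options.  "MyTool" and its --dry-run option are
# hypothetical.
import Bcfg2.Options


class MyTool(object):
    """Hypothetical component with its own options and a post-parse hook."""
    options = [Bcfg2.Options.BooleanOption("--dry-run", help="Do nothing")]

    def options_parsed_hook(self):
        if Bcfg2.Options.setup.dry_run:
            print("dry run requested")


parser = Bcfg2.Options.get_parser(components=[MyTool()])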
""" _debug("Option parsing phase 2: Parse early options") early_opts = argparse.Namespace() early_parser = Parser(add_help=False, namespace=early_opts, early=True) # add the repo option so we can resolve # macros early_parser.add_options([repository]) early_components = [] for component in self.components: if getattr(component, "parse_first", False): early_components.append(component) early_parser.add_component(component) early_parser.parse(self.argv) _debug("Fixing up macros in early options") for attr_name in dir(early_opts): if not attr_name.startswith("_"): attr = getattr(early_opts, attr_name) if hasattr(attr, "replace"): setattr(early_opts, attr_name, attr.replace("", early_opts.repository)) _debug("Early parsing complete, calling hooks") for component in early_components: if hasattr(component, "component_parsed_hook"): _debug("Calling component_parsed_hook on %s" % component) getattr(component, "component_parsed_hook")(early_opts) _debug("Calling early parsing hooks; early options: %s" % early_opts) for option in self.option_list: option.early_parsing_hook(early_opts) def add_config_file(self, dest, cfile, reparse=True): """ Add a config file, which triggers a full reparse of all options. """ if dest not in self._config_files: _debug("Adding new config file %s for %s" % (cfile, dest)) self._reset_namespace() self._cfp.read([cfile]) self._defaults_set = [] self._set_defaults_from_config() if reparse: self._parse_config_options() self._config_files.append(dest) def reparse(self, argv=None): """ Reparse options after they have already been parsed. :param argv: The argument list to parse. By default, :attr:`Bcfg2.Options.Parser.argv` is reused. (I.e., the argument list that was initially parsed.) :type argv: list """ _debug("Reparsing all options") self._reset_namespace() self.parse(argv or self.argv) def parse(self, argv=None): """ Parse options. :param argv: The argument list to parse. By default, ``sys.argv[1:]`` is used. This is stored in :attr:`Bcfg2.Options.Parser.argv` for reuse by :func:`Bcfg2.Options.Parser.reparse`. :type argv: list """ _debug("Parsing options") if argv is None: argv = sys.argv[1:] # pragma: nocover if self.parsed and self.argv == argv: _debug("Returning already parsed namespace") return self.namespace self.argv = argv # phase 1: get and read config file _debug("Option parsing phase 1: Get and read main config file") bootstrap_parser = argparse.ArgumentParser(add_help=False) self.configfile.add_to_parser(bootstrap_parser) self.configfile.default_from_config(self._cfp) bootstrap = bootstrap_parser.parse_known_args(args=self.argv)[0] # check whether the specified bcfg2.conf exists if not self.unit_test and not os.path.exists(bootstrap.config): self.error("Could not read %s" % bootstrap.config) self.add_config_file(self.configfile.dest, bootstrap.config, reparse=False) # phase 2: re-parse command line for early options; currently, # that's database options if not self._early: self._parse_early_options() else: _debug("Skipping parsing phase 2 in early mode") # phase 3: re-parse command line, loading additional # components, until all components have been loaded. 
On each # iteration, set defaults from config file/environment # variables _debug("Option parsing phase 3: Main parser loop") # _set_defaults_from_config must be called before _parse_config_options # This is due to a tricky interaction between the two methods: # # (1) _set_defaults_from_config does what its name implies, it updates # the "default" property of each Option based on the value that exists # in the config. # # (2) _parse_config_options will look at each option and set it to the # default value that is _currently_ defined. If the option does not # exist in the namespace, it will be added. The method carefully # avoids overwriting the value of an option that is already defined in # the namespace. # # Thus, if _set_defaults_from_config has not been called yet when # _parse_config_options is called, all config file options will get set # to their hardcoded defaults. This process defines the options in the # namespace and _parse_config_options will never look at them again. # # we have to do the parsing in two loops: first, we squeeze as # much data out of the config file as we can to ensure that # all config file settings are read before we use any default # values. then we can start looking at the command line. while not self.parsed: self.parsed = True self._set_defaults_from_config() self._parse_config_options() self.parsed = False remaining = [] while not self.parsed: self.parsed = True _debug("Parsing known arguments") try: _, remaining = self.parse_known_args(args=self.argv, namespace=self.namespace) except OptionParserException: self.error(sys.exc_info()[1]) self._set_defaults_from_config() self._parse_config_options() self._finalize() if len(remaining) and not self._early: self.error("Unknown options: %s" % " ".join(remaining)) # phase 4: call post-parsing hooks if not self._early: _debug("Option parsing phase 4: Call hooks") for component in self.components: if hasattr(component, "options_parsed_hook"): _debug("Calling post-parsing hook on %s" % component) getattr(component, "options_parsed_hook")() return self.namespace #: A module-level :class:`Bcfg2.Options.Parser` object that is used #: for all parsing _parser = Parser() # pylint: disable=C0103 def new_parser(): """Create a new :class:`Bcfg2.Options.Parser` object. The new object can be retrieved with :func:`Bcfg2.Options.get_parser`. This is useful for unit testing. """ global _parser _parser = Parser() def get_parser(description=None, components=None, namespace=None): """Get an existing :class:`Bcfg2.Options.Parser` object. A Parser is created at the module level when :mod:`Bcfg2.Options` is imported. If any arguments are given, then the existing parser is modified before being returned. 
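# --- Illustrative sketch (not part of the original source) ---
# The usual entry-point pattern for a script built on this parser.  The
# description string and the --noop option are hypothetical.
import sys

import Bcfg2.Options


def main():
    parser = Bcfg2.Options.get_parser(
        description="Hypothetical Bcfg2 utility")
    parser.add_options([Bcfg2.Options.BooleanOption("--noop",
                                                    help="Do nothing")])
    parser.parse()  # fills Bcfg2.Options.setup from argv/env/config file
    return 0 if not Bcfg2.Options.setup.noop else 1


if __name__ == "__main__":
    sys.exit(main())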
:param description: Set the parser description :type description: string :param components: Load the given components in the parser :type components: list :param namespace: Use the given namespace instead of :attr:`Bcfg2.Options.setup` :type namespace: argparse.Namespace :returns: Bcfg2.Options.Parser object """ if Parser.unit_test: return Parser(description=description, components=components, namespace=namespace) elif (description or components or namespace): if description: _parser.description = description if components is not None: for component in components: _parser.add_component(component) if namespace: _parser.namespace = namespace return _parser src/lib/Bcfg2/Options/Subcommands.py000066400000000000000000000232641303523157100176250ustar00rootroot00000000000000""" Classes to make it easier to create commands with large numbers of subcommands (e.g., bcfg2-admin, bcfg2-info). """ import re import cmd import sys import copy import shlex import logging from Bcfg2.Compat import StringIO from Bcfg2.Options import PositionalArgument, _debug from Bcfg2.Options.OptionGroups import Subparser from Bcfg2.Options.Parser import Parser, setup as master_setup __all__ = ["Subcommand", "CommandRegistry"] class Subcommand(object): """ Base class for subcommands. This must be subclassed to create commands. Specifically, you must override :func:`Bcfg2.Options.Subcommand.run`. You may want to override: * The docstring, which will be used as the short help. * :attr:`Bcfg2.Options.Subcommand.options` * :attr:`Bcfg2.Options.Subcommand.help` * :attr:`Bcfg2.Options.Subcommand.interactive` * * :func:`Bcfg2.Options.Subcommand.shutdown` You should not need to override :func:`Bcfg2.Options.Subcommand.__call__` or :func:`Bcfg2.Options.Subcommand.usage`. A ``Subcommand`` subclass constructor must not take any arguments. """ #: Options this command takes options = [] #: Longer help message help = None #: Whether or not to expose this command in an interactive #: :class:`cmd.Cmd` shell, if one is used. (``bcfg2-info`` uses #: one, ``bcfg2-admin`` does not.) interactive = True #: Whether or not to expose this command as command line parameter #: or only in an interactive :class:`cmd.Cmd` shell. only_interactive = False #: Additional aliases for the command. The contents of the list gets #: added to the default command name (the lowercased class name) aliases = [] _ws_re = re.compile(r'\s+', flags=re.MULTILINE) def __init__(self): self.core = None description = "%s: %s" % (self.__class__.__name__.lower(), self.__class__.__doc__) #: The :class:`Bcfg2.Options.Parser` that will be used to #: parse options if this subcommand is called from an #: interactive :class:`cmd.Cmd` shell. self.parser = Parser( prog=self.__class__.__name__.lower(), description=description, components=[self], add_base_options=False, epilog=self.help) self._usage = None #: A :class:`logging.Logger` that can be used to produce #: logging output for this command. self.logger = logging.getLogger(self.__class__.__name__.lower()) def __call__(self, args=None): """ Perform option parsing and other tasks necessary to support running ``Subcommand`` objects as part of a :class:`cmd.Cmd` shell. You should not need to override ``__call__``. 
:param args: Arguments given in the interactive shell :type args: list of strings :returns: The return value of :func:`Bcfg2.Options.Subcommand.run` """ if args is not None: self.parser.namespace = copy.copy(master_setup) self.parser.parsed = False alist = shlex.split(args) try: setup = self.parser.parse(alist) except SystemExit: return sys.exc_info()[1].code return self.run(setup) else: return self.run(master_setup) def usage(self): """ Get the short usage message. """ if self._usage is None: sio = StringIO() self.parser.print_usage(file=sio) usage = self._ws_re.sub(' ', sio.getvalue()).strip()[7:] doc = self._ws_re.sub(' ', getattr(self, "__doc__") or "").strip() if not doc: self._usage = usage else: self._usage = "%s - %s" % (usage, doc) return self._usage def run(self, setup): """ Run the command. :param setup: A namespace giving the options for this command. This must be used instead of :attr:`Bcfg2.Options.setup` because this command may have been called from an interactive :class:`cmd.Cmd` shell, and thus has its own option parser and its own (private) namespace. ``setup`` is guaranteed to contain all of the options in the global :attr:`Bcfg2.Options.setup` namespace, in addition to any local options given to this command from the interactive shell. :type setup: argparse.Namespace """ raise NotImplementedError # pragma: nocover def shutdown(self): """ Perform any necessary shutdown tasks for this command This is called to when the program exits (*not* when this command is finished executing). """ pass # pragma: nocover class Help(Subcommand): """List subcommands and usage, or get help on a specific subcommand.""" options = [PositionalArgument("command", nargs='?')] # the interactive shell has its own help interactive = False def __init__(self, registry): Subcommand.__init__(self) self._registry = registry def run(self, setup): commands = dict((name, cmd) for (name, cmd) in self._registry.commands.items() if not cmd.only_interactive) if setup.command: try: commands[setup.command].parser.print_help() return 0 except KeyError: print("No such command: %s" % setup.command) return 1 for command in sorted(commands.keys()): print(commands[command].usage()) class CommandRegistry(object): """A ``CommandRegistry`` is used to register subcommands and provides a single interface to run them. It's also used by :class:`Bcfg2.Options.Subcommands.Help` to produce help messages for all available commands. """ def __init__(self): #: A dict of registered commands. Keys are the class names, #: lowercased (i.e., the command names), and values are instances #: of the command objects. self.commands = dict() #: A list of options that should be added to the option parser #: in order to handle registered subcommands. self.subcommand_options = [] #: the help command self.help = Help(self) self.register_command(self.help) def runcommand(self): """ Run the single command named in ``Bcfg2.Options.setup.subcommand``, which is where :class:`Bcfg2.Options.Subparser` groups store the subcommand. """ _debug("Running subcommand %s" % master_setup.subcommand) try: return self.commands[master_setup.subcommand].run(master_setup) finally: self.shutdown() def shutdown(self): """Perform shutdown tasks. This calls the ``shutdown`` method of the subcommand that was run. """ _debug("Shutting down subcommand %s" % master_setup.subcommand) self.commands[master_setup.subcommand].shutdown() def register_command(self, cls_or_obj): """ Register a single command. 
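# --- Illustrative sketch (not part of the original source) ---
# A minimal subcommand plus a registry-based CLI, following the same
# pattern used by bcfg2-reports later in this archive.  "Greet" and its
# positional argument are hypothetical.
import Bcfg2.Options


class Greet(Bcfg2.Options.Subcommand):
    """ Print a greeting """
    options = [Bcfg2.Options.PositionalArgument("whom")]

    def run(self, setup):
        print("Hello, %s" % setup.whom)
        return 0


class CLI(Bcfg2.Options.CommandRegistry):
    """Hypothetical subcommand-based tool."""
    def __init__(self):
        Bcfg2.Options.CommandRegistry.__init__(self)
        self.register_commands([Greet])
        parser = Bcfg2.Options.get_parser(
            description="Hypothetical subcommand-based tool",
            components=[self])
        parser.add_options(self.subcommand_options)
        parser.parse()

    def run(self):
        return self.runcommand()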
:param cls_or_obj: The command class or object to register :type cls_or_obj: type or Subcommand :returns: An instance of ``cmdcls`` """ if isinstance(cls_or_obj, type): cmdcls = cls_or_obj cmd_obj = cmdcls() else: cmd_obj = cls_or_obj cmdcls = cmd_obj.__class__ names = [cmdcls.__name__.lower()] if cmdcls.aliases: names.extend(cmdcls.aliases) for name in names: self.commands[name] = cmd_obj if not cmdcls.only_interactive: # py2.5 can't mix *magic and non-magical keyword args, thus # the **dict(...) self.subcommand_options.append( Subparser(*cmdcls.options, **dict(name=name, help=cmdcls.__doc__))) if issubclass(self.__class__, cmd.Cmd) and cmdcls.interactive: setattr(self, "do_%s" % name, cmd_obj) setattr(self, "help_%s" % name, cmd_obj.parser.print_help) return cmd_obj def register_commands(self, candidates, parent=Subcommand): """ Register all subcommands in ``candidates`` against the :class:`Bcfg2.Options.CommandRegistry` subclass given in ``registry``. A command is registered if and only if: * It is a subclass of the given ``parent`` (by default, :class:`Bcfg2.Options.Subcommand`); * It is not the parent class itself; and * Its name does not start with an underscore. :param registry: The :class:`Bcfg2.Options.CommandRegistry` subclass against which commands will be registered. :type registry: Bcfg2.Options.CommandRegistry :param candidates: A list of objects that will be considered for registration. Only objects that meet the criteria listed above will be registered. :type candidates: list :param parent: Specify a parent class other than :class:`Bcfg2.Options.Subcommand` that all registered commands must subclass. :type parent: type """ for attr in candidates: if (isinstance(attr, type) and issubclass(attr, parent) and attr != parent and not attr.__name__.startswith("_")): self.register_command(attr) src/lib/Bcfg2/Options/Types.py000066400000000000000000000054701303523157100164550ustar00rootroot00000000000000""" :mod:`Bcfg2.Options` provides a number of useful types for use with the :class:`Bcfg2.Options.Option` constructor. """ import os import re import pwd import grp from Bcfg2.Compat import literal_eval _COMMA_SPLIT_RE = re.compile(r'\s*,\s*') def path(value): """ A generic path. ``~`` will be expanded with :func:`os.path.expanduser` and the absolute resulting path will be used. This does *not* ensure that the path exists. """ return os.path.abspath(os.path.expanduser(value)) def comma_list(value): """ Split a comma-delimited list, with optional whitespace around the commas.""" if value == '': return [] return _COMMA_SPLIT_RE.split(value) def colon_list(value): """ Split a colon-delimited list. Whitespace is not allowed around the colons. """ if value == '': return [] return value.split(':') def literal_dict(value): """ literally evaluate the option in order to allow for arbitrarily nested dictionaries """ return literal_eval(value) def anchored_regex_list(value): """ Split an option string on whitespace and compile each element as an anchored regex """ try: return [re.compile('^' + x + '$') for x in re.split(r'\s+', value)] except re.error: raise ValueError("Not a list of regexes", value) def octal(value): """ Given an octal string, get an integer representation. """ return int(value, 8) def username(value): """ Given a username or numeric UID, get a numeric UID. The user must exist.""" try: return int(value) except ValueError: return int(pwd.getpwnam(value)[2]) def groupname(value): """ Given a group name or numeric GID, get a numeric GID. 
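# --- Illustrative sketch (not part of the original source) ---
# Using the type helpers above as Option types.  The option names are
# hypothetical; for reference, comma_list('root, daemon') returns
# ['root', 'daemon'] and octal('0644') returns 420.
from Bcfg2.Options import Option, Types

options = [
    Option("--owners", type=Types.comma_list,
           help="Hypothetical comma-separated list"),
    Option("--mode", type=Types.octal,
           help="Hypothetical octal file mode"),
    Option("--run-as", type=Types.username,
           help="Hypothetical user name or numeric UID")]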
The user must exist.""" try: return int(value) except ValueError: return int(grp.getgrnam(value)[2]) def timeout(value): """ Convert the value into a float or None. """ if value is None: return value rv = float(value) # pass ValueError up the stack if rv <= 0: return None return rv # pylint: disable=C0103 _bytes_multipliers = dict(k=1, m=2, g=3, t=4) _suffixes = "".join(_bytes_multipliers.keys()).lower() _suffixes += _suffixes.upper() _bytes_re = re.compile(r'(?P\d+)(?P[%s])?' % _suffixes) # pylint: enable=C0103 def size(value): """ Given a number of bytes in a human-readable format (e.g., '512m', '2g'), get the absolute number of bytes as an integer. """ mat = _bytes_re.match(value) if not mat: raise ValueError("Not a valid size", value) rvalue = int(mat.group("value")) mult = mat.group("multiplier") if mult: return rvalue * (1024 ** _bytes_multipliers[mult.lower()]) else: return rvalue src/lib/Bcfg2/Options/__init__.py000066400000000000000000000004751303523157100171100ustar00rootroot00000000000000""" Bcfg2 options parsing. """ # pylint: disable=W0611,W0401 from Bcfg2.Options import Types from Bcfg2.Options.Options import * from Bcfg2.Options.Common import * from Bcfg2.Options.Parser import * from Bcfg2.Options.Actions import * from Bcfg2.Options.Subcommands import * from Bcfg2.Options.OptionGroups import * src/lib/Bcfg2/Reporting/000077500000000000000000000000001303523157100153075ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/Collector.py000066400000000000000000000166551303523157100176240ustar00rootroot00000000000000import os import sys import atexit import daemon import logging import time import threading from lockfile import LockFailed, LockTimeout # pylint: disable=E0611 try: from daemon.pidfile import TimeoutPIDLockFile except ImportError: from daemon.pidlockfile import TimeoutPIDLockFile # pylint: enable=E0611 import Bcfg2.Logger import Bcfg2.Options from Bcfg2.Reporting.Transport.base import TransportError from Bcfg2.Reporting.Transport.DirectStore import DirectStore from Bcfg2.Reporting.Storage.base import StorageError class ReportingError(Exception): """Generic reporting exception""" pass class ReportingStoreThread(threading.Thread): """Thread for calling the storage backend""" def __init__(self, interaction, storage, group=None, target=None, name=None, semaphore=None, args=(), kwargs=None): """Initialize the thread with a reference to the interaction as well as the storage engine to use""" threading.Thread.__init__(self, group, target, name, args, kwargs or dict()) self.interaction = interaction self.storage = storage self.logger = logging.getLogger('bcfg2-report-collector') self.semaphore = semaphore def run(self): """Call the database storage procedure (aka import)""" try: try: start = time.time() self.storage.import_interaction(self.interaction) self.logger.info("Imported interaction for %s in %ss" % (self.interaction.get('hostname', ''), time.time() - start)) except: #TODO requeue? self.logger.error("Unhandled exception in import thread %s" % sys.exc_info()[1]) finally: if self.semaphore: self.semaphore.release() class ReportingCollector(object): """The collecting process for reports""" options = [Bcfg2.Options.Common.reporting_storage, Bcfg2.Options.Common.reporting_transport, Bcfg2.Options.Common.daemon, Bcfg2.Options.Option( '--max-children', dest="children", cf=('reporting', 'max_children'), type=int, default=0, help='Maximum number of children for the reporting collector')] def __init__(self): """Setup the collector. 
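# --- Illustrative sketch (not part of the original source) ---
# What the size() helper above returns for a few human-readable values.
from Bcfg2.Options import Types

assert Types.size("512") == 512
assert Types.size("512k") == 512 * 1024
assert Types.size("2g") == 2 * 1024 ** 3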
This may be called by the daemon or though bcfg2-admin""" self.terminate = None self.context = None self.children = [] self.cleanup_threshold = 25 self.semaphore = None if Bcfg2.Options.setup.children > 0: self.semaphore = threading.Semaphore( value=Bcfg2.Options.setup.children) if Bcfg2.Options.setup.debug: level = logging.DEBUG elif Bcfg2.Options.setup.verbose: level = logging.INFO else: level = logging.WARNING Bcfg2.Logger.setup_logging() self.logger = logging.getLogger('bcfg2-report-collector') try: self.transport = Bcfg2.Options.setup.reporting_transport() self.storage = Bcfg2.Options.setup.reporting_storage() except TransportError: self.logger.error("Failed to load transport: %s" % sys.exc_info()[1]) raise ReportingError except StorageError: self.logger.error("Failed to load storage: %s" % sys.exc_info()[1]) raise ReportingError if isinstance(self.transport, DirectStore): self.logger.error("DirectStore cannot be used with the collector. " "Use LocalFilesystem instead") self.shutdown() raise ReportingError try: self.logger.debug("Validating storage %s" % self.storage.__class__.__name__) self.storage.validate() except: self.logger.error("Storage backend %s failed to validate: %s" % (self.storage.__class__.__name__, sys.exc_info()[1])) def run(self): """Startup the processing and go!""" self.terminate = threading.Event() atexit.register(self.shutdown) self.context = daemon.DaemonContext(detach_process=True) iter = 0 if Bcfg2.Options.setup.daemon: self.logger.debug("Daemonizing") self.context.pidfile = TimeoutPIDLockFile( Bcfg2.Options.setup.daemon, acquire_timeout=5) # Attempt to ensure lockfile is able to be created and not stale try: self.context.pidfile.acquire() except LockFailed: self.logger.error("Failed to daemonize: %s" % sys.exc_info()[1]) self.shutdown() return except LockTimeout: try: # attempt to break the lock os.kill(self.context.pidfile.read_pid(), 0) except (OSError, TypeError): # No process with locked PID self.context.pidfile.break_lock() else: self.logger.error("Failed to daemonize: " "Failed to acquire lock on %s" % Bcfg2.Options.setup.daemon) self.shutdown() return else: self.context.pidfile.release() self.context.open() self.logger.info("Starting daemon") self.transport.start_monitor(self) while not self.terminate.isSet(): try: interaction = self.transport.fetch() if not interaction: continue if self.semaphore: self.semaphore.acquire() store_thread = ReportingStoreThread(interaction, self.storage, semaphore=self.semaphore) store_thread.start() self.children.append(store_thread) iter += 1 if iter >= self.cleanup_threshold: self.reap_children() iter = 0 except (SystemExit, KeyboardInterrupt): self.logger.info("Shutting down") self.shutdown() except: self.logger.error("Unhandled exception in main loop %s" % sys.exc_info()[1]) def shutdown(self): """Cleanup and go""" if self.terminate: # this wil be missing if called from bcfg2-admin self.terminate.set() if self.transport: try: self.transport.shutdown() except OSError: pass if self.storage: self.storage.shutdown() def reap_children(self): """Join any non-live threads""" newlist = [] self.logger.debug("Starting reap_children") for child in self.children: if child.isAlive(): newlist.append(child) else: child.join() self.logger.debug("Joined child thread %s" % child.getName()) self.children = newlist src/lib/Bcfg2/Reporting/Compat.py000066400000000000000000000014241303523157100171050ustar00rootroot00000000000000""" Compatibility imports for Django. 
""" from django import VERSION from django.db import transaction # Django 1.6 deprecated commit_on_success() and introduced atomic() with # similar semantics. if VERSION[0] == 1 and VERSION[1] < 6: transaction.atomic = transaction.commit_on_success try: # Django < 1.6 from django.conf.urls.defaults import url, patterns except ImportError: # Django > 1.6 from django.conf.urls import url try: from django.conf.urls import patterns except: # Django > 1.10 def patterns(_prefix, urls): url_list = list() for u in urls: if isinstance(url_tuple, (list, tuple)): u = url(*u) url_list.append(u) return url_list src/lib/Bcfg2/Reporting/Reports.py000077500000000000000000000274551303523157100173370ustar00rootroot00000000000000#!/usr/bin/env python """Query reporting system for client status.""" import sys import argparse import datetime import django import Bcfg2.DBSettings from django.core.exceptions import ObjectDoesNotExist def print_entries(interaction, etype): items = getattr(interaction, etype)() for item in items: print("%-70s %s" % (item.entry_type + ":" + item.name, etype)) class _FlagsFilterMixin(object): """ Mixin that allows to filter the interactions based on the only_important and/or the dry_run flag """ options = [ Bcfg2.Options.BooleanOption( "-n", "--no-dry-run", help="Do not consider interactions created with the --dry-run " "flag"), Bcfg2.Options.BooleanOption( "-i", "--no-only-important", help="Do not consider interactions created with the " "--only-important flag")] def get_interaction(self, client, setup): if not setup.no_dry_run and not setup.no_only_important: return client.current_interaction filter = {} if setup.no_dry_run: filter['dry_run'] = False if setup.no_only_important: filter['only_important'] = False from Bcfg2.Reporting.models import Interaction try: return Interaction.objects.filter(client=client, **filter).latest() except ObjectDoesNotExist: return None class _SingleHostCmd(Bcfg2.Options.Subcommand): # pylint: disable=W0223 """ Base class for bcfg2-reports modes that take a single host as a positional argument """ options = [Bcfg2.Options.PositionalArgument("host")] def get_client(self, setup): from Bcfg2.Reporting.models import Client try: return Client.objects.select_related().get(name=setup.host) except Client.DoesNotExist: print("No such host: %s" % setup.host) raise SystemExit(2) class Show(_SingleHostCmd, _FlagsFilterMixin): """ Show bad, extra, modified, or all entries from a given host """ options = _SingleHostCmd.options + _FlagsFilterMixin.options + [ Bcfg2.Options.BooleanOption( "-b", "--bad", help="Show bad entries from HOST"), Bcfg2.Options.BooleanOption( "-e", "--extra", help="Show extra entries from HOST"), Bcfg2.Options.BooleanOption( "-m", "--modified", help="Show modified entries from HOST")] def run(self, setup): client = self.get_client(setup) show_all = not setup.bad and not setup.extra and not setup.modified interaction = self.get_interaction(client, setup) if interaction is None: print("No interactions found for host: %s" % client.name) else: if setup.bad or show_all: print_entries(interaction, "bad") if setup.modified or show_all: print_entries(interaction, "modified") if setup.extra or show_all: print_entries(interaction, "extra") class Total(_SingleHostCmd, _FlagsFilterMixin): """ Show total number of managed and good entries from HOST """ options = _SingleHostCmd.options + _FlagsFilterMixin.options def run(self, setup): client = self.get_client(setup) interaction = self.get_interaction(client, setup) if interaction is None: print("No 
interactions found for host: %s" % client.name) else: managed = interaction.total_count good = interaction.good_count print("Total managed entries: %d (good: %d)" % (managed, good)) class Expire(_SingleHostCmd): """ Toggle the expired/unexpired state of HOST """ def run(self, setup): client = self.get_client(setup) if client.expiration is None: client.expiration = datetime.datetime.now() print("%s expired." % client.name) else: client.expiration = None print("%s un-expired." % client.name) client.save() class _ClientSelectCmd(Bcfg2.Options.Subcommand, _FlagsFilterMixin): """ Base class for subcommands that display lists of clients """ options = _FlagsFilterMixin.options + [ Bcfg2.Options.Option("--fields", metavar="FIELD,FIELD,...", help="Only display the listed fields", type=Bcfg2.Options.Types.comma_list, default=['name', 'time', 'state'])] def get_clients(self): from Bcfg2.Reporting.models import Client return Client.objects.exclude(current_interaction__isnull=True) def _print_fields(self, setup, fields, client, fmt, extra=None): """ Prints the fields specified in fields of client, max_name specifies the column width of the name column. """ fdata = [] if extra is None: extra = dict() interaction = self.get_interaction(client, setup) for field in fields: if field == 'time': fdata.append(str(interaction.timestamp)) elif field == 'state': if interaction.isclean(): fdata.append("clean") else: fdata.append("dirty") elif field == 'total': fdata.append(interaction.total_count) elif field == 'good': fdata.append(interaction.good_count) elif field == 'modified': fdata.append(interaction.modified_count) elif field == 'extra': fdata.append(interaction.extra_count) elif field == 'bad': fdata.append(interaction.bad_count) elif field == 'stale': fdata.append(interaction.isstale()) else: try: fdata.append(getattr(client, field)) except AttributeError: fdata.append(extra.get(field, "N/A")) print(fmt % tuple(fdata)) def display(self, setup, result, fields, extra=None): if 'name' not in fields: fields.insert(0, "name") if not result: print("No match found") return if extra is None: extra = dict() max_name = max(len(c.name) for c in result) ffmt = [] for field in fields: if field == "name": ffmt.append("%%-%ds" % max_name) elif field == "time": ffmt.append("%-19s") else: ffmt.append("%%-%ds" % len(field)) fmt = " ".join(ffmt) print(fmt % tuple(f.title() for f in fields)) for client in result: if not client.expiration: self._print_fields(setup, fields, client, fmt, extra=extra.get(client, None)) class Clients(_ClientSelectCmd): """ Query hosts """ options = _ClientSelectCmd.options + [ Bcfg2.Options.BooleanOption( "-c", "--clean", help="Show only clean hosts"), Bcfg2.Options.BooleanOption( "-d", "--dirty", help="Show only dirty hosts"), Bcfg2.Options.BooleanOption( "--stale", help="Show hosts that haven't run in the last 24 hours")] def run(self, setup): result = [] show_all = not setup.stale and not setup.clean and not setup.dirty for client in self.get_clients(): interaction = self.get_interaction(client, setup) if interaction is None: continue if (show_all or (setup.stale and interaction.isstale()) or (setup.clean and interaction.isclean()) or (setup.dirty and not interaction.isclean())): result.append(client) self.display(setup, result, setup.fields) class Entries(_ClientSelectCmd): """ Query hosts by entries """ options = _ClientSelectCmd.options + [ Bcfg2.Options.BooleanOption( "--badentry", help="Show hosts that have bad entries that match"), Bcfg2.Options.BooleanOption( "--modifiedentry", 
help="Show hosts that have modified entries that match"), Bcfg2.Options.BooleanOption( "--extraentry", help="Show hosts that have extra entries that match"), Bcfg2.Options.PathOption( "--file", type=argparse.FileType('r'), help="Read TYPE:NAME pairs from the specified file instead of " "from the command line"), Bcfg2.Options.PositionalArgument( "entries", metavar="TYPE:NAME", nargs="*")] def _hosts_by_entry_type(self, setup, clients, etype, entryspec): result = [] for entry in entryspec: for client in clients: interaction = self.get_interaction(client, setup) if interaction is None: continue items = getattr(interaction, etype)() for item in items: if (item.entry_type == entry[0] and item.name == entry[1]): result.append(client) return result def run(self, setup): result = [] if setup.file: try: entries = [l.strip().split(":") for l in setup.file] except IOError: err = sys.exc_info()[1] print("Cannot read entries from %s: %s" % (setup.file.name, err)) return 2 else: entries = [a.split(":") for a in setup.entries] clients = self.get_clients() if setup.badentry: result = self._hosts_by_entry_type(setup, clients, "bad", entries) elif setup.modifiedentry: result = self._hosts_by_entry_type(setup, clients, "modified", entries) elif setup.extraentry: result = self._hosts_by_entry_type(setup, clients, "extra", entries) self.display(setup, result, setup.fields) class Entry(_ClientSelectCmd): """ Show the status of a single entry on all hosts """ options = _ClientSelectCmd.options + [ Bcfg2.Options.PositionalArgument( "entry", metavar="TYPE:NAME", nargs=1)] def run(self, setup): from Bcfg2.Reporting.models import BaseEntry result = [] fields = setup.fields if 'state' in fields: fields.remove('state') fields.append("entry state") etype, ename = setup.entry[0].split(":") try: entry_cls = BaseEntry.entry_from_type(etype) except ValueError: print("Unhandled/unknown type %s" % etype) return 2 # TODO: batch fetch this. 
sqlite could break extra = dict() for client in self.get_clients(): interaction = self.get_interaction(client, setup) if interaction is None: continue ents = entry_cls.objects.filter( name=ename, interaction=interaction) if len(ents) == 0: continue extra[client] = {"entry state": ents[0].get_state_display(), "reason": ents[0]} result.append(client) self.display(setup, result, fields, extra=extra) class CLI(Bcfg2.Options.CommandRegistry): """ CLI class for bcfg2-reports """ def __init__(self): Bcfg2.Options.CommandRegistry.__init__(self) self.register_commands(globals().values()) parser = Bcfg2.Options.get_parser( description="Query the Bcfg2 reporting subsystem", components=[self]) parser.add_options(self.subcommand_options) parser.parse() def run(self): """ Run bcfg2-reports """ return self.runcommand() src/lib/Bcfg2/Reporting/Storage/000077500000000000000000000000001303523157100167135ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/Storage/DjangoORM.py000066400000000000000000000476051303523157100210610ustar00rootroot00000000000000""" The base for the original DjangoORM (DBStats) """ import difflib import traceback from datetime import datetime from time import strptime from lxml import etree import django from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned from django.db.models import FieldDoesNotExist from django.core.cache import cache import Bcfg2.Options import Bcfg2.DBSettings from Bcfg2.Compat import b64decode, md5 from Bcfg2.Reporting.Compat import transaction from Bcfg2.Reporting.Storage.base import StorageBase, StorageError from Bcfg2.Server.Plugin.exceptions import PluginExecutionError def load_django_models(): """ Load models for Django after option parsing has completed """ # pylint: disable=W0602 global Interaction, PackageEntry, FilePerms, PathEntry, LinkEntry, \ Group, Client, Bundle, TYPE_EXTRA, TYPE_BAD, TYPE_MODIFIED, \ FailureEntry, Performance, BaseEntry # pylint: enable=W0602 from Bcfg2.Reporting.models import \ Interaction, PackageEntry, FilePerms, PathEntry, LinkEntry, \ Group, Client, Bundle, TYPE_EXTRA, TYPE_BAD, TYPE_MODIFIED, \ FailureEntry, Performance, BaseEntry def get_all_field_names(model): if django.VERSION[0] == 1 and django.VERSION[1] >= 8: return [field.name for field in model._meta.get_fields() if field.auto_created == False and not (field.is_relation and field.related_model is None)] else: return model._meta.get_all_field_names() class DjangoORM(StorageBase): options = StorageBase.options + [ Bcfg2.Options.Common.repository, Bcfg2.Options.Option( cf=('reporting', 'file_limit'), type=Bcfg2.Options.Types.size, help='Reporting file size limit', default=1024 * 1024)] options_parsed_hook = staticmethod(load_django_models) def _import_default(self, entry, state, entrytype=None, defaults=None, mapping=None, boolean=None, xforms=None): """ Default entry importer. Maps the entry (in state ``state``) to an appropriate *Entry object; by default, this is determined by the entry tag, e.g., from an Action entry an ActionEntry object is created. This can be overridden with ``entrytype``, which should be the class to instantiate for this entry. ``defaults`` is an optional mapping of : that will be used to set the default values for various attributes. ``mapping`` is a mapping of : that can be used to map fields that are named differently on the XML entry and in the database model. ``boolean`` is a list of attribute names that should be treated as booleans. 
``xforms`` is a dict of :, where the given function will be applied to the value of the named attribute before trying to store it in the database. """ if entrytype is None: entrytype = globals()["%sEntry" % entry.tag] if defaults is None: defaults = dict() if mapping is None: mapping = dict() if boolean is None: boolean = [] if xforms is None: xforms = dict() mapping['exists'] = 'current_exists' defaults['current_exists'] = 'true' boolean.append("current_exists") def boolean_xform(val): try: return val.lower() == "true" except AttributeError: return False for attr in boolean + ["current_exists"]: xforms[attr] = boolean_xform act_dict = dict(state=state) for fieldname in get_all_field_names(entrytype): if fieldname in ['id', 'hash_key', 'state']: continue try: field = entrytype._meta.get_field(fieldname) except FieldDoesNotExist: continue attrname = mapping.get(fieldname, fieldname) val = entry.get(fieldname, defaults.get(attrname)) act_dict[fieldname] = xforms.get(attrname, lambda v: v)(val) self.logger.debug("Adding %s:%s" % (entry.tag, entry.get("name"))) return entrytype.entry_get_or_create(act_dict) def _import_Action(self, entry, state): return self._import_default(entry, state, defaults=dict(status='check', rc=-1), mapping=dict(output="rc")) def _import_Package(self, entry, state): name = entry.get('name') exists = entry.get('current_exists', default="true").lower() == "true" act_dict = dict(name=name, state=state, exists=exists, target_version=entry.get('version', default=''), current_version=entry.get('current_version', default='')) # extra entries are a bit different. They can have Instance # objects if not act_dict['target_version']: instance = entry.find("Instance") if instance: release = instance.get('release', '') arch = instance.get('arch', '') act_dict['current_version'] = instance.get('version') if release: act_dict['current_version'] += "-" + release if arch: act_dict['current_version'] += "." 
+ arch self.logger.debug("Adding extra package %s %s" % (name, act_dict['current_version'])) else: self.logger.debug("Adding package %s %s" % (name, act_dict['target_version'])) # not implemented yet act_dict['verification_details'] = \ entry.get('verification_details', '') return PackageEntry.entry_get_or_create(act_dict) def _import_Path(self, entry, state): name = entry.get('name') exists = entry.get('current_exists', default="true").lower() == "true" path_type = entry.get("type").lower() act_dict = dict(name=name, state=state, exists=exists, path_type=path_type) target_dict = dict( owner=entry.get('owner', default="root"), group=entry.get('group', default="root"), mode=entry.get('mode', default=entry.get('perms', default="")) ) fperm, created = FilePerms.objects.get_or_create(**target_dict) act_dict['target_perms'] = fperm current_dict = dict( owner=entry.get('current_owner', default=""), group=entry.get('current_group', default=""), mode=entry.get('current_mode', default=entry.get('current_perms', default="")) ) fperm, created = FilePerms.objects.get_or_create(**current_dict) act_dict['current_perms'] = fperm if path_type in ('symlink', 'hardlink'): act_dict['target_path'] = entry.get('to', default="") act_dict['current_path'] = entry.get('current_to', default="") self.logger.debug("Adding link %s" % name) return LinkEntry.entry_get_or_create(act_dict) elif path_type == 'device': # TODO devices self.logger.warn("device path types are not supported yet") return # TODO - vcs output act_dict['detail_type'] = PathEntry.DETAIL_UNUSED if path_type == 'directory' and entry.get('prune', 'false') == 'true': unpruned_elist = [e.get('name') for e in entry.findall('Prune')] if unpruned_elist: act_dict['detail_type'] = PathEntry.DETAIL_PRUNED act_dict['details'] = "\n".join(unpruned_elist) elif entry.get('sensitive', 'false').lower() == 'true': act_dict['detail_type'] = PathEntry.DETAIL_SENSITIVE else: cdata = None if entry.get('current_bfile', None): act_dict['detail_type'] = PathEntry.DETAIL_BINARY cdata = entry.get('current_bfile') elif entry.get('current_bdiff', None): act_dict['detail_type'] = PathEntry.DETAIL_DIFF cdata = b64decode(entry.get('current_bdiff')) elif entry.get('current_diff', None): act_dict['detail_type'] = PathEntry.DETAIL_DIFF cdata = entry.get('current_bdiff') if cdata: if len(cdata) > Bcfg2.Options.setup.file_limit: act_dict['detail_type'] = PathEntry.DETAIL_SIZE_LIMIT act_dict['details'] = md5(cdata).hexdigest() else: act_dict['details'] = cdata self.logger.debug("Adding path %s" % name) return PathEntry.entry_get_or_create(act_dict) # TODO - secontext # TODO - acls def _import_Service(self, entry, state): return self._import_default(entry, state, defaults=dict(status='', current_status='', target_status=''), mapping=dict(status='target_status')) def _import_SEBoolean(self, entry, state): return self._import_default( entry, state, xforms=dict(value=lambda v: v.lower() == "on")) def _import_SEFcontext(self, entry, state): return self._import_default(entry, state, defaults=dict(filetype='all')) def _import_SEInterface(self, entry, state): return self._import_default(entry, state) def _import_SEPort(self, entry, state): return self._import_default(entry, state) def _import_SENode(self, entry, state): return self._import_default(entry, state) def _import_SELogin(self, entry, state): return self._import_default(entry, state) def _import_SEUser(self, entry, state): return self._import_default(entry, state) def _import_SEPermissive(self, entry, state): return 
self._import_default(entry, state) def _import_SEModule(self, entry, state): return self._import_default(entry, state, defaults=dict(disabled='false'), boolean=['disabled', 'current_disabled']) def _import_POSIXUser(self, entry, state): defaults = dict(group=entry.get("name"), gecos=entry.get("name"), shell='/bin/bash', uid=entry.get("current_uid")) if entry.get('name') == 'root': defaults['home'] = '/root' else: defaults['home'] = '/home/%s' % entry.get('name') # TODO: supplementary group membership return self._import_default(entry, state, defaults=defaults) def _import_POSIXGroup(self, entry, state): return self._import_default( entry, state, defaults=dict(gid=entry.get("current_gid"))) def _import_unknown(self, entry, _): self.logger.error("Unknown type %s not handled by reporting yet" % entry.tag) return None @transaction.atomic def _import_interaction(self, interaction): """Real import function""" hostname = interaction['hostname'] stats = etree.fromstring(interaction['stats']) metadata = interaction['metadata'] server = metadata['server'] client = cache.get(hostname) if not client: client, created = Client.objects.get_or_create(name=hostname) if created: self.logger.debug("Client %s added to the db" % hostname) cache.set(hostname, client) timestamp = datetime(*strptime(stats.get('time'))[0:6]) if len(Interaction.objects.filter(client=client, timestamp=timestamp)) > 0: self.logger.warn("Interaction for %s at %s already exists" % (hostname, timestamp)) return if 'profile' in metadata: profile, created = \ Group.objects.get_or_create(name=metadata['profile']) else: profile = None flags = {'dry_run': False, 'only_important': False} for flag in stats.findall('./Flags/Flag'): value = flag.get('value', default='false').lower() == 'true' name = flag.get('name') if name in flags: flags[name] = value inter = Interaction(client=client, timestamp=timestamp, state=stats.get('state', default="unknown"), repo_rev_code=stats.get('revision', default="unknown"), good_count=stats.get('good', default="0"), total_count=stats.get('total', default="0"), server=server, profile=profile, **flags) inter.save() self.logger.debug("Interaction for %s at %s with INSERTED in to db" % (client.id, timestamp)) # FIXME - this should be more efficient for group_name in metadata['groups']: group = cache.get("GROUP_" + group_name) if not group: group, created = Group.objects.get_or_create(name=group_name) if created: self.logger.debug("Added group %s" % group) cache.set("GROUP_" + group_name, group) inter.groups.add(group) for bundle_name in metadata.get('bundles', []): bundle = cache.get("BUNDLE_" + bundle_name) if not bundle: bundle, created = \ Bundle.objects.get_or_create(name=bundle_name) if created: self.logger.debug("Added bundle %s" % bundle) cache.set("BUNDLE_" + bundle_name, bundle) inter.bundles.add(bundle) inter.save() counter_fields = {TYPE_BAD: 0, TYPE_MODIFIED: 0, TYPE_EXTRA: 0} pattern = [('Bad/*', TYPE_BAD), ('Extra/*', TYPE_EXTRA), ('Modified/*', TYPE_MODIFIED)] updates = dict([(etype, []) for etype in Interaction.entry_types]) for (xpath, state) in pattern: for entry in stats.findall(xpath): counter_fields[state] = counter_fields[state] + 1 # handle server failures differently failure = entry.get('failure', '') if failure: act_dict = dict(name=entry.get("name"), entry_type=entry.tag, message=failure) newact = FailureEntry.entry_get_or_create(act_dict) updates['failures'].append(newact) continue updatetype = entry.tag.lower() + "s" update = getattr(self, "_import_%s" % entry.tag, 
self._import_unknown)(entry, state) if update is not None: updates[updatetype].append(update) inter.bad_count = counter_fields[TYPE_BAD] inter.modified_count = counter_fields[TYPE_MODIFIED] inter.extra_count = counter_fields[TYPE_EXTRA] inter.save() for entry_type in updates.keys(): # batch this for sqlite i = 0 while(i < len(updates[entry_type])): getattr(inter, entry_type).add(*updates[entry_type][i:i + 100]) i += 100 # performance metrics for times in stats.findall('OpStamps'): for metric, value in list(times.items()): Performance(interaction=inter, metric=metric, value=value).save() def import_interaction(self, interaction): """Import the data into the backend""" try: try: self._import_interaction(interaction) except: self.logger.error("Failed to import interaction: %s" % traceback.format_exc().splitlines()[-1]) finally: self.logger.debug("%s: Closing database connection" % self.__class__.__name__) if django.VERSION[0] == 1 and django.VERSION[1] >= 7: for connection in django.db.connections.all(): connection.close() else: django.db.close_connection() def validate(self): """Validate backend storage. Should be called once when loaded""" # verify our database schema try: if Bcfg2.Options.setup.debug: vrb = 2 elif Bcfg2.Options.setup.verbose: vrb = 1 else: vrb = 0 Bcfg2.DBSettings.sync_databases(verbosity=vrb, interactive=False) Bcfg2.DBSettings.migrate_databases(verbosity=vrb, interactive=False) except: msg = "Failed to update database schema: %s" % sys.exc_info()[1] self.logger.error(msg) raise StorageError(msg) def GetExtra(self, client): """Fetch extra entries for a client""" try: c_inst = Client.objects.get(name=client) if not c_inst.current_interaction: # the rare case where a client has no interations return None return [(ent.entry_type, ent.name) for ent in c_inst.current_interaction.extra()] except ObjectDoesNotExist: return [] except MultipleObjectsReturned: self.logger.error("%s Inconsistency: Multiple entries for %s." % (self.__class__.__name__, client)) return [] def GetCurrentEntry(self, client, e_type, e_name): """"GetCurrentEntry: Used by PullSource""" try: c_inst = Client.objects.get(name=client) except ObjectDoesNotExist: self.logger.error("Unknown client: %s" % client) raise PluginExecutionError except MultipleObjectsReturned: self.logger.error("%s Inconsistency: Multiple entries for %s." 
% (self.__class__.__name__, client)) raise PluginExecutionError try: cls = BaseEntry.entry_from_name(e_type + "Entry") result = cls.objects.filter(name=e_name, state=TYPE_BAD, interaction=c_inst.current_interaction) except ValueError: self.logger.error("Unhandled type %s" % e_type) raise PluginExecutionError if not result: raise PluginExecutionError entry = result[0] ret = [] for p_entry in ('owner', 'group', 'mode'): this_entry = getattr(entry.current_perms, p_entry) if this_entry == '': ret.append(getattr(entry.target_perms, p_entry)) else: ret.append(this_entry) if entry.entry_type == 'Path': if entry.is_sensitive(): raise PluginExecutionError elif entry.detail_type == PathEntry.DETAIL_PRUNED: ret.append('\n'.join(entry.details)) elif entry.is_binary(): ret.append(b64decode(entry.details)) elif entry.is_diff(): ret.append('\n'.join(difflib.restore(\ entry.details.split('\n'), 1))) elif entry.is_too_large(): # If len is zero the object was too large to store raise PluginExecutionError else: ret.append(None) return ret src/lib/Bcfg2/Reporting/Storage/__init__.py000066400000000000000000000000401303523157100210160ustar00rootroot00000000000000""" Public storage routines """ src/lib/Bcfg2/Reporting/Storage/base.py000066400000000000000000000022701303523157100202000ustar00rootroot00000000000000""" The base for all Storage backends """ import logging class StorageError(Exception): """Generic StorageError""" pass class StorageBase(object): """The base for all storages""" options = [] __rmi__ = ['Ping', 'GetExtra', 'GetCurrentEntry'] def __init__(self): """Do something here""" clsname = self.__class__.__name__ self.logger = logging.getLogger(clsname) self.logger.debug("Loading %s storage" % clsname) def import_interaction(self, interaction): """Import the data into the backend""" raise NotImplementedError def validate(self): """Validate backend storage. Should be called once when loaded""" raise NotImplementedError def shutdown(self): """Called at program exit""" pass def Ping(self): """Test for communication with reporting collector""" return "Pong" def GetExtra(self, client): """Return a list of extra entries for a client. 
Minestruct""" raise NotImplementedError def GetCurrentEntry(self, client, e_type, e_name): """Get the current status of an entry on the client""" raise NotImplementedError src/lib/Bcfg2/Reporting/Transport/000077500000000000000000000000001303523157100173035ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/Transport/DirectStore.py000066400000000000000000000121151303523157100221040ustar00rootroot00000000000000""" Reporting Transport that stores statistics data directly in the storage backend """ import os import sys import time import threading import Bcfg2.Options from Bcfg2.Reporting.Transport.base import TransportBase, TransportError from Bcfg2.Compat import Queue, Full, Empty, cPickle class DirectStore(TransportBase, threading.Thread): options = TransportBase.options + [Bcfg2.Options.Common.reporting_storage] def __init__(self): TransportBase.__init__(self) threading.Thread.__init__(self) self.save_file = os.path.join(self.data, ".saved") self.storage = Bcfg2.Options.setup.reporting_storage() self.storage.validate() self.queue = Queue(100000) self.terminate = threading.Event() self.debug_log("Reporting: Starting %s thread" % self.__class__.__name__) self.start() def shutdown(self): self.terminate.set() def store(self, hostname, metadata, stats): try: self.queue.put_nowait(dict(hostname=hostname, metadata=metadata, stats=stats)) except Full: self.logger.warning("Reporting: Queue is full, " "dropping statistics") def run(self): if not self._load(): self.logger.warning("Reporting: Failed to load saved data, " "DirectStore thread exiting") return while not self.terminate.isSet() and self.queue is not None: try: interaction = self.queue.get(block=True, timeout=self.timeout) start = time.time() self.storage.import_interaction(interaction) self.logger.info("Imported data for %s in %s seconds" % (interaction.get('hostname', ''), time.time() - start)) except Empty: self.debug_log("Reporting: Queue is empty") continue except: err = sys.exc_info()[1] self.logger.error("Reporting: Could not import interaction: %s" % err) continue self.debug_log("Reporting: Stopping %s thread" % self.__class__.__name__) if self.queue is not None and not self.queue.empty(): self._save() def fetch(self): """ no collector is necessary with this backend """ pass def start_monitor(self, collector): """ no collector is necessary with this backend """ pass def rpc(self, method, *args, **kwargs): try: return getattr(self.storage, method)(*args, **kwargs) except: # pylint: disable=W0702 msg = "Reporting: RPC method %s failed: %s" % (method, sys.exc_info()[1]) self.logger.error(msg) raise TransportError(msg) def _save(self): """ Save any saved data to a file """ self.debug_log("Reporting: Saving pending data to %s" % self.save_file) saved_data = [] try: while not self.queue.empty(): saved_data.append(self.queue.get_nowait()) except Empty: pass try: savefile = open(self.save_file, 'w') cPickle.dump(saved_data, savefile) savefile.close() self.logger.info("Saved pending Reporting data") except (IOError, TypeError): err = sys.exc_info()[1] self.logger.warning("Failed to save pending data: %s" % err) def _load(self): """ Load any saved data from a file """ if not os.path.exists(self.save_file): self.debug_log("Reporting: No saved data to load") return True saved_data = [] try: savefile = open(self.save_file, 'r') saved_data = cPickle.load(savefile) savefile.close() except (IOError, cPickle.UnpicklingError): err = sys.exc_info()[1] self.logger.warning("Failed to load saved data: %s" % err) return False for interaction in 
saved_data: # check that shutdown wasnt called early if self.terminate.isSet(): self.logger.warning("Reporting: Shutdown called while loading " " saved data") return False try: self.queue.put_nowait(interaction) except Full: self.logger.warning("Reporting: Queue is full, failed to " "load saved interaction data") break try: os.unlink(self.save_file) except OSError: self.logger.error("Reporting: Failed to unlink save file: %s" % self.save_file) self.logger.info("Reporting: Loaded saved interaction data") return True src/lib/Bcfg2/Reporting/Transport/LocalFilesystem.py000066400000000000000000000142741303523157100227640ustar00rootroot00000000000000""" The local transport. Stats are pickled and written to /store/-timestamp Leans on FileMonitor to detect changes """ import os import select import time import traceback import Bcfg2.Options import Bcfg2.Server.FileMonitor from Bcfg2.Reporting.Collector import ReportingCollector, ReportingError from Bcfg2.Reporting.Transport.base import TransportBase, TransportError from Bcfg2.Compat import cPickle class LocalFilesystem(TransportBase): options = TransportBase.options + [Bcfg2.Options.Common.filemonitor] def __init__(self): super(LocalFilesystem, self).__init__() self.work_path = "%s/work" % self.data self.debug_log("LocalFilesystem: work path %s" % self.work_path) self.fmon = None self._phony_collector = None #setup our local paths or die if not os.path.exists(self.work_path): try: os.makedirs(self.work_path) except: self.logger.error("%s: Unable to create storage: %s" % (self.__class__.__name__, traceback.format_exc().splitlines()[-1])) raise TransportError def set_debug(self, debug): rv = TransportBase.set_debug(self, debug) if self.fmon is not None: self.fmon.set_debug(debug) return rv def start_monitor(self, collector): """Start the file monitor. Most of this comes from BaseCore""" try: self.fmon = Bcfg2.Server.FileMonitor.get_fam() except IOError: msg = "Failed to instantiate fam driver %s" % \ Bcfg2.Options.setup.filemonitor self.logger.error(msg, exc_info=1) raise TransportError(msg) if self.debug_flag: self.fmon.set_debug(self.debug_flag) self.fmon.start() self.fmon.AddMonitor(self.work_path, self) def store(self, hostname, metadata, stats): """Store the file to disk""" try: payload = cPickle.dumps(dict(hostname=hostname, metadata=metadata, stats=stats)) except: # pylint: disable=W0702 msg = "%s: Failed to build interaction object: %s" % \ (self.__class__.__name__, traceback.format_exc().splitlines()[-1]) self.logger.error(msg) raise TransportError(msg) fname = "%s-%s" % (hostname, time.time()) save_file = os.path.join(self.work_path, fname) tmp_file = os.path.join(self.work_path, "." + fname) if os.path.exists(save_file): self.logger.error("%s: Oops.. duplicate statistic in directory." % self.__class__.__name__) raise TransportError # using a tmpfile to hopefully avoid the file monitor from grabbing too # soon saved = open(tmp_file, 'wb') try: saved.write(payload) except IOError: self.logger.error("Failed to store interaction for %s: %s" % (hostname, traceback.format_exc().splitlines()[-1])) os.unlink(tmp_file) saved.close() os.rename(tmp_file, save_file) def fetch(self): """Fetch the next object""" event = None fmonfd = self.fmon.fileno() if self.fmon.pending(): event = self.fmon.get_event() elif fmonfd: select.select([fmonfd], [], [], self.timeout) if self.fmon.pending(): event = self.fmon.get_event() else: # pseudo.. 
if nothings pending sleep and loop time.sleep(self.timeout) if not event or event.filename == self.work_path: return None #deviate from the normal routines here we only want one event etype = event.code2str() self.debug_log("Recieved event %s for %s" % (etype, event.filename)) if os.path.basename(event.filename)[0] == '.': return None if etype in ('created', 'exists'): self.debug_log("Handling event %s" % event.filename) payload = os.path.join(self.work_path, event.filename) try: payloadfd = open(payload, "rb") interaction = cPickle.load(payloadfd) payloadfd.close() os.unlink(payload) return interaction except IOError: self.logger.error("Failed to read payload: %s" % traceback.format_exc().splitlines()[-1]) except cPickle.UnpicklingError: self.logger.error("Failed to unpickle payload: %s" % traceback.format_exc().splitlines()[-1]) payloadfd.close() raise TransportError return None def shutdown(self): """Called at program exit""" if self.fmon: self.fmon.shutdown() if self._phony_collector: self._phony_collector.shutdown() def rpc(self, method, *args, **kwargs): """ Here this is more of a dummy. Rather then start a layer which doesn't exist or muck with files, start the collector This will all change when other layers are added """ try: if not self._phony_collector: self._phony_collector = ReportingCollector() except ReportingError: raise TransportError except: self.logger.error("Failed to load collector: %s" % traceback.format_exc().splitlines()[-1]) raise TransportError if not method in self._phony_collector.storage.__class__.__rmi__ or \ not hasattr(self._phony_collector.storage, method): self.logger.error("Unknown method %s called on storage engine %s" % (method, self._phony_collector.storage.__class__.__name__)) raise TransportError try: cls_method = getattr(self._phony_collector.storage, method) return cls_method(*args, **kwargs) except: self.logger.error("RPC method %s failed: %s" % (method, traceback.format_exc().splitlines()[-1])) raise TransportError src/lib/Bcfg2/Reporting/Transport/RedisTransport.py000066400000000000000000000147031303523157100226450ustar00rootroot00000000000000""" The Redis transport. 
Stats are pickled and written to a redis queue """ import time import signal import platform import traceback import threading import Bcfg2.Options from Bcfg2.Reporting.Transport.base import TransportBase, TransportError from Bcfg2.Compat import cPickle try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False class RedisMessage(object): """An rpc message""" def __init__(self, channel, method, args=[], kwargs=dict()): self.channel = channel self.method = method self.args = args self.kwargs = kwargs class RedisTransport(TransportBase): """ Redis Transport Class """ STATS_KEY = 'bcfg2_statistics' COMMAND_KEY = 'bcfg2_command' options = TransportBase.options + [ Bcfg2.Options.Option( cf=('reporting', 'redis_host'), dest="reporting_redis_host", default='127.0.0.1', help='Reporting Redis host'), Bcfg2.Options.Option( cf=('reporting', 'redis_port'), dest="reporting_redis_port", default=6379, type=int, help='Reporting Redis port'), Bcfg2.Options.Option( cf=('reporting', 'redis_db'), dest="reporting_redis_db", default=0, type=int, help='Reporting Redis DB')] def __init__(self): super(RedisTransport, self).__init__() self._commands = None self.logger.error("Warning: RedisTransport is experimental") if not HAS_REDIS: self.logger.error("redis python module is not available") raise TransportError self._redis = redis.Redis( host=Bcfg2.Options.setup.reporting_redis_host, port=Bcfg2.Options.setup.reporting_redis_port, db=Bcfg2.Options.setup.reporting_redis_db) def start_monitor(self, collector): """Start the monitor. Eventaully start the command thread""" self._commands = threading.Thread(target=self.monitor_thread, args=(self._redis, collector)) self._commands.start() def store(self, hostname, metadata, stats): """Store the file to disk""" try: payload = cPickle.dumps(dict(hostname=hostname, metadata=metadata, stats=stats)) except: # pylint: disable=W0702 msg = "%s: Failed to build interaction object: %s" % \ (self.__class__.__name__, traceback.format_exc().splitlines()[-1]) self.logger.error(msg) raise TransportError(msg) try: self._redis.rpush(RedisTransport.STATS_KEY, payload) except redis.RedisError: self.logger.error("Failed to store interaction for %s: %s" % (hostname, traceback.format_exc().splitlines()[-1])) def fetch(self): """Fetch the next object""" try: payload = self._redis.blpop(RedisTransport.STATS_KEY, timeout=5) if payload: return cPickle.loads(payload[1]) except redis.RedisError: self.logger.error("Failed to fetch an interaction: %s" % (traceback.format_exc().splitlines()[-1])) except cPickle.UnpicklingError: self.logger.error("Failed to unpickle payload: %s" % traceback.format_exc().splitlines()[-1]) raise TransportError return None def shutdown(self): """Called at program exit""" self._redis = None def rpc(self, method, *args, **kwargs): """ Send a command to the queue. 
Timeout after 10 seconds """ pubsub = self._redis.pubsub() channel = "%s%s" % (platform.node(), int(time.time())) pubsub.subscribe(channel) self._redis.rpush(RedisTransport.COMMAND_KEY, cPickle.dumps(RedisMessage(channel, method, args, kwargs))) resp = pubsub.listen() signal.signal(signal.SIGALRM, self.shutdown) signal.alarm(10) resp.next() # clear subscribe message response = resp.next() pubsub.unsubscribe() try: return cPickle.loads(response['data']) except: # pylint: disable=W0702 msg = "%s: Failed to receive response: %s" % \ (self.__class__.__name__, traceback.format_exc().splitlines()[-1]) self.logger.error(msg) return None def monitor_thread(self, rclient, collector): """Watch the COMMAND_KEY queue for rpc commands""" self.logger.info("Command thread started") while not collector.terminate.isSet(): try: payload = rclient.blpop(RedisTransport.COMMAND_KEY, timeout=5) if not payload: continue message = cPickle.loads(payload[1]) if not isinstance(message, RedisMessage): self.logger.error("Message \"%s\" is not a RedisMessage" % message) if not message.method in collector.storage.__class__.__rmi__ or\ not hasattr(collector.storage, message.method): self.logger.error( "Unknown method %s called on storage engine %s" % (message.method, collector.storage.__class__.__name__)) raise TransportError try: cls_method = getattr(collector.storage, message.method) response = cls_method(*message.args, **message.kwargs) response = cPickle.dumps(response) except: self.logger.error("RPC method %s failed: %s" % (message.method, traceback.format_exc().splitlines()[-1])) raise TransportError rclient.publish(message.channel, response) except redis.RedisError: self.logger.error("Failed to fetch an interaction: %s" % (traceback.format_exc().splitlines()[-1])) except cPickle.UnpicklingError: self.logger.error("Failed to unpickle payload: %s" % traceback.format_exc().splitlines()[-1]) except TransportError: pass except: # pylint: disable=W0702 self.logger.error("Unhandled exception in command thread: %s" % traceback.format_exc().splitlines()[-1]) self.logger.info("Command thread shutdown") src/lib/Bcfg2/Reporting/Transport/__init__.py000066400000000000000000000000421303523157100214100ustar00rootroot00000000000000""" Public transport routines """ src/lib/Bcfg2/Reporting/Transport/base.py000066400000000000000000000030051303523157100205650ustar00rootroot00000000000000""" The base for all server -> collector Transports """ import os import sys import Bcfg2.Options from Bcfg2.Logger import Debuggable class TransportError(Exception): """Generic TransportError""" pass class TransportBase(Debuggable): """The base for all transports""" options = Debuggable.options def __init__(self): """Do something here""" clsname = self.__class__.__name__ Debuggable.__init__(self, name=clsname) self.debug_log("Loading %s transport" % clsname) self.data = os.path.join(Bcfg2.Options.setup.repository, 'Reporting', clsname) if not os.path.exists(self.data): self.logger.info("%s does not exist, creating" % self.data) try: os.makedirs(self.data) except OSError: self.logger.warning("Could not create %s: %s" % (self.data, sys.exc_info()[1])) self.logger.warning("The transport may not function properly") self.timeout = 2 def start_monitor(self, collector): """Called to start monitoring""" raise NotImplementedError def store(self, hostname, metadata, stats): raise NotImplementedError def fetch(self): raise NotImplementedError def shutdown(self): """Called at program exit""" pass def rpc(self, method, *args, **kwargs): """Send a request for data 
to the collector""" raise NotImplementedError src/lib/Bcfg2/Reporting/__init__.py000066400000000000000000000000001303523157100174060ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/migrations/000077500000000000000000000000001303523157100174635ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/migrations/0001_initial.py000066400000000000000000000306031303523157100221300ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='ActionEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.IntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('status', models.CharField(default=b'check', max_length=128)), ('output', models.IntegerField(default=0)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='Bundle', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=255)), ], options={ 'ordering': ('name',), }, bases=(models.Model,), ), migrations.CreateModel( name='Client', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('creation', models.DateTimeField(auto_now_add=True)), ('name', models.CharField(max_length=128)), ('expiration', models.DateTimeField(null=True, blank=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='FailureEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.IntegerField(editable=False, db_index=True)), ('entry_type', models.CharField(max_length=128)), ('message', models.TextField()), ], options={ 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='FileAcl', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='FilePerms', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('owner', models.CharField(max_length=128)), ('group', models.CharField(max_length=128)), ('perms', models.CharField(max_length=128)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=255)), ('profile', models.BooleanField(default=False)), ('public', models.BooleanField(default=False)), ('category', models.CharField(max_length=1024, blank=True)), ('comment', models.TextField(blank=True)), ('bundles', models.ManyToManyField(to='Reporting.Bundle')), ('groups', models.ManyToManyField(to='Reporting.Group')), ], options={ 'ordering': ('name',), }, bases=(models.Model,), ), migrations.CreateModel( name='Interaction', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)), ('timestamp', models.DateTimeField(db_index=True)), ('state', models.CharField(max_length=32)), ('repo_rev_code', models.CharField(max_length=64)), ('server', models.CharField(max_length=256)), ('good_count', models.IntegerField()), ('total_count', models.IntegerField()), ('bad_count', models.IntegerField(default=0)), ('modified_count', models.IntegerField(default=0)), ('extra_count', models.IntegerField(default=0)), ('actions', models.ManyToManyField(to='Reporting.ActionEntry')), ('bundles', models.ManyToManyField(to='Reporting.Bundle')), ('client', models.ForeignKey(related_name='interactions', to='Reporting.Client')), ('failures', models.ManyToManyField(to='Reporting.FailureEntry')), ('groups', models.ManyToManyField(to='Reporting.Group')), ], options={ 'ordering': ['-timestamp'], 'get_latest_by': 'timestamp', }, bases=(models.Model,), ), migrations.CreateModel( name='PackageEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.IntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('target_version', models.CharField(default=b'', max_length=1024)), ('current_version', models.CharField(max_length=1024)), ('verification_details', models.TextField(default=b'')), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='PathEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.IntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('path_type', models.CharField(max_length=128, choices=[(b'device', b'Device'), (b'directory', b'Directory'), (b'hardlink', b'Hard Link'), (b'nonexistent', b'Non Existent'), (b'permissions', b'Permissions'), (b'symlink', b'Symlink')])), ('detail_type', models.IntegerField(default=0, choices=[(0, b'Unused'), (1, b'Diff'), (2, b'Binary'), (3, b'Sensitive'), (4, b'Size limit exceeded'), (5, b'VCS output'), (6, b'Pruned paths')])), ('details', models.TextField(default=b'')), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='LinkEntry', fields=[ ('pathentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='Reporting.PathEntry')), ('target_path', models.CharField(max_length=1024, blank=True)), ('current_path', models.CharField(max_length=1024, blank=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=('Reporting.pathentry',), ), migrations.CreateModel( name='DeviceEntry', fields=[ ('pathentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='Reporting.PathEntry')), ('device_type', models.CharField(max_length=16, choices=[(b'block', b'Block'), (b'char', b'Char'), (b'fifo', b'Fifo')])), ('target_major', models.IntegerField()), ('target_minor', models.IntegerField()), ('current_major', models.IntegerField()), ('current_minor', models.IntegerField()), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, 
bases=('Reporting.pathentry',), ), migrations.CreateModel( name='Performance', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('metric', models.CharField(max_length=128)), ('value', models.DecimalField(max_digits=32, decimal_places=16)), ('interaction', models.ForeignKey(related_name='performance_items', to='Reporting.Interaction')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='ServiceEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.IntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('target_status', models.CharField(default=b'', max_length=128)), ('current_status', models.CharField(default=b'', max_length=128)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.AddField( model_name='pathentry', name='acls', field=models.ManyToManyField(to='Reporting.FileAcl'), preserve_default=True, ), migrations.AddField( model_name='pathentry', name='current_perms', field=models.ForeignKey(related_name='+', to='Reporting.FilePerms'), preserve_default=True, ), migrations.AddField( model_name='pathentry', name='target_perms', field=models.ForeignKey(related_name='+', to='Reporting.FilePerms'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='packages', field=models.ManyToManyField(to='Reporting.PackageEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='paths', field=models.ManyToManyField(to='Reporting.PathEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='profile', field=models.ForeignKey(related_name='+', to='Reporting.Group'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='services', field=models.ManyToManyField(to='Reporting.ServiceEntry'), preserve_default=True, ), migrations.AlterUniqueTogether( name='interaction', unique_together=set([('client', 'timestamp')]), ), migrations.AlterUniqueTogether( name='fileperms', unique_together=set([('owner', 'group', 'perms')]), ), migrations.AddField( model_name='client', name='current_interaction', field=models.ForeignKey(related_name='parent_client', blank=True, to='Reporting.Interaction', null=True), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/0002_convert_perms_to_mode.py000066400000000000000000000010121303523157100250640ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0001_initial'), ] operations = [ migrations.RenameField( model_name='fileperms', old_name='perms', new_name='mode', ), migrations.AlterUniqueTogether( name='fileperms', unique_together=set([('owner', 'group', 'mode')]), ), ] src/lib/Bcfg2/Reporting/migrations/0003_expand_hash_key.py000066400000000000000000000024741303523157100236400ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0002_convert_perms_to_mode'), ] operations = [ migrations.AlterField( model_name='actionentry', name='hash_key', 
field=models.BigIntegerField(editable=False, db_index=True), preserve_default=True, ), migrations.AlterField( model_name='failureentry', name='hash_key', field=models.BigIntegerField(editable=False, db_index=True), preserve_default=True, ), migrations.AlterField( model_name='packageentry', name='hash_key', field=models.BigIntegerField(editable=False, db_index=True), preserve_default=True, ), migrations.AlterField( model_name='pathentry', name='hash_key', field=models.BigIntegerField(editable=False, db_index=True), preserve_default=True, ), migrations.AlterField( model_name='serviceentry', name='hash_key', field=models.BigIntegerField(editable=False, db_index=True), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/0004_profile_can_be_null.py000066400000000000000000000007371303523157100244700ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0003_expand_hash_key'), ] operations = [ migrations.AlterField( model_name='interaction', name='profile', field=models.ForeignKey(related_name='+', to='Reporting.Group', null=True), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/0005_add_selinux_entry_support.py000066400000000000000000000235341303523157100260240ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0004_profile_can_be_null'), ] operations = [ migrations.CreateModel( name='SEBooleanEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('value', models.BooleanField(default=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEFcontextEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('selinuxtype', models.CharField(max_length=128)), ('current_selinuxtype', models.CharField(max_length=128, null=True)), ('filetype', models.CharField(max_length=16)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEInterfaceEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('selinuxtype', models.CharField(max_length=128)), ('current_selinuxtype', models.CharField(max_length=128, null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SELoginEntry', 
fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('selinuxuser', models.CharField(max_length=128)), ('current_selinuxuser', models.CharField(max_length=128, null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEModuleEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('disabled', models.BooleanField(default=False)), ('current_disabled', models.BooleanField(default=False)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SENodeEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('selinuxtype', models.CharField(max_length=128)), ('current_selinuxtype', models.CharField(max_length=128, null=True)), ('proto', models.CharField(max_length=4)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEPermissiveEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEPortEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('selinuxtype', models.CharField(max_length=128)), ('current_selinuxtype', models.CharField(max_length=128, null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='SEUserEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('roles', 
models.CharField(max_length=128)), ('current_roles', models.CharField(max_length=128, null=True)), ('prefix', models.CharField(max_length=128)), ('current_prefix', models.CharField(max_length=128, null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.AddField( model_name='interaction', name='sebooleans', field=models.ManyToManyField(to='Reporting.SEBooleanEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='sefcontexts', field=models.ManyToManyField(to='Reporting.SEFcontextEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='seinterfaces', field=models.ManyToManyField(to='Reporting.SEInterfaceEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='selogins', field=models.ManyToManyField(to='Reporting.SELoginEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='semodules', field=models.ManyToManyField(to='Reporting.SEModuleEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='senodes', field=models.ManyToManyField(to='Reporting.SENodeEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='sepermissives', field=models.ManyToManyField(to='Reporting.SEPermissiveEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='seports', field=models.ManyToManyField(to='Reporting.SEPortEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='seusers', field=models.ManyToManyField(to='Reporting.SEUserEntry'), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/0006_add_user_group_entry_support.py000066400000000000000000000056371303523157100265340ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0005_add_selinux_entry_support'), ] operations = [ migrations.CreateModel( name='POSIXGroupEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('gid', models.IntegerField(null=True)), ('current_gid', models.IntegerField(null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='POSIXUserEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128, db_index=True)), ('hash_key', models.BigIntegerField(editable=False, db_index=True)), ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), ('exists', models.BooleanField(default=True)), ('uid', models.IntegerField(null=True)), ('current_uid', models.IntegerField(null=True)), ('group', models.CharField(max_length=64)), ('current_group', models.CharField(max_length=64, null=True)), ('gecos', models.CharField(max_length=1024)), ('current_gecos', models.CharField(max_length=1024, null=True)), ('home', models.CharField(max_length=1024)), ('current_home', models.CharField(max_length=1024, null=True)), ('shell', 
models.CharField(default=b'/bin/bash', max_length=1024)), ('current_shell', models.CharField(max_length=1024, null=True)), ], options={ 'ordering': ('state', 'name'), 'abstract': False, }, bases=(models.Model,), ), migrations.AddField( model_name='interaction', name='posixgroups', field=models.ManyToManyField(to='Reporting.POSIXGroupEntry'), preserve_default=True, ), migrations.AddField( model_name='interaction', name='posixusers', field=models.ManyToManyField(to='Reporting.POSIXUserEntry'), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/0007_add_flag_fields_interaction.py000066400000000000000000000012221303523157100261460ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('Reporting', '0006_add_user_group_entry_support'), ] operations = [ migrations.AddField( model_name='interaction', name='dry_run', field=models.BooleanField(default=False), preserve_default=True, ), migrations.AddField( model_name='interaction', name='only_important', field=models.BooleanField(default=False), preserve_default=True, ), ] src/lib/Bcfg2/Reporting/migrations/__init__.py000066400000000000000000000000001303523157100215620ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/models.py000066400000000000000000000605561303523157100171600ustar00rootroot00000000000000"""Django models for Bcfg2 reports.""" import sys import django from django.core.exceptions import ImproperlyConfigured try: from django.db import models, connections except ImproperlyConfigured: e = sys.exc_info()[1] print("Reports: unable to import django models: %s" % e) sys.exit(1) from django.core.cache import cache from datetime import datetime, timedelta from Bcfg2.Compat import cPickle from Bcfg2.DBSettings import get_db_label TYPE_GOOD = 0 TYPE_BAD = 1 TYPE_MODIFIED = 2 TYPE_EXTRA = 3 TYPE_CHOICES = ( (TYPE_GOOD, 'Good'), (TYPE_BAD, 'Bad'), (TYPE_MODIFIED, 'Modified'), (TYPE_EXTRA, 'Extra'), ) _our_backend = None def convert_entry_type_to_id(type_name): """Convert a entry type to its entry id""" for e_id, e_name in TYPE_CHOICES: if e_name.lower() == type_name.lower(): return e_id return -1 def hash_entry(entry_dict): """ Build a key for this based on its data entry_dict = a dict of all the data identifying this """ dataset = [] for key in sorted(entry_dict.keys()): if key in ('id', 'hash_key') or key.startswith('_'): continue dataset.append((key, entry_dict[key])) return hash(cPickle.dumps(dataset)) def _quote(value): """ Quote a string to use as a table name or column Newer versions and various drivers require an argument https://code.djangoproject.com/ticket/13630 """ global _our_backend if not _our_backend: if django.VERSION[0] == 1 and django.VERSION[1] >= 7: _our_backend = connections[get_db_label('Reporting')].ops else: from django.db import backend try: _our_backend = backend.DatabaseOperations( connections[get_db_label('Reporting')]) except TypeError: _our_backend = backend.DatabaseOperations() return _our_backend.quote_name(value) class Client(models.Model): """Object representing every client we have seen stats for.""" creation = models.DateTimeField(auto_now_add=True) name = models.CharField(max_length=128,) current_interaction = models.ForeignKey('Interaction', null=True, blank=True, related_name="parent_client") expiration = models.DateTimeField(blank=True, null=True) def __str__(self): return self.name class InteractionManager(models.Manager): """Manages interactions objects.""" 
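    # Illustrative usage sketch, not part of the original source: this manager
    # is attached to Interaction as ``objects``, so callers can ask for the
    # newest interaction per client as of an arbitrary cutoff date.  The names
    # ``cutoff`` and ``day_old`` below are hypothetical.
    #
    #     from datetime import datetime, timedelta
    #     cutoff = datetime.now() - timedelta(hours=24)
    #     day_old = Interaction.objects.recent(maxdate=cutoff)
    #     for interaction in day_old:
    #         print("%s: %s" % (interaction.client.name, interaction.state))
    #
    # recent() checks that maxdate is a datetime and then filters on the ids
    # returned by recent_ids() below.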
def recent_ids(self, maxdate=None): """ Returns the ids of most recent interactions for clients as of a date. Arguments: maxdate -- datetime object. Most recent date to pull. (default None) """ from django.db import connections cursor = connections[get_db_label('Reporting')].cursor() cfilter = "expiration is null" sql = 'select ri.id, x.client_id from ' + \ '(select client_id, MAX(timestamp) as timer from ' + \ _quote('Reporting_interaction') if maxdate: if not isinstance(maxdate, datetime): raise ValueError('Expected a datetime object') sql = sql + " where timestamp <= '%s' " % maxdate cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate, maxdate) sql = sql + ' GROUP BY client_id) x, ' + \ _quote('Reporting_interaction') + \ ' ri where ri.client_id = x.client_id AND' + \ ' ri.timestamp = x.timer and x.client_id in' + \ ' (select id from %s where %s)' % \ (_quote('Reporting_client'), cfilter) try: cursor.execute(sql) return [item[0] for item in cursor.fetchall()] except: '''FIXME - really need some error handling''' pass return [] def recent(self, maxdate=None): """ Returns the most recent interactions for clients as of a date Arguments: maxdate -- datetime object. Most recent date to pull. (dafault None) """ if maxdate and not isinstance(maxdate, datetime): raise ValueError('Expected a datetime object') return self.filter(id__in=self.recent_ids(maxdate)) class Interaction(models.Model): """ Models each reconfiguration operation interaction between client and server. """ client = models.ForeignKey(Client, related_name="interactions") timestamp = models.DateTimeField(db_index=True) # Timestamp for this record state = models.CharField(max_length=32) # good/bad/modified/etc repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction server = models.CharField(max_length=256) # server used for interaction good_count = models.IntegerField() # of good config-items total_count = models.IntegerField() # of total config-items bad_count = models.IntegerField(default=0) modified_count = models.IntegerField(default=0) extra_count = models.IntegerField(default=0) dry_run = models.BooleanField(default=False) only_important = models.BooleanField(default=False) actions = models.ManyToManyField("ActionEntry") packages = models.ManyToManyField("PackageEntry") paths = models.ManyToManyField("PathEntry") services = models.ManyToManyField("ServiceEntry") sebooleans = models.ManyToManyField("SEBooleanEntry") seports = models.ManyToManyField("SEPortEntry") sefcontexts = models.ManyToManyField("SEFcontextEntry") senodes = models.ManyToManyField("SENodeEntry") selogins = models.ManyToManyField("SELoginEntry") seusers = models.ManyToManyField("SEUserEntry") seinterfaces = models.ManyToManyField("SEInterfaceEntry") sepermissives = models.ManyToManyField("SEPermissiveEntry") semodules = models.ManyToManyField("SEModuleEntry") posixusers = models.ManyToManyField("POSIXUserEntry") posixgroups = models.ManyToManyField("POSIXGroupEntry") failures = models.ManyToManyField("FailureEntry") entry_types = ('actions', 'failures', 'packages', 'paths', 'services', 'sebooleans', 'seports', 'sefcontexts', 'senodes', 'selogins', 'seusers', 'seinterfaces', 'sepermissives', 'semodules', 'posixusers', 'posixgroups') # Formerly InteractionMetadata profile = models.ForeignKey("Group", related_name="+", null=True) groups = models.ManyToManyField("Group") bundles = models.ManyToManyField("Bundle") objects = InteractionManager() def __str__(self): return "With " + self.client.name 
+ " @ " + self.timestamp.isoformat() def percentgood(self): if not self.total_count == 0: return (self.good_count / float(self.total_count)) * 100 else: return 0 def percentbad(self): if not self.total_count == 0: return ((self.total_count - self.good_count) / (float(self.total_count))) * 100 else: return 0 def isclean(self): if (self.bad_count == 0 and self.good_count == self.total_count): return True else: return False def isstale(self): if (self == self.client.current_interaction): # Is Mostrecent if(datetime.now() - self.timestamp > timedelta(hours=25)): return True else: return False else: #Search for subsequent Interaction for this client #Check if it happened more than 25 hrs ago. if (self.client.interactions.filter(timestamp__gt=self.timestamp) .order_by('timestamp')[0].timestamp - self.timestamp > timedelta(hours=25)): return True else: return False def save(self): super(Interaction, self).save() # call the real save... self.client.current_interaction = self.client.interactions.latest() self.client.save() # save again post update def delete(self): '''Override the default delete. Allows us to remove Performance items ''' pitems = list(self.performance_items.all()) super(Interaction, self).delete() for perf in pitems: if perf.interaction.count() == 0: perf.delete() def badcount(self): return self.total_count - self.good_count def bad(self): rv = [] for entry in self.entry_types: if entry == 'failures': continue rv.extend(getattr(self, entry).filter(state=TYPE_BAD)) return rv def modified(self): rv = [] for entry in self.entry_types: if entry == 'failures': continue rv.extend(getattr(self, entry).filter(state=TYPE_MODIFIED)) return rv def extra(self): rv = [] for entry in self.entry_types: if entry == 'failures': continue rv.extend(getattr(self, entry).filter(state=TYPE_EXTRA)) return rv class Meta: get_latest_by = 'timestamp' ordering = ['-timestamp'] unique_together = ("client", "timestamp") class Performance(models.Model): """Object representing performance data for any interaction.""" interaction = models.ForeignKey(Interaction, related_name="performance_items") metric = models.CharField(max_length=128) value = models.DecimalField(max_digits=32, decimal_places=16) def __str__(self): return self.metric class Group(models.Model): """ Groups extracted from interactions name - The group name TODO - Most of this is for future use TODO - set a default group """ name = models.CharField(max_length=255, unique=True) profile = models.BooleanField(default=False) public = models.BooleanField(default=False) category = models.CharField(max_length=1024, blank=True) comment = models.TextField(blank=True) groups = models.ManyToManyField("self", symmetrical=False) bundles = models.ManyToManyField("Bundle") def __unicode__(self): return self.name class Meta: ordering = ('name',) @staticmethod def prune_orphans(): '''Prune unused groups''' Group.objects.filter(interaction__isnull=True, group__isnull=True).delete() class Bundle(models.Model): """ Bundles extracted from interactions name - The bundle name """ name = models.CharField(max_length=255, unique=True) def __unicode__(self): return self.name class Meta: ordering = ('name',) @staticmethod def prune_orphans(): '''Prune unused bundles''' Bundle.objects.filter(interaction__isnull=True, group__isnull=True).delete() # new interaction models class FilePerms(models.Model): owner = models.CharField(max_length=128) group = models.CharField(max_length=128) mode = models.CharField(max_length=128) class Meta: unique_together = ('owner', 'group', 'mode') 
def empty(self): """Return true if we have no real data""" if self.owner or self.group or self.mode: return False else: return True class FileAcl(models.Model): """Placeholder""" name = models.CharField(max_length=128, db_index=True) class BaseEntry(models.Model): """ Abstract base for all entry types """ name = models.CharField(max_length=128, db_index=True) hash_key = models.BigIntegerField(editable=False, db_index=True) class Meta: abstract = True def save(self, *args, **kwargs): if 'hash_key' in kwargs: self.hash_key = kwargs['hash_key'] del kwargs['hash_key'] else: self.hash_key = hash_entry(self.__dict__) super(BaseEntry, self).save(*args, **kwargs) def class_name(self): return self.__class__.__name__ def short_list(self): """todo""" return [] @classmethod def entry_from_name(cls, name): try: newcls = globals()[name] if not isinstance(newcls(), cls): raise ValueError("%s is not an instance of %s" % (name, cls)) return newcls except KeyError: raise ValueError("Invalid type %s" % name) @classmethod def entry_from_type(cls, etype): for entry_cls in ENTRY_TYPES: if etype == entry_cls.ENTRY_TYPE: return entry_cls else: raise ValueError("Invalid type %s" % etype) @classmethod def entry_get_or_create(cls, act_dict): """Helper to quickly lookup an object""" cls_name = cls().__class__.__name__ act_hash = hash_entry(act_dict) # TODO - get form cache and validate act_key = "%s_%s" % (cls_name, act_hash) newact = cache.get(act_key) if newact: return newact acts = cls.objects.filter(hash_key=act_hash) if len(acts) > 0: for act in acts: for key in act_dict: if act_dict[key] != getattr(act, key): continue #match found newact = act break # worst case, its new if not newact: newact = cls(**act_dict) newact.save(hash_key=act_hash) cache.set(act_key, newact, 60 * 60) return newact def is_failure(self): return isinstance(self, FailureEntry) @classmethod def prune_orphans(cls): '''Remove unused entries''' # yeat another sqlite hack cls_orphans = [x['id'] for x in cls.objects.filter(interaction__isnull=True).values("id")] i = 0 while i < len(cls_orphans): cls.objects.filter(id__in=cls_orphans[i:i + 100]).delete() i += 100 class SuccessEntry(BaseEntry): """Base for successful entries""" state = models.IntegerField(choices=TYPE_CHOICES) exists = models.BooleanField(default=True) ENTRY_TYPE = r"Success" @property def entry_type(self): return self.ENTRY_TYPE def is_extra(self): return self.state == TYPE_EXTRA class Meta: abstract = True ordering = ('state', 'name') def short_list(self): """Return a list of problems""" rv = [] if self.is_extra(): rv.append("Extra") elif not self.exists: rv.append("Missing") return rv class FailureEntry(BaseEntry): """Represents objects that failed to bind""" entry_type = models.CharField(max_length=128) message = models.TextField() def is_failure(self): return True class ActionEntry(SuccessEntry): """ Action entry """ status = models.CharField(max_length=128, default="check") output = models.IntegerField(default=0) ENTRY_TYPE = r"Action" class SEBooleanEntry(SuccessEntry): """ SELinux boolean """ value = models.BooleanField(default=True) ENTRY_TYPE = r"SEBoolean" class SEPortEntry(SuccessEntry): """ SELinux port """ selinuxtype = models.CharField(max_length=128) current_selinuxtype = models.CharField(max_length=128, null=True) ENTRY_TYPE = r"SEPort" def selinuxtype_problem(self): """Check for an selinux type problem.""" if not self.current_selinuxtype: return True return self.selinuxtype != self.current_selinuxtype def short_list(self): """Return a list of problems""" rv = 
super(SEPortEntry, self).short_list() if self.selinuxtype_problem(): rv.append("Wrong SELinux type") return rv class SEFcontextEntry(SuccessEntry): """ SELinux file context """ selinuxtype = models.CharField(max_length=128) current_selinuxtype = models.CharField(max_length=128, null=True) filetype = models.CharField(max_length=16) ENTRY_TYPE = r"SEFcontext" def selinuxtype_problem(self): """Check for an selinux type problem.""" if not self.current_selinuxtype: return True return self.selinuxtype != self.current_selinuxtype def short_list(self): """Return a list of problems""" rv = super(SEFcontextEntry, self).short_list() if self.selinuxtype_problem(): rv.append("Wrong SELinux type") return rv class SENodeEntry(SuccessEntry): """ SELinux node """ selinuxtype = models.CharField(max_length=128) current_selinuxtype = models.CharField(max_length=128, null=True) proto = models.CharField(max_length=4) ENTRY_TYPE = r"SENode" def selinuxtype_problem(self): """Check for an selinux type problem.""" if not self.current_selinuxtype: return True return self.selinuxtype != self.current_selinuxtype def short_list(self): """Return a list of problems""" rv = super(SENodeEntry, self).short_list() if self.selinuxtype_problem(): rv.append("Wrong SELinux type") return rv class SELoginEntry(SuccessEntry): """ SELinux login """ selinuxuser = models.CharField(max_length=128) current_selinuxuser = models.CharField(max_length=128, null=True) ENTRY_TYPE = r"SELogin" class SEUserEntry(SuccessEntry): """ SELinux user """ roles = models.CharField(max_length=128) current_roles = models.CharField(max_length=128, null=True) prefix = models.CharField(max_length=128) current_prefix = models.CharField(max_length=128, null=True) ENTRY_TYPE = r"SEUser" class SEInterfaceEntry(SuccessEntry): """ SELinux interface """ selinuxtype = models.CharField(max_length=128) current_selinuxtype = models.CharField(max_length=128, null=True) ENTRY_TYPE = r"SEInterface" def selinuxtype_problem(self): """Check for an selinux type problem.""" if not self.current_selinuxtype: return True return self.selinuxtype != self.current_selinuxtype def short_list(self): """Return a list of problems""" rv = super(SEInterfaceEntry, self).short_list() if self.selinuxtype_problem(): rv.append("Wrong SELinux type") return rv class SEPermissiveEntry(SuccessEntry): """ SELinux permissive domain """ ENTRY_TYPE = r"SEPermissive" class SEModuleEntry(SuccessEntry): """ SELinux module """ disabled = models.BooleanField(default=False) current_disabled = models.BooleanField(default=False) ENTRY_TYPE = r"SEModule" class POSIXUserEntry(SuccessEntry): """ POSIX user """ uid = models.IntegerField(null=True) current_uid = models.IntegerField(null=True) group = models.CharField(max_length=64) current_group = models.CharField(max_length=64, null=True) gecos = models.CharField(max_length=1024) current_gecos = models.CharField(max_length=1024, null=True) home = models.CharField(max_length=1024) current_home = models.CharField(max_length=1024, null=True) shell = models.CharField(max_length=1024, default='/bin/bash') current_shell = models.CharField(max_length=1024, null=True) ENTRY_TYPE = r"POSIXUser" class POSIXGroupEntry(SuccessEntry): """ POSIX group """ gid = models.IntegerField(null=True) current_gid = models.IntegerField(null=True) ENTRY_TYPE = r"POSIXGroup" class PackageEntry(SuccessEntry): """ The new model for package information """ # if this is an extra entry target_version will be empty target_version = models.CharField(max_length=1024, default='') current_version = 
models.CharField(max_length=1024) verification_details = models.TextField(default="") ENTRY_TYPE = r"Package" # TODO - prune def version_problem(self): """Check for a version problem.""" if not self.current_version: return True if self.target_version != self.current_version: return True elif self.target_version == 'auto': return True else: return False def short_list(self): """Return a list of problems""" rv = super(PackageEntry, self).short_list() if self.is_extra(): return rv if not self.version_problem() or not self.exists: return rv if not self.current_version: rv.append("Missing") else: rv.append("Wrong version") return rv class PathEntry(SuccessEntry): """reason why modified or bad entry did not verify, or changed.""" PATH_TYPES = ( ("device", "Device"), ("directory", "Directory"), ("hardlink", "Hard Link"), ("nonexistent", "Non Existent"), ("permissions", "Permissions"), ("symlink", "Symlink"), ) DETAIL_UNUSED = 0 DETAIL_DIFF = 1 DETAIL_BINARY = 2 DETAIL_SENSITIVE = 3 DETAIL_SIZE_LIMIT = 4 DETAIL_VCS = 5 DETAIL_PRUNED = 6 DETAIL_CHOICES = ( (DETAIL_UNUSED, 'Unused'), (DETAIL_DIFF, 'Diff'), (DETAIL_BINARY, 'Binary'), (DETAIL_SENSITIVE, 'Sensitive'), (DETAIL_SIZE_LIMIT, 'Size limit exceeded'), (DETAIL_VCS, 'VCS output'), (DETAIL_PRUNED, 'Pruned paths'), ) path_type = models.CharField(max_length=128, choices=PATH_TYPES) target_perms = models.ForeignKey(FilePerms, related_name="+") current_perms = models.ForeignKey(FilePerms, related_name="+") acls = models.ManyToManyField(FileAcl) detail_type = models.IntegerField(default=0, choices=DETAIL_CHOICES) details = models.TextField(default='') ENTRY_TYPE = r"Path" def mode_problem(self): if self.current_perms.empty(): return False elif self.target_perms.mode != self.current_perms.mode: return True else: return False def has_detail(self): return self.detail_type != PathEntry.DETAIL_UNUSED def is_diff(self): return self.detail_type == PathEntry.DETAIL_DIFF def is_sensitive(self): return self.detail_type == PathEntry.DETAIL_SENSITIVE def is_binary(self): return self.detail_type == PathEntry.DETAIL_BINARY def is_too_large(self): return self.detail_type == PathEntry.DETAIL_SIZE_LIMIT def short_list(self): """Return a list of problems""" rv = super(PathEntry, self).short_list() if self.is_extra(): return rv if self.mode_problem(): rv.append("File mode") if self.detail_type == PathEntry.DETAIL_PRUNED: rv.append("Directory has extra files") elif self.detail_type != PathEntry.DETAIL_UNUSED: rv.append("Incorrect data") if hasattr(self, 'linkentry') and self.linkentry and \ self.linkentry.target_path != self.linkentry.current_path: rv.append("Incorrect target") return rv class LinkEntry(PathEntry): """Sym/Hard Link types""" target_path = models.CharField(max_length=1024, blank=True) current_path = models.CharField(max_length=1024, blank=True) def link_problem(self): return self.target_path != self.current_path class DeviceEntry(PathEntry): """Device types. 
Best I can tell the client driver needs work here""" DEVICE_TYPES = ( ("block", "Block"), ("char", "Char"), ("fifo", "Fifo"), ) device_type = models.CharField(max_length=16, choices=DEVICE_TYPES) target_major = models.IntegerField() target_minor = models.IntegerField() current_major = models.IntegerField() current_minor = models.IntegerField() class ServiceEntry(SuccessEntry): """ The new model for package information """ target_status = models.CharField(max_length=128, default='') current_status = models.CharField(max_length=128, default='') ENTRY_TYPE = r"Service" #TODO - prune def status_problem(self): return self.target_status != self.current_status def short_list(self): """Return a list of problems""" rv = super(ServiceEntry, self).short_list() if self.status_problem(): rv.append("Incorrect status") return rv ENTRY_TYPES = (ActionEntry, PackageEntry, PathEntry, ServiceEntry, SEBooleanEntry, SEPortEntry, SEFcontextEntry, SENodeEntry, SELoginEntry, SEUserEntry, SEInterfaceEntry, SEPermissiveEntry, SEModuleEntry) src/lib/Bcfg2/Reporting/south_migrations/000077500000000000000000000000001303523157100207055ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/south_migrations/0001_initial.py000066400000000000000000000707401303523157100233600ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Client' db.create_table('Reporting_client', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('creation', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_interaction', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_client', null=True, to=orm['Reporting.Interaction'])), ('expiration', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), )) db.send_create_signal('Reporting', ['Client']) # Adding model 'Interaction' db.create_table('Reporting_interaction', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('client', self.gf('django.db.models.fields.related.ForeignKey')(related_name='interactions', to=orm['Reporting.Client'])), ('timestamp', self.gf('django.db.models.fields.DateTimeField')(db_index=True)), ('state', self.gf('django.db.models.fields.CharField')(max_length=32)), ('repo_rev_code', self.gf('django.db.models.fields.CharField')(max_length=64)), ('server', self.gf('django.db.models.fields.CharField')(max_length=256)), ('good_count', self.gf('django.db.models.fields.IntegerField')()), ('total_count', self.gf('django.db.models.fields.IntegerField')()), ('bad_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('modified_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('extra_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('profile', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.Group'])), )) db.send_create_signal('Reporting', ['Interaction']) # Adding unique constraint on 'Interaction', fields ['client', 'timestamp'] db.create_unique('Reporting_interaction', ['client_id', 'timestamp']) # Adding M2M table for field actions on 'Interaction' db.create_table('Reporting_interaction_actions', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', 
models.ForeignKey(orm['Reporting.interaction'], null=False)), ('actionentry', models.ForeignKey(orm['Reporting.actionentry'], null=False)) )) db.create_unique('Reporting_interaction_actions', ['interaction_id', 'actionentry_id']) # Adding M2M table for field packages on 'Interaction' db.create_table('Reporting_interaction_packages', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('packageentry', models.ForeignKey(orm['Reporting.packageentry'], null=False)) )) db.create_unique('Reporting_interaction_packages', ['interaction_id', 'packageentry_id']) # Adding M2M table for field paths on 'Interaction' db.create_table('Reporting_interaction_paths', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)) )) db.create_unique('Reporting_interaction_paths', ['interaction_id', 'pathentry_id']) # Adding M2M table for field services on 'Interaction' db.create_table('Reporting_interaction_services', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('serviceentry', models.ForeignKey(orm['Reporting.serviceentry'], null=False)) )) db.create_unique('Reporting_interaction_services', ['interaction_id', 'serviceentry_id']) # Adding M2M table for field failures on 'Interaction' db.create_table('Reporting_interaction_failures', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('failureentry', models.ForeignKey(orm['Reporting.failureentry'], null=False)) )) db.create_unique('Reporting_interaction_failures', ['interaction_id', 'failureentry_id']) # Adding M2M table for field groups on 'Interaction' db.create_table('Reporting_interaction_groups', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('group', models.ForeignKey(orm['Reporting.group'], null=False)) )) db.create_unique('Reporting_interaction_groups', ['interaction_id', 'group_id']) # Adding M2M table for field bundles on 'Interaction' db.create_table('Reporting_interaction_bundles', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) )) db.create_unique('Reporting_interaction_bundles', ['interaction_id', 'bundle_id']) # Adding model 'Performance' db.create_table('Reporting_performance', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('interaction', self.gf('django.db.models.fields.related.ForeignKey')(related_name='performance_items', to=orm['Reporting.Interaction'])), ('metric', self.gf('django.db.models.fields.CharField')(max_length=128)), ('value', self.gf('django.db.models.fields.DecimalField')(max_digits=32, decimal_places=16)), )) db.send_create_signal('Reporting', ['Performance']) # Adding model 'Group' db.create_table('Reporting_group', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('profile', 
self.gf('django.db.models.fields.BooleanField')(default=False)), ('public', self.gf('django.db.models.fields.BooleanField')(default=False)), ('category', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), ('comment', self.gf('django.db.models.fields.TextField')(blank=True)), )) db.send_create_signal('Reporting', ['Group']) # Adding M2M table for field groups on 'Group' db.create_table('Reporting_group_groups', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_group', models.ForeignKey(orm['Reporting.group'], null=False)), ('to_group', models.ForeignKey(orm['Reporting.group'], null=False)) )) db.create_unique('Reporting_group_groups', ['from_group_id', 'to_group_id']) # Adding M2M table for field bundles on 'Group' db.create_table('Reporting_group_bundles', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('group', models.ForeignKey(orm['Reporting.group'], null=False)), ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) )) db.create_unique('Reporting_group_bundles', ['group_id', 'bundle_id']) # Adding model 'Bundle' db.create_table('Reporting_bundle', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), )) db.send_create_signal('Reporting', ['Bundle']) # Adding model 'FilePerms' db.create_table('Reporting_fileperms', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('owner', self.gf('django.db.models.fields.CharField')(max_length=128)), ('group', self.gf('django.db.models.fields.CharField')(max_length=128)), ('perms', self.gf('django.db.models.fields.CharField')(max_length=128)), )) db.send_create_signal('Reporting', ['FilePerms']) # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] db.create_unique('Reporting_fileperms', ['owner', 'group', 'perms']) # Adding model 'FileAcl' db.create_table('Reporting_fileacl', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), )) db.send_create_signal('Reporting', ['FileAcl']) # Adding model 'FailureEntry' db.create_table('Reporting_failureentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('entry_type', self.gf('django.db.models.fields.CharField')(max_length=128)), ('message', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal('Reporting', ['FailureEntry']) # Adding model 'ActionEntry' db.create_table('Reporting_actionentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('status', self.gf('django.db.models.fields.CharField')(default='check', max_length=128)), ('output', self.gf('django.db.models.fields.IntegerField')(default=0)), )) db.send_create_signal('Reporting', ['ActionEntry']) # Adding model 'PackageEntry' db.create_table('Reporting_packageentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', 
self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('target_version', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)), ('current_version', self.gf('django.db.models.fields.CharField')(max_length=1024)), ('verification_details', self.gf('django.db.models.fields.TextField')(default='')), )) db.send_create_signal('Reporting', ['PackageEntry']) # Adding model 'PathEntry' db.create_table('Reporting_pathentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('path_type', self.gf('django.db.models.fields.CharField')(max_length=128)), ('target_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), ('current_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), ('detail_type', self.gf('django.db.models.fields.IntegerField')(default=0)), ('details', self.gf('django.db.models.fields.TextField')(default='')), )) db.send_create_signal('Reporting', ['PathEntry']) # Adding M2M table for field acls on 'PathEntry' db.create_table('Reporting_pathentry_acls', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)), ('fileacl', models.ForeignKey(orm['Reporting.fileacl'], null=False)) )) db.create_unique('Reporting_pathentry_acls', ['pathentry_id', 'fileacl_id']) # Adding model 'LinkEntry' db.create_table('Reporting_linkentry', ( ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), ('target_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), ('current_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), )) db.send_create_signal('Reporting', ['LinkEntry']) # Adding model 'DeviceEntry' db.create_table('Reporting_deviceentry', ( ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), ('device_type', self.gf('django.db.models.fields.CharField')(max_length=16)), ('target_major', self.gf('django.db.models.fields.IntegerField')()), ('target_minor', self.gf('django.db.models.fields.IntegerField')()), ('current_major', self.gf('django.db.models.fields.IntegerField')()), ('current_minor', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('Reporting', ['DeviceEntry']) # Adding model 'ServiceEntry' db.create_table('Reporting_serviceentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('target_status', 
self.gf('django.db.models.fields.CharField')(default='', max_length=128)), ('current_status', self.gf('django.db.models.fields.CharField')(default='', max_length=128)), )) db.send_create_signal('Reporting', ['ServiceEntry']) def backwards(self, orm): # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] db.delete_unique('Reporting_fileperms', ['owner', 'group', 'perms']) # Removing unique constraint on 'Interaction', fields ['client', 'timestamp'] db.delete_unique('Reporting_interaction', ['client_id', 'timestamp']) # Deleting model 'Client' db.delete_table('Reporting_client') # Deleting model 'Interaction' db.delete_table('Reporting_interaction') # Removing M2M table for field actions on 'Interaction' db.delete_table('Reporting_interaction_actions') # Removing M2M table for field packages on 'Interaction' db.delete_table('Reporting_interaction_packages') # Removing M2M table for field paths on 'Interaction' db.delete_table('Reporting_interaction_paths') # Removing M2M table for field services on 'Interaction' db.delete_table('Reporting_interaction_services') # Removing M2M table for field failures on 'Interaction' db.delete_table('Reporting_interaction_failures') # Removing M2M table for field groups on 'Interaction' db.delete_table('Reporting_interaction_groups') # Removing M2M table for field bundles on 'Interaction' db.delete_table('Reporting_interaction_bundles') # Deleting model 'Performance' db.delete_table('Reporting_performance') # Deleting model 'Group' db.delete_table('Reporting_group') # Removing M2M table for field groups on 'Group' db.delete_table('Reporting_group_groups') # Removing M2M table for field bundles on 'Group' db.delete_table('Reporting_group_bundles') # Deleting model 'Bundle' db.delete_table('Reporting_bundle') # Deleting model 'FilePerms' db.delete_table('Reporting_fileperms') # Deleting model 'FileAcl' db.delete_table('Reporting_fileacl') # Deleting model 'FailureEntry' db.delete_table('Reporting_failureentry') # Deleting model 'ActionEntry' db.delete_table('Reporting_actionentry') # Deleting model 'PackageEntry' db.delete_table('Reporting_packageentry') # Deleting model 'PathEntry' db.delete_table('Reporting_pathentry') # Removing M2M table for field acls on 'PathEntry' db.delete_table('Reporting_pathentry_acls') # Deleting model 'LinkEntry' db.delete_table('Reporting_linkentry') # Deleting model 'DeviceEntry' db.delete_table('Reporting_deviceentry') # Deleting model 'ServiceEntry' db.delete_table('Reporting_serviceentry') models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', 
[], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'perms'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'perms': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/0002_convert_perms_to_mode.py000066400000000000000000000303651303523157100263230ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models from django.conf import settings class Migration(SchemaMigration): def forwards(self, orm): # Removing unique constraint on 'FilePerms', fields ['owner', 'perms', 'group'] db.delete_unique('Reporting_fileperms', ['owner', 'perms', 'group']) # Renaming field 'FilePerms.perms' to 'FilePerms.mode' db.rename_column('Reporting_fileperms', 'perms', 'mode') if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'mode'] db.create_unique('Reporting_fileperms', ['owner', 'group', 'mode']) def backwards(self, orm): # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'mode'] db.delete_unique('Reporting_fileperms', ['owner', 'group', 'mode']) # Renaming field 'FilePerms.mode' to 'FilePerms.perms' db.rename_column('Reporting_fileperms', 'mode', 'perms') if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': # Adding unique constraint on 'FilePerms', fields ['owner', 'perms', 'group'] db.create_unique('Reporting_fileperms', ['owner', 'perms', 'group']) models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], 
{'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 
'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': 
"orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) } } complete_apps = ['Reporting'] src/lib/Bcfg2/Reporting/south_migrations/0003_expand_hash_key.py000066400000000000000000000313561303523157100250630ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'FailureEntry.hash_key' db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) # Changing field 'PackageEntry.hash_key' db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) # Changing field 'ServiceEntry.hash_key' db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) # Changing field 'PathEntry.hash_key' db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) # Changing field 'ActionEntry.hash_key' db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) def backwards(self, orm): # Changing field 'FailureEntry.hash_key' db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) # Changing field 'PackageEntry.hash_key' db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) # Changing field 'ServiceEntry.hash_key' db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) # Changing field 'PathEntry.hash_key' 
db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) # Changing field 'ActionEntry.hash_key' db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 
'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/0004_profile_can_be_null.py000066400000000000000000000270721303523157100257130ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Interaction.profile' db.alter_column('Reporting_interaction', 'profile_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['Reporting.Group'])) def backwards(self, orm): # User chose to not deal with backwards NULL issues for 'Interaction.profile' raise RuntimeError("Cannot reverse this migration. 
'Interaction.profile' and its values cannot be restored.") models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': 
('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/0005_add_selinux_entry_support.py000066400000000000000000001020001303523157100272300ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'SELoginEntry' db.create_table('Reporting_seloginentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('selinuxuser', 
self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_selinuxuser', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), )) db.send_create_signal('Reporting', ['SELoginEntry']) # Adding model 'SEUserEntry' db.create_table('Reporting_seuserentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('roles', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_roles', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), ('prefix', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_prefix', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), )) db.send_create_signal('Reporting', ['SEUserEntry']) # Adding model 'SEBooleanEntry' db.create_table('Reporting_sebooleanentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('value', self.gf('django.db.models.fields.BooleanField')(default=True)), )) db.send_create_signal('Reporting', ['SEBooleanEntry']) # Adding model 'SENodeEntry' db.create_table('Reporting_senodeentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), ('proto', self.gf('django.db.models.fields.CharField')(max_length=4)), )) db.send_create_signal('Reporting', ['SENodeEntry']) # Adding model 'SEFcontextEntry' db.create_table('Reporting_sefcontextentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), ('filetype', self.gf('django.db.models.fields.CharField')(max_length=16)), )) db.send_create_signal('Reporting', ['SEFcontextEntry']) # Adding model 'SEInterfaceEntry' db.create_table('Reporting_seinterfaceentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', 
self.gf('django.db.models.fields.BooleanField')(default=True)), ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), )) db.send_create_signal('Reporting', ['SEInterfaceEntry']) # Adding model 'SEPermissiveEntry' db.create_table('Reporting_sepermissiveentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), )) db.send_create_signal('Reporting', ['SEPermissiveEntry']) # Adding model 'SEModuleEntry' db.create_table('Reporting_semoduleentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), ('current_disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('Reporting', ['SEModuleEntry']) # Adding model 'SEPortEntry' db.create_table('Reporting_seportentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), )) db.send_create_signal('Reporting', ['SEPortEntry']) # Adding M2M table for field sebooleans on 'Interaction' db.create_table('Reporting_interaction_sebooleans', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('sebooleanentry', models.ForeignKey(orm['Reporting.sebooleanentry'], null=False)) )) db.create_unique('Reporting_interaction_sebooleans', ['interaction_id', 'sebooleanentry_id']) # Adding M2M table for field seports on 'Interaction' db.create_table('Reporting_interaction_seports', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('seportentry', models.ForeignKey(orm['Reporting.seportentry'], null=False)) )) db.create_unique('Reporting_interaction_seports', ['interaction_id', 'seportentry_id']) # Adding M2M table for field sefcontexts on 'Interaction' db.create_table('Reporting_interaction_sefcontexts', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('sefcontextentry', models.ForeignKey(orm['Reporting.sefcontextentry'], null=False)) )) db.create_unique('Reporting_interaction_sefcontexts', ['interaction_id', 'sefcontextentry_id']) # Adding M2M table for field 
senodes on 'Interaction' db.create_table('Reporting_interaction_senodes', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('senodeentry', models.ForeignKey(orm['Reporting.senodeentry'], null=False)) )) db.create_unique('Reporting_interaction_senodes', ['interaction_id', 'senodeentry_id']) # Adding M2M table for field selogins on 'Interaction' db.create_table('Reporting_interaction_selogins', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('seloginentry', models.ForeignKey(orm['Reporting.seloginentry'], null=False)) )) db.create_unique('Reporting_interaction_selogins', ['interaction_id', 'seloginentry_id']) # Adding M2M table for field seusers on 'Interaction' db.create_table('Reporting_interaction_seusers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('seuserentry', models.ForeignKey(orm['Reporting.seuserentry'], null=False)) )) db.create_unique('Reporting_interaction_seusers', ['interaction_id', 'seuserentry_id']) # Adding M2M table for field seinterfaces on 'Interaction' db.create_table('Reporting_interaction_seinterfaces', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('seinterfaceentry', models.ForeignKey(orm['Reporting.seinterfaceentry'], null=False)) )) db.create_unique('Reporting_interaction_seinterfaces', ['interaction_id', 'seinterfaceentry_id']) # Adding M2M table for field sepermissives on 'Interaction' db.create_table('Reporting_interaction_sepermissives', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('sepermissiveentry', models.ForeignKey(orm['Reporting.sepermissiveentry'], null=False)) )) db.create_unique('Reporting_interaction_sepermissives', ['interaction_id', 'sepermissiveentry_id']) # Adding M2M table for field semodules on 'Interaction' db.create_table('Reporting_interaction_semodules', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('semoduleentry', models.ForeignKey(orm['Reporting.semoduleentry'], null=False)) )) db.create_unique('Reporting_interaction_semodules', ['interaction_id', 'semoduleentry_id']) def backwards(self, orm): # Deleting model 'SELoginEntry' db.delete_table('Reporting_seloginentry') # Deleting model 'SEUserEntry' db.delete_table('Reporting_seuserentry') # Deleting model 'SEBooleanEntry' db.delete_table('Reporting_sebooleanentry') # Deleting model 'SENodeEntry' db.delete_table('Reporting_senodeentry') # Deleting model 'SEFcontextEntry' db.delete_table('Reporting_sefcontextentry') # Deleting model 'SEInterfaceEntry' db.delete_table('Reporting_seinterfaceentry') # Deleting model 'SEPermissiveEntry' db.delete_table('Reporting_sepermissiveentry') # Deleting model 'SEModuleEntry' db.delete_table('Reporting_semoduleentry') # Deleting model 'SEPortEntry' db.delete_table('Reporting_seportentry') # Removing M2M table for field sebooleans on 'Interaction' db.delete_table('Reporting_interaction_sebooleans') # Removing M2M table for field seports on 'Interaction' 
db.delete_table('Reporting_interaction_seports') # Removing M2M table for field sefcontexts on 'Interaction' db.delete_table('Reporting_interaction_sefcontexts') # Removing M2M table for field senodes on 'Interaction' db.delete_table('Reporting_interaction_senodes') # Removing M2M table for field selogins on 'Interaction' db.delete_table('Reporting_interaction_selogins') # Removing M2M table for field seusers on 'Interaction' db.delete_table('Reporting_interaction_seusers') # Removing M2M table for field seinterfaces on 'Interaction' db.delete_table('Reporting_interaction_seinterfaces') # Removing M2M table for field sepermissives on 'Interaction' db.delete_table('Reporting_interaction_sepermissives') # Removing M2M table for field semodules on 'Interaction' db.delete_table('Reporting_interaction_semodules') models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), 
'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.sebooleanentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'Reporting.sefcontextentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seinterfaceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEInterfaceEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seloginentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.semoduleentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.senodeentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.sepermissiveentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seportentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) }, 'Reporting.seuserentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'roles': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/0006_add_user_group_entry_support.py000066400000000000000000000634001303523157100277460ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'POSIXGroupEntry' db.create_table('Reporting_posixgroupentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('gid', self.gf('django.db.models.fields.IntegerField')(null=True)), ('current_gid', self.gf('django.db.models.fields.IntegerField')(null=True)), )) db.send_create_signal('Reporting', ['POSIXGroupEntry']) # Adding model 'POSIXUserEntry' db.create_table('Reporting_posixuserentry', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), ('state', self.gf('django.db.models.fields.IntegerField')()), ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), ('uid', self.gf('django.db.models.fields.IntegerField')(null=True)), ('current_uid', self.gf('django.db.models.fields.IntegerField')(null=True)), ('group', self.gf('django.db.models.fields.CharField')(max_length=64)), ('current_group', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)), ('gecos', self.gf('django.db.models.fields.CharField')(max_length=1024)), ('current_gecos', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), ('home', self.gf('django.db.models.fields.CharField')(max_length=1024)), ('current_home', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), ('shell', self.gf('django.db.models.fields.CharField')(default='/bin/bash', max_length=1024)), ('current_shell', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), )) db.send_create_signal('Reporting', ['POSIXUserEntry']) # Adding M2M table for field posixusers on 'Interaction' db.create_table('Reporting_interaction_posixusers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('posixuserentry', models.ForeignKey(orm['Reporting.posixuserentry'], null=False)) )) db.create_unique('Reporting_interaction_posixusers', ['interaction_id', 'posixuserentry_id']) # Adding M2M table for field posixgroups on 'Interaction' db.create_table('Reporting_interaction_posixgroups', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), ('posixgroupentry', models.ForeignKey(orm['Reporting.posixgroupentry'], null=False)) )) db.create_unique('Reporting_interaction_posixgroups', ['interaction_id', 'posixgroupentry_id']) def backwards(self, orm): # Deleting model 'POSIXGroupEntry' db.delete_table('Reporting_posixgroupentry') # Deleting model 'POSIXUserEntry' 
db.delete_table('Reporting_posixuserentry') # Removing M2M table for field posixusers on 'Interaction' db.delete_table('Reporting_interaction_posixusers') # Removing M2M table for field posixgroups on 'Interaction' db.delete_table('Reporting_interaction_posixgroups') models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': 
{'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'posixgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXGroupEntry']", 'symmetrical': 'False'}), 'posixusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXUserEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), 
'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.posixgroupentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXGroupEntry'}, 'current_gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.posixuserentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXUserEntry'}, 'current_gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'current_home': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'group': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'home': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'shell': ('django.db.models.fields.CharField', [], {'default': "'/bin/bash'", 'max_length': '1024'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'Reporting.sebooleanentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'Reporting.sefcontextentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seinterfaceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 
'SEInterfaceEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seloginentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.semoduleentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.senodeentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.sepermissiveentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seportentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) }, 'Reporting.seuserentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/0007_add_flag_fields_interaction.py000066400000000000000000000556151303523157100274070ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Interaction.dry_run' db.add_column('Reporting_interaction', 'dry_run', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) # Adding field 'Interaction.only_important' db.add_column('Reporting_interaction', 'only_important', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'Interaction.dry_run' db.delete_column('Reporting_interaction', 'dry_run') # Deleting field 'Interaction.only_important' db.delete_column('Reporting_interaction', 'only_important') models = { 'Reporting.actionentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) }, 'Reporting.bundle': { 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'Reporting.client': { 'Meta': {'object_name': 'Client'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.deviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_major': ('django.db.models.fields.IntegerField', [], {}), 'current_minor': ('django.db.models.fields.IntegerField', [], {}), 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_major': ('django.db.models.fields.IntegerField', [], {}), 'target_minor': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.failureentry': { 'Meta': {'object_name': 'FailureEntry'}, 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileacl': { 'Meta': {'object_name': 'FileAcl'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'Reporting.fileperms': { 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'Reporting.group': { 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Reporting.interaction': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), 'dry_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), 'good_count': ('django.db.models.fields.IntegerField', [], {}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'only_important': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), 'posixgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXGroupEntry']", 'symmetrical': 'False'}), 'posixusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXUserEntry']", 'symmetrical': 'False'}), 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'total_count': ('django.db.models.fields.IntegerField', [], {}) }, 
'Reporting.linkentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, 'Reporting.packageentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) }, 'Reporting.pathentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) }, 'Reporting.performance': { 'Meta': {'object_name': 'Performance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) }, 'Reporting.posixgroupentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXGroupEntry'}, 'current_gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.posixuserentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 
'POSIXUserEntry'}, 'current_gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'current_home': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'current_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'group': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'home': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'shell': ('django.db.models.fields.CharField', [], {'default': "'/bin/bash'", 'max_length': '1024'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'Reporting.sebooleanentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'Reporting.sefcontextentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seinterfaceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEInterfaceEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seloginentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.semoduleentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.senodeentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.sepermissiveentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.seportentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) }, 'Reporting.serviceentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 
'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {}), 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) }, 'Reporting.seuserentry': { 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['Reporting']src/lib/Bcfg2/Reporting/south_migrations/__init__.py000066400000000000000000000000001303523157100230040ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/000077500000000000000000000000001303523157100173055ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/404.html000066400000000000000000000003111303523157100204750ustar00rootroot00000000000000{% extends 'base.html' %} {% block title %}Bcfg2 - Page not found{% endblock %} {% block fullcontent %}

    Page not found

    The page or object requested could not be found.

    {% endblock %} src/lib/Bcfg2/Reporting/templates/base-timeview.html000066400000000000000000000020471303523157100227370ustar00rootroot00000000000000{% extends "base.html" %} {% block timepiece %} {% if not timestamp %}Rendered at {% now "SHORT_DATETIME_FORMAT" %} | {% else %}View as of {{ timestamp|date:"SHORT_DATETIME_FORMAT" }} | {% endif %}{% spaceless %} [change]
    {% endspaceless %} {% endblock %} src/lib/Bcfg2/Reporting/templates/base.html000066400000000000000000000073561303523157100211200ustar00rootroot00000000000000{% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 Reporting System{% endblock %} {% block extra_header_info %}{% endblock %}
    {% block fullcontent %}

    {% block pagebanner %}Page Banner{% endblock %}

    {% block timepiece %}Rendered at {% now "Y-m-d H:i" %}{% endblock %}
    {% block content %}{% endblock %}
    {% endblock %}
    {% block sidemenu %} {% comment %} TODO {% endcomment %} {% endblock %}
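base.html above only establishes the block skeleton (title, extra_header_info, pagebanner, timepiece, content, sidemenu); every report page extends it and overrides a subset of those blocks. Below is a minimal, self-contained sketch of that inheritance pattern using Django's in-memory template loader; the template strings and the name demo.html are illustrative stand-ins, not files from this repository.

from django.conf import settings
from django.template import Context, Engine

if not settings.configured:
    settings.configure()  # standalone use only; the real app configures settings itself

engine = Engine(loaders=[('django.template.loaders.locmem.Loader', {
    'base.html': ('{% block title %}Bcfg2 Reporting System{% endblock %} | '
                  '{% block pagebanner %}Page Banner{% endblock %} | '
                  '{% block content %}{% endblock %}'),
    'demo.html': ('{% extends "base.html" %}'
                  '{% block title %}Bcfg2 - Demo{% endblock %}'
                  '{% block content %}hello {{ name }}{% endblock %}'),
})])

print(engine.get_template('demo.html').render(Context({'name': 'client01'})))
# prints: Bcfg2 - Demo | Page Banner | hello client01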
    src/lib/Bcfg2/Reporting/templates/clients/000077500000000000000000000000001303523157100207465ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/clients/detail.html000066400000000000000000000125311303523157100231000ustar00rootroot00000000000000{% extends "base.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Client {{client.name}}{% endblock %} {% block extra_header_info %} {% endblock %} {% block body_onload %}javascript:clientdetailload(){% endblock %} {% block pagebanner %}Client Details{% endblock %} {% block content %}

    {{client.name}}

    [manage] View History | Jump to 
    {% if interaction.isstale %}
    This node did not run within the last 24 hours — it may be out of date.
    {% endif %} {% if interaction.server %} {% endif %} {% if interaction.profile %} {% endif %} {% if interaction.repo_rev_code %} {% endif %} {% if not interaction.isclean %} {% endif %}
    Timestamp{{interaction.timestamp}}
    Served by{{interaction.server}}
    Profile{{interaction.profile}}
    Revision{{interaction.repo_rev_code}}
    State{{interaction.state|capfirst}}
    Managed entries{{interaction.total_count}}
    Deviation{{interaction.percentbad|floatformat:"3"}}%
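The "Deviation" row above is the interaction's bad-entry ratio. The percentbad property itself is not part of this excerpt, but the determine_client_state filter later in this archive applies the same ratio when it picks a row colour, so here is a quick worked example with invented counts:

# Invented counts for one interaction; mirrors the ratio used by
# determine_client_state in Bcfg2/Reporting/templatetags/bcfg2_tags.py.
bad_count, total_count = 7, 350
bad_percentage = 100 * (float(bad_count) / total_count)   # 2.0
# detail.html renders it through floatformat:"3", i.e. "2.000%";
# determine_client_state buckets it: under 33 "slightly-dirty-lineitem",
# under 66 "dirty-lineitem", otherwise "very-dirty-lineitem".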
    {% for group in interaction.groups.all %} {% if forloop.first %}

    Group membership

    [+]
    {% endif %} {% if forloop.last %}
    {% endif %} {% endfor %} {% for bundle in interaction.bundles.all %} {% if forloop.first %}

    Bundle membership

    [+]
    {% endif %} {% if forloop.last %}
    {% endif %} {% endfor %} {% for entry_type, entry_list in entry_types.items %} {% if entry_list %}

    {{ entry_type|capfirst }} Entries — {{ entry_list|length }}

    [+]
    {% for entry in entry_list %} {% endfor %}
    {{entry.entry_type}} {{entry.name}}
    {% endif %} {% endfor %} {% if interaction.failures.all %}

    Failed Entries — {{ interaction.failures.all|length }}

    [+]
    {% for failure in interaction.failures.all %} {% endfor %}
    {% endif %} {% if entry_list %}

    Recent Interactions

    {% include "widgets/interaction_list.inc" %}
    {% endif %} {% endblock %} src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html000066400000000000000000000041771303523157100243710ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Detailed Client Listing{% endblock %} {% block pagebanner %}Clients - Detailed View{% endblock %} {% block content %} {% filter_navigator %}
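The {% filter_navigator %} call above works because filteredUrls() in Bcfg2/Reporting/utils.py (included near the end of this archive) registers extra URL patterns for each list view, so server, state and group filters are expressed in the path itself. A few example paths for this detailed-list view; the host and group names are invented:

# Illustrative paths only; the '/detailed' prefix comes from urls.py and the
# filter segments from filteredUrls() in utils.py.
example_paths = [
    '/detailed/',                                              # unfiltered
    '/detailed/state/dirty/',                                  # by interaction state
    '/detailed/server/bcfg01.example.com/',                    # by reporting server (invented host)
    '/detailed/server/bcfg01.example.com/group/webservers/',   # server plus group (invented group)
]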
    {% if entry_list %} {% for entry in entry_list %} {% endfor %}
    {% sort_link 'client' 'Node' %} {% sort_link 'state' 'State' %} {% sort_link '-good' 'Good' %} {% sort_link '-bad' 'Bad' %} {% sort_link '-modified' 'Modified' %} {% sort_link '-extra' 'Extra' %} {% sort_link 'timestamp' 'Last Run' %} {% sort_link 'server' 'Server' %}
    {{ entry.client.name }} {{ entry.state }} {{ entry.good_count }} {{ entry.bad_count }} {{ entry.modified_count }} {{ entry.extra_count }} {{ entry.timestamp|date:"SHORT_DATETIME_FORMAT"|safe }} {% if entry.server %} {{ entry.server }} {% else %}   {% endif %}
    {% else %}

    No client records are available.

    {% endif %}
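The sortable column headers above come from the sort_link tag defined in bcfg2_tags.py later in this archive: it reads the current ?sort= query parameter, appends an arrow to the active column, and flips the sign of the sort key for the next click. A small standalone sketch of that toggle (not the tag itself):

# Mirrors the arrow/sign logic in SortLinkNode.render (bcfg2_tags.py).
def toggle_sort(current_sort, sort_key):
    base = sort_key.lstrip('-')
    if current_sort.startswith('-') and current_sort[1:] == base:
        return base, '▼'         # currently descending; next click sorts ascending
    if current_sort == base:
        return '-' + base, '▲'   # currently ascending; next click sorts descending
    return sort_key, ''          # not the active column

print(toggle_sort('-bad', '-bad'))   # ('bad', '▼')
print(toggle_sort('bad', '-bad'))    # ('-bad', '▲')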
    {% endblock %} src/lib/Bcfg2/Reporting/templates/clients/history.html000066400000000000000000000010031303523157100233270ustar00rootroot00000000000000{% extends "base.html" %} {% load bcfg2_tags %} {% block title %}Bcfg2 - Interaction History{% endblock %} {% block pagebanner %}Interaction history{% if client %} for {{ client.name }}{% endif %}{% endblock %} {% block extra_header_info %} {% endblock %} {% block content %}
    {% if entry_list %} {% filter_navigator %} {% include "widgets/interaction_list.inc" %} {% else %}

    No client records are available.

    {% endif %}
    {% page_navigator %} {% endblock %} src/lib/Bcfg2/Reporting/templates/clients/index.html000066400000000000000000000021341303523157100227430ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block extra_header_info %} {% endblock%} {% block title %}Bcfg2 - Client Grid View{% endblock %} {% block pagebanner %}Clients - Grid View{% endblock %} {% block content %} {% filter_navigator %} {% if inter_list %} {% for inter in inter_list %} {% if forloop.first %}{% endif %} {% if forloop.last %} {% else %} {% if forloop.counter|divisibleby:"4" %}{% endif %} {% endif %} {% endfor %}
    {{ inter.client.name }}
    {% else %}

    No client records are available.

    {% endif %} {% endblock %} src/lib/Bcfg2/Reporting/templates/clients/manage.html000066400000000000000000000031231303523157100230630ustar00rootroot00000000000000{% extends "base.html" %} {% load url from bcfg2_compat %} {% block extra_header_info %} {% endblock%} {% block title %}Bcfg2 - Manage Clients{% endblock %} {% block pagebanner %}Clients - Manage{% endblock %} {% block content %}
    {% if message %}
    {{ message }}
    {% endif %} {% if clients %} {% for client in clients %} {% endfor %}
    Node Expiration Manage
    {{ client.name }} {% firstof client.expiration 'Active' %}
    {# here for no reason other than to validate #}

    {% else %}

    No client records are available.

    {% endif %}
    {% endblock %} src/lib/Bcfg2/Reporting/templates/config_items/000077500000000000000000000000001303523157100217535ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/config_items/common.html000066400000000000000000000032621303523157100241340ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Common Problems{% endblock %} {% block extra_header_info %} {% endblock%} {% block pagebanner %}Common configuration problems{% endblock %} {% block content %} {% filter_navigator %}
    Showing items with more than {{ threshold }} entries
    {% for type_name, type_list in lists %}

    {{ type_name|capfirst }} entries

    [–]
    {% if type_list %} {% for item in type_list %} {% endfor %}
    TypeNameCountReason
    {{ item.ENTRY_TYPE }} {{ item.name }} {{ item.num_entries }} {{ item.short_list|join:"," }}
    {% else %}

    There are currently no inconsistent {{ type_name }} configuration entries.

    {% endif %}
    {% endfor %} {% endblock %} src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html000066400000000000000000000025601303523157100254100ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Entry Status{% endblock %} {% block extra_header_info %} {% endblock%} {% block pagebanner %}{{ entry.entry_type }} entry {{ entry.name }} status{% endblock %} {% block content %} {% filter_navigator %} {% if items %}
    {% for item, inters in items %} {% for inter in inters %} {% endfor %} {% endfor %}
    NameTimestampStateReason
    {{inter.client.name}} {{inter.timestamp|date:"SHORT_DATETIME_FORMAT"|safe}} {{ item.get_state_display }} ({{item.pk}}) {{item.short_list|join:","}}
    {% else %}

    There are currently no hosts with this configuration entry.

    {% endif %} {% endblock %} src/lib/Bcfg2/Reporting/templates/config_items/item-failure.html000066400000000000000000000005101303523157100252200ustar00rootroot00000000000000{% extends "config_items/item.html" %} {% load syntax_coloring %} {% block item_details %}

    This item failed to bind on the server

    {{ item.message|syntaxhilight:"py" }}
    {% endblock %} src/lib/Bcfg2/Reporting/templates/config_items/item.html000066400000000000000000000110061303523157100235750ustar00rootroot00000000000000{% extends "base.html" %} {% load split %} {% load syntax_coloring %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Element Details{% endblock %} {% block extra_header_info %} {% endblock%} {% block pagebanner %}Element Details{% endblock %} {% block content %}

    {{item.get_state_display}} {{item.entry_type}}: {{item.name}}

    {% block item_details %} {% if item.is_extra %}

    This item exists on the host but is not defined in the configuration.

    {% endif %} {% if not item.exists %}
    This item does not currently exist on the host but is specified to exist in the configuration.
    {% endif %} {# Really need a better test here #} {% if item.mode_problem or item.status_problem or item.linkentry.link_problem or item.version_problem %} {% if item.mode_problem %} {% if item.current_perms.owner %} {% endif %} {% if item.current_perms.group %} {% endif %} {% if item.current_perms.mode%} {% endif %} {% endif %} {% if item.status_problem %} {% endif %} {% if item.linkentry.link_problem %} {% endif %} {% if item.version_problem %} {% endif %} {% if item.selinuxtype_problem %} {% endif %}
    Problem TypeExpectedFound
    Owner {{item.target_perms.owner}} {{item.current_perms.owner}}
    Group {{item.target_perms.group}} {{item.current_perms.group}}
    Permissions {{item.target_perms.mode}} {{item.current_perms.mode}}
    Status {{item.target_status}} {{item.current_status}}
    {{item.get_path_type_display}} {{item.linkentry.target_path}} {{item.linkentry.current_path}}
    Package Version {{item.target_version|cut:"("|cut:")"}} {{item.current_version|cut:"("|cut:")"}}
    SELinux Type {{item.selinuxtype}} {{item.current_selinuxtype}}
    {% endif %} {% if item.has_detail %}
    {% if item.is_sensitive %}

    File contents unavailable, as they might contain sensitive data.

    {% else %}

    Incorrect file contents ({{item.get_detail_type_display}})

    {% endif %}
    {% if item.is_diff %}
    {{ item.details|syntaxhilight }}
    {% else %} {{ item.details|linebreaks }} {% endif %}
    {% endif %} {% if item.reason.unpruned %}

    Extra entries found

    {% for unpruned_item in item.reason.unpruned|split %} {% endfor %}
    {{ unpruned_item }}
    {% endif %} {% endblock %}
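Both the diff rendered above through item.details|syntaxhilight and the bind failure shown in item-failure.html go through the syntaxhilight filter defined in syntax_coloring.py near the end of this archive; it is a thin wrapper around Pygments with a plain-text fallback. A self-contained sketch of the same idea (not the filter itself, which also handles autoescaping and wraps the fallback in a styled block):

try:
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import HtmlFormatter
    HAVE_PYGMENTS = True
except ImportError:
    HAVE_PYGMENTS = False

def hilight(text, language='diff'):
    """Colourize text as HTML when Pygments is available, else return it as-is."""
    if not HAVE_PYGMENTS:
        return text
    return highlight(text, get_lexer_by_name(language), HtmlFormatter())

print(hilight('-old line\n+new line'))       # an HTML "highlight" div
print(hilight('print(1)', language='py'))    # 'py' matches the item-failure.html usage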

    Occurrences on {{ timestamp|date:"SHORT_DATE_FORMAT" }}

    {% if associated_list %} {% for inter in associated_list %} {% endfor %}
    {{inter.client.name}} {{inter.timestamp}}
    {% else %}

    Missing client list

    {% endif %}
    {% endblock %} src/lib/Bcfg2/Reporting/templates/config_items/listing.html000066400000000000000000000025271303523157100243200ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Element Listing{% endblock %} {% block extra_header_info %} {% endblock%} {% block pagebanner %}{{item_state|capfirst}} Element Listing{% endblock %} {% block content %} {% filter_navigator %} {% if item_list %} {% for type_name, type_data in item_list %}

    {{ type_name }} — {{ type_data|length }}

    [–]
    {% for entry in type_data %} {% endfor %}
    NameCountReason
    {{entry.name}} {{entry.num_entries}} {{entry.short_list|join:","}}
    {% endfor %} {% else %}

    There are currently no inconsistent configuration entries.

    {% endif %} {% endblock %} src/lib/Bcfg2/Reporting/templates/displays/000077500000000000000000000000001303523157100211355ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/displays/summary.html000066400000000000000000000027061303523157100235250ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Client Summary{% endblock %} {% block pagebanner %}Clients - Summary{% endblock %} {% block body_onload %}javascript:hide_table_array(hide_tables){% endblock %} {% block extra_header_info %} {% endblock%} {% block content %}

    {{ node_count }} nodes reporting in

    {% if summary_data %} {% for summary in summary_data %}

    {{ summary.nodes|length }} {{ summary.label }}

    [+]
    {% for node in summary.nodes|sort_interactions_by_name %} {% endfor %}
    {{ node.client.name }}
    {% endfor %} {% else %}

    No data to report on

    {% endif %} {% endblock %} src/lib/Bcfg2/Reporting/templates/displays/timing.html000066400000000000000000000022031303523157100233070ustar00rootroot00000000000000{% extends "base-timeview.html" %} {% load bcfg2_tags %} {% load url from bcfg2_compat %} {% block title %}Bcfg2 - Performance Metrics{% endblock %} {% block pagebanner %}Performance Metrics{% endblock %} {% block extra_header_info %} {% endblock%} {% block content %}
    {% if metrics %} {% for metric in metrics|dictsort:"name" %} {% for mitem in metric|build_metric_list %} {% endfor %} {% endfor %}
    Name Parse Probe Inventory Install Config Total
    {{ metric.name }}{{ mitem }}
    {% else %}

    No metric data available

    {% endif %}
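Each data row in the timing table above is produced by the build_metric_list filter defined in bcfg2_tags.py later in this archive: the stored performance metrics are absolute timestamps, and the filter turns them into the six durations shown as columns (Parse, Probe, Inventory, Install, Config, Total). A worked example with invented timestamps:

# Invented per-client metric timestamps, in seconds; the keys and the pairs
# subtracted below are the ones build_metric_list uses.
mdict = {'start': 0.0, 'initialization': 0.4, 'probe_upload': 1.1,
         'config_download': 1.2, 'config_parse': 2.1, 'inventory': 3.0,
         'install': 7.5, 'finished': 8.0}

columns = [
    round(mdict['config_parse'] - mdict['config_download'], 4),  # Parse      0.9
    round(mdict['probe_upload'] - mdict['start'], 4),            # Probe      1.1
    round(mdict['inventory'] - mdict['initialization'], 4),      # Inventory  2.6
    round(mdict['install'] - mdict['inventory'], 4),             # Install    4.5
    round(mdict['config_parse'] - mdict['probe_upload'], 4),     # Config     1.0
    round(mdict['finished'] - mdict['start'], 4),                # Total      8.0
]
# A missing metric would show up as "n/a" (see _subtract_or_na).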
    {% endblock %} src/lib/Bcfg2/Reporting/templates/widgets/000077500000000000000000000000001303523157100207535ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templates/widgets/filter_bar.html000066400000000000000000000014401303523157100237510ustar00rootroot00000000000000{% spaceless %}
    {% if filters %} {% for filter, filter_url in filters %} {% if forloop.first %} Active filters (click to remove): {% endif %} {{ filter|capfirst }}{% if not forloop.last %}, {% endif %} {% if forloop.last %} {% if groups %}|{% endif %} {% endif %} {% endfor %} {% endif %} {% if groups %} {% endif %}
    {% endspaceless %} src/lib/Bcfg2/Reporting/templates/widgets/interaction_list.inc000066400000000000000000000032641303523157100250250ustar00rootroot00000000000000{% load bcfg2_tags %}
    {% if not client %} {% endif %} {% for entry in entry_list %} {% if not client %} {% endif %} {% endfor %}
    TimestampClientState Good Bad Modified Extra Server
    {{ entry.timestamp|date:"SHORT_DATETIME_FORMAT"|safe }} {{ entry.client.name }}{{ entry.state }} {{ entry.good_count }} {{ entry.bad_count }} {{ entry.modified_count }} {{ entry.extra_count }} {% if entry.server %} {{ entry.server }} {% else %}   {% endif %}
    src/lib/Bcfg2/Reporting/templates/widgets/page_bar.html000066400000000000000000000017041303523157100234030ustar00rootroot00000000000000{% spaceless %} {% for page, page_url in pager %} {% if forloop.first %}
    {% if prev_page %}< Prev {% endif %} {% if first_page %}1 ... {% endif %} {% endif %} {% ifequal page current_page %} {{ page }} {% else %} {{ page }} {% endifequal %} {% if forloop.last %} {% if last_page %} ... {{ total_pages }} {% endif %} {% if next_page %}Next > {% endif %} |{% for limit, limit_url in page_limits %} {{ limit }}{% endfor %}
    {% else %}   {% endif %} {% endfor %} {% endspaceless %} src/lib/Bcfg2/Reporting/templatetags/000077500000000000000000000000001303523157100200015ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templatetags/__init__.py000066400000000000000000000000001303523157100221000ustar00rootroot00000000000000src/lib/Bcfg2/Reporting/templatetags/bcfg2_compat.py000066400000000000000000000005021303523157100226760ustar00rootroot00000000000000from django.template import Library try: from django.templatetags.future import url as django_url except ImportError: # future is removed in django 1.9 from django.template.defaulttags import url as django_url register = Library() @register.tag def url(parser, token): return django_url(parser, token) src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py000066400000000000000000000330051303523157100223550ustar00rootroot00000000000000import sys from copy import copy import django from django import template from django.conf import settings from django.core.urlresolvers import resolve, reverse, \ Resolver404, NoReverseMatch from django.utils.encoding import smart_str from django.utils.safestring import mark_safe from datetime import datetime, timedelta from Bcfg2.Reporting.utils import filter_list from Bcfg2.Reporting.models import Group register = template.Library() __PAGE_NAV_LIMITS__ = (10, 25, 50, 100) @register.inclusion_tag('widgets/page_bar.html', takes_context=True) def page_navigator(context): """ Creates paginated links. Expects the context to be a RequestContext and views.prepare_paginated_list() to have populated page information. """ fragment = dict() try: path = context['request'].META['PATH_INFO'] total_pages = int(context['total_pages']) records_per_page = int(context['records_per_page']) except KeyError: return fragment except ValueError: return fragment if total_pages < 2: return {} try: view, args, kwargs = resolve(path) current_page = int(kwargs.get('page_number', 1)) fragment['current_page'] = current_page fragment['page_number'] = current_page fragment['total_pages'] = total_pages fragment['records_per_page'] = records_per_page if current_page > 1: kwargs['page_number'] = current_page - 1 fragment['prev_page'] = reverse(view, args=args, kwargs=kwargs) if current_page < total_pages: kwargs['page_number'] = current_page + 1 fragment['next_page'] = reverse(view, args=args, kwargs=kwargs) view_range = 5 if total_pages > view_range: pager_start = current_page - 2 pager_end = current_page + 2 if pager_start < 1: pager_end += (1 - pager_start) pager_start = 1 if pager_end > total_pages: pager_start -= (pager_end - total_pages) pager_end = total_pages else: pager_start = 1 pager_end = total_pages if pager_start > 1: kwargs['page_number'] = 1 fragment['first_page'] = reverse(view, args=args, kwargs=kwargs) if pager_end < total_pages: kwargs['page_number'] = total_pages fragment['last_page'] = reverse(view, args=args, kwargs=kwargs) pager = [] for page in range(pager_start, int(pager_end) + 1): kwargs['page_number'] = page pager.append((page, reverse(view, args=args, kwargs=kwargs))) kwargs['page_number'] = 1 page_limits = [] for limit in __PAGE_NAV_LIMITS__: kwargs['page_limit'] = limit page_limits.append((limit, reverse(view, args=args, kwargs=kwargs))) # resolver doesn't like this del kwargs['page_number'] del kwargs['page_limit'] page_limits.append(('all', reverse(view, args=args, kwargs=kwargs) + "|all")) fragment['pager'] = pager fragment['page_limits'] = page_limits except Resolver404: path = "404" except NoReverseMatch: nr = sys.exc_info()[1] path 
= "NoReverseMatch: %s" % nr except ValueError: path = "ValueError" #FIXME - Handle these fragment['path'] = path return fragment @register.inclusion_tag('widgets/filter_bar.html', takes_context=True) def filter_navigator(context): try: path = context['request'].META['PATH_INFO'] view, args, kwargs = resolve(path) except (Resolver404, KeyError): return dict() # Strip any page limits and numbers if 'page_number' in kwargs: del kwargs['page_number'] if 'page_limit' in kwargs: del kwargs['page_limit'] # get a query string qs = context['request'].GET.urlencode() if qs: qs = '?' + qs filters = [] for filter in filter_list: if filter == 'group': continue if filter in kwargs: myargs = kwargs.copy() del myargs[filter] try: filters.append((filter, reverse(view, args=args, kwargs=myargs) + qs)) except NoReverseMatch: pass filters.sort(key=lambda x: x[0]) myargs = kwargs.copy() selected = True if 'group' in myargs: del myargs['group'] selected = False groups = [] try: groups.append(('---', reverse(view, args=args, kwargs=myargs) + qs, selected)) except NoReverseMatch: pass for group in Group.objects.values('name'): try: myargs['group'] = group['name'] groups.append((group['name'], reverse(view, args=args, kwargs=myargs) + qs, group['name'] == kwargs.get('group', ''))) except NoReverseMatch: pass return {'filters': filters, 'groups': groups} def _subtract_or_na(mdict, x, y): """ Shortcut for build_metric_list """ try: return round(mdict[x] - mdict[y], 4) except: return "n/a" @register.filter def build_metric_list(mdict): """ Create a list of metric table entries Moving this here to simplify the view. Should really handle the case where these are missing... """ td_list = [] # parse td_list.append(_subtract_or_na(mdict, 'config_parse', 'config_download')) #probe td_list.append(_subtract_or_na(mdict, 'probe_upload', 'start')) #inventory td_list.append(_subtract_or_na(mdict, 'inventory', 'initialization')) #install td_list.append(_subtract_or_na(mdict, 'install', 'inventory')) #cfg download & parse td_list.append(_subtract_or_na(mdict, 'config_parse', 'probe_upload')) #total td_list.append(_subtract_or_na(mdict, 'finished', 'start')) return td_list @register.filter def sort_interactions_by_name(value): """ Sort an interaction list by client name """ inters = list(value) inters.sort(key=lambda a: a.client.name) return inters class AddUrlFilter(template.Node): def __init__(self, filter_name, filter_value): self.filter_name = filter_name self.filter_value = filter_value self.fallback_view = 'Bcfg2.Reporting.views.render_history_view' def render(self, context): link = '#' try: path = context['request'].META['PATH_INFO'] view, args, kwargs = resolve(path) filter_value = self.filter_value.resolve(context, True) if filter_value: filter_name = smart_str(self.filter_name) filter_value = smart_str(filter_value) kwargs[filter_name] = filter_value # These two don't make sense if filter_name == 'server' and 'hostname' in kwargs: del kwargs['hostname'] elif filter_name == 'hostname' and 'server' in kwargs: del kwargs['server'] try: link = reverse(view, args=args, kwargs=kwargs) except NoReverseMatch: link = reverse(self.fallback_view, args=None, kwargs={filter_name: filter_value}) qs = context['request'].GET.urlencode() if qs: link += "?" + qs except NoReverseMatch: rm = sys.exc_info()[1] raise rm except (Resolver404, ValueError): pass return link @register.tag def add_url_filter(parser, token): """ Return a url with the filter added to the current view. 
Takes a new filter and resolves the current view with the new filter applied. Resolves to Bcfg2.Reporting.views.client_history by default. {% add_url_filter server=interaction.server %} """ try: tag_name, filter_pair = token.split_contents() filter_name, filter_value = filter_pair.split('=', 1) filter_name = filter_name.strip() filter_value = parser.compile_filter(filter_value) except ValueError: raise template.TemplateSyntaxError("%r tag requires exactly one argument" % token.contents.split()[0]) if not filter_name or not filter_value: raise template.TemplateSyntaxError("argument should be a filter=value pair") return AddUrlFilter(filter_name, filter_value) class MediaTag(template.Node): def __init__(self, filter_value): self.filter_value = filter_value def render(self, context): base = context['MEDIA_URL'] try: request = context['request'] try: base = request.environ['bcfg2.media_url'] except: if request.path != request.META['PATH_INFO']: offset = request.path.find(request.META['PATH_INFO']) if offset > 0: base = "%s/%s" % (request.path[:offset], \ context['MEDIA_URL'].strip('/')) except: pass return "%s/%s" % (base, self.filter_value) @register.tag def to_media_url(parser, token): """ Return a url relative to the media_url. {% to_media_url /bcfg2.css %} """ try: filter_value = token.split_contents()[1] filter_value = parser.compile_filter(filter_value) except ValueError: raise template.TemplateSyntaxError("%r tag requires exactly one argument" % token.contents.split()[0]) return MediaTag(filter_value) @register.filter def determine_client_state(entry): """ Determine client state. This is used to determine whether a client is reporting clean or dirty. If the client is reporting dirty, this will figure out just _how_ dirty and adjust the color accordingly. """ if entry.isstale(): return "stale-lineitem" if entry.state == 'clean': if entry.extra_count > 0: return "extra-lineitem" return "clean-lineitem" bad_percentage = 100 * (float(entry.bad_count) / entry.total_count) if bad_percentage < 33: thisdirty = "slightly-dirty-lineitem" elif bad_percentage < 66: thisdirty = "dirty-lineitem" else: thisdirty = "very-dirty-lineitem" return thisdirty @register.tag(name='qs') def do_qs(parser, token): """ qs tag accepts a name value pair and inserts or replaces it in the query string """ try: tag, name, value = token.split_contents() except ValueError: raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % token.contents.split()[0]) return QsNode(name, value) class QsNode(template.Node): def __init__(self, name, value): self.name = template.Variable(name) self.value = template.Variable(value) def render(self, context): try: name = self.name.resolve(context) value = self.value.resolve(context) request = context['request'] qs = copy(request.GET) qs[name] = value return "?%s" % qs.urlencode() except template.VariableDoesNotExist: return '' except KeyError: if settings.TEMPLATE_DEBUG: raise Exception("'qs' tag requires context['request']") return '' except: return '' @register.tag def sort_link(parser, token): ''' Create a sort anchor tag. Reverse it if active. 
{% sort_link sort_key text %} ''' try: tag, sort_key, text = token.split_contents() except ValueError: raise template.TemplateSyntaxError("%r tag requires at least four arguments" \ % token.split_contents()[0]) return SortLinkNode(sort_key, text) class SortLinkNode(template.Node): __TMPL__ = "{% load bcfg2_tags %}{{ text }}" def __init__(self, sort_key, text): self.sort_key = template.Variable(sort_key) self.text = template.Variable(text) def _render_template(self, context): if django.VERSION[0] == 1 and django.VERSION[1] >= 8: return context.template.engine.from_string(self.__TMPL__) else: from django.template.loader import get_template_from_string return get_template_from_string(self.__TMPL__).render(context) def render(self, context): try: try: sort = context['request'].GET['sort'] except KeyError: #fall back on this sort = context.get('sort', '') sort_key = self.sort_key.resolve(context) text = self.text.resolve(context) # add arrows try: sort_base = sort_key.lstrip('-') if sort[0] == '-' and sort[1:] == sort_base: text = text + '▼' sort_key = sort_base elif sort_base == sort: text = text + '▲' sort_key = '-' + sort_base except IndexError: pass context.push() context['key'] = sort_key context['text'] = mark_safe(text) output = self._render_template(context) context.pop() return output except: if settings.DEBUG: raise raise return '' src/lib/Bcfg2/Reporting/templatetags/split.py000066400000000000000000000002201303523157100215000ustar00rootroot00000000000000from django import template register = template.Library() @register.filter def split(s): """split by newlines""" return s.split('\n') src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py000066400000000000000000000025441303523157100236020ustar00rootroot00000000000000from django import template from django.utils.encoding import smart_str from django.utils.html import conditional_escape from django.utils.safestring import mark_safe register = template.Library() # pylint: disable=E0611 try: from pygments import highlight from pygments.lexers import get_lexer_by_name from pygments.formatters import HtmlFormatter colorize = True except: colorize = False # pylint: enable=E0611 @register.filter def syntaxhilight(value, arg="diff", autoescape=None): """ Returns a syntax-hilighted version of Code; requires code/language arguments """ if autoescape: # Seems to cause a double escape #value = conditional_escape(value) arg = conditional_escape(arg) if colorize: try: output = smart_str('') lexer = get_lexer_by_name(arg) output += highlight(value, lexer, HtmlFormatter()) return mark_safe(output) except: return value else: return mark_safe(smart_str( '
    <div class="note-box">Tip: Install pygments ' 'for highlighting</div>
    <pre>%s</pre>
    ') % value) syntaxhilight.needs_autoescape = True src/lib/Bcfg2/Reporting/urls.py000066400000000000000000000045351303523157100166550ustar00rootroot00000000000000from Bcfg2.Reporting.Compat import url, patterns # django compat imports from django.core.urlresolvers import reverse, NoReverseMatch from django.http import HttpResponsePermanentRedirect from Bcfg2.Reporting.utils import filteredUrls, paginatedUrls, timeviewUrls from Bcfg2.Reporting import views handler500 = 'Bcfg2.Reporting.views.server_error' def newRoot(request): try: grid_view = reverse('reports_grid_view') except NoReverseMatch: grid_view = '/grid' return HttpResponsePermanentRedirect(grid_view) urlpatterns = patterns('', (r'^$', newRoot), url(r'^manage/?$', views.client_manage, name='reports_client_manage'), url(r'^client/(?P[^/]+)/(?P\d+)/?$', views.client_detail, name='reports_client_detail_pk'), url(r'^client/(?P[^/]+)/?$', views.client_detail, name='reports_client_detail'), url(r'^element/(?P\w+)/(?P\d+)/(?P\d+)?/?$', views.config_item, name='reports_item'), url(r'^element/(?P\w+)/(?P\d+)/?$', views.config_item, name='reports_item'), url(r'^entry/(?P\w+)/(?P\w+)/?$', views.entry_status, name='reports_entry'), ) urlpatterns += patterns('', *timeviewUrls( (r'^summary/?$', views.display_summary, None, 'reports_summary'), (r'^timing/?$', views.display_timing, None, 'reports_timing'), (r'^common/group/(?P[^/]+)/(?P\d+)/?$', views.common_problems, None, 'reports_common_problems'), (r'^common/group/(?P[^/]+)+/?$', views.common_problems, None, 'reports_common_problems'), (r'^common/(?P\d+)/?$', views.common_problems, None, 'reports_common_problems'), (r'^common/?$', views.common_problems, None, 'reports_common_problems'), )) urlpatterns += patterns('', *filteredUrls(*timeviewUrls( (r'^grid/?$', views.client_index, None, 'reports_grid_view'), (r'^detailed/?$', views.client_detailed_list, None, 'reports_detailed_list'), (r'^elements/(?P\w+)/?$', views.config_item_list, None, 'reports_item_list'), ))) urlpatterns += patterns('', *paginatedUrls( *filteredUrls( (r'^history/?$', views.render_history_view, None, 'reports_history'), (r'^history/(?P[^/|]+)/?$', views.render_history_view, None, 'reports_client_history'), ))) src/lib/Bcfg2/Reporting/utils.py000077500000000000000000000101421303523157100170220ustar00rootroot00000000000000"""Helper functions for reports""" import re """List of filters provided by filteredUrls""" filter_list = ('server', 'state', 'group') class BatchFetch(object): """Fetch Django objects in smaller batches to save memory""" def __init__(self, obj, step=10000): self.count = 0 self.block_count = 0 self.obj = obj self.data = None self.step = step self.max = obj.count() def __iter__(self): return self def next(self): """Provide compatibility with python < 3.0""" return self.__next__() def __next__(self): """Return the next object from our array and fetch from the database when needed""" if self.block_count + self.count - self.step == self.max: raise StopIteration if self.block_count == 0 or self.count == self.step: # Without list() this turns into LIMIT 1 OFFSET x queries self.data = list(self.obj.all()[self.block_count: \ (self.block_count + self.step)]) self.block_count += self.step self.count = 0 self.count += 1 return self.data[self.count - 1] def generateUrls(fn): """ Parse url tuples and send to functions. Decorator for url generators. Handles url tuple parsing before the actual function is called. 
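    A minimal usage sketch (the decorated generator and the view callable
    below are illustrative, not part of this module):

        @generateUrls
        def suffixedUrls(pattern, view, kwargs=None, name=None):
            # keep the original url tuple and add a '/detail' variant
            return [(pattern, view, kwargs, name),
                    (pattern + "/detail", view, kwargs)]

        urlpatterns = suffixedUrls((r'^foo/?$', some_view, None, 'foo'))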
""" def url_gen(*urls): results = [] for url_tuple in urls: if isinstance(url_tuple, (list, tuple)): results += fn(*url_tuple) else: raise ValueError("Unable to handle compiled urls") return results return url_gen @generateUrls def paginatedUrls(pattern, view, kwargs=None, name=None): """ Takes a group of url tuples and adds paginated urls. Extends a url tuple to include paginated urls. Currently doesn't handle url() compiled patterns. """ results = [(pattern, view, kwargs, name)] tail = '' mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) if mtail: tail = mtail.group(1) pattern = pattern[:len(pattern) - len(tail)] results += [(pattern + "/(?P\d+)" + tail, view, kwargs)] results += [(pattern + "/(?P\d+)\|(?P\d+)" + tail, view, kwargs)] if not kwargs: kwargs = dict() kwargs['page_limit'] = 0 results += [(pattern + "/?\|(?Pall)" + tail, view, kwargs)] return results @generateUrls def filteredUrls(pattern, view, kwargs=None, name=None): """ Takes a url and adds filtered urls. Extends a url tuple to include filtered view urls. Currently doesn't handle url() compiled patterns. """ results = [(pattern, view, kwargs, name)] tail = '' mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) if mtail: tail = mtail.group(1) pattern = pattern[:len(pattern) - len(tail)] for filter in ('/state/(?P\w+)', '/group/(?P[^/]+)', '/group/(?P[^/]+)/(?P[A-Za-z]+)', '/server/(?P[^/]+)', '/server/(?P[^/]+)/(?P[A-Za-z]+)', '/server/(?P[^/]+)/group/(?P[^/]+)', '/server/(?P[^/]+)/group/(?P[^/]+)/(?P[A-Za-z]+)'): results += [(pattern + filter + tail, view, kwargs)] return results @generateUrls def timeviewUrls(pattern, view, kwargs=None, name=None): """ Takes a url and adds timeview urls Extends a url tuple to include filtered view urls. Currently doesn't handle url() compiled patterns. """ results = [(pattern, view, kwargs, name)] tail = '' mtail = re.search('(/+\+?\\*?\??\$?)$', pattern) if mtail: tail = mtail.group(1) pattern = pattern[:len(pattern) - len(tail)] for filter in ('/(?P\d{4})-(?P\d{2})-(?P\d{2})/' + \ '(?P\d\d)-(?P\d\d)', '/(?P\d{4})-(?P\d{2})-(?P\d{2})'): results += [(pattern + filter + tail, view, kwargs)] return results src/lib/Bcfg2/Reporting/views.py000066400000000000000000000461501303523157100170240ustar00rootroot00000000000000""" Report views Functions to handle all of the reporting views. """ from datetime import datetime, timedelta import sys from time import strptime from django.template import Context, RequestContext from django.http import \ HttpResponse, HttpResponseRedirect, HttpResponseServerError, Http404 from django.shortcuts import render_to_response, get_object_or_404 from django.core.urlresolvers import \ resolve, reverse, Resolver404, NoReverseMatch from django.db import DatabaseError from django.db.models import Q, Count from Bcfg2.Reporting.models import * __SORT_FIELDS__ = ( 'client', 'state', 'good', 'bad', 'modified', 'extra', \ 'timestamp', 'server' ) class PaginationError(Exception): """This error is raised when pagination cannot be completed.""" pass def _in_bulk(model, ids): """ Short cut to fetch in bulk and trap database errors. sqlite will raise a "too many SQL variables" exception if this list is too long. 
Try using django and fetch manually if an error occurs returns a dict of this form { id: } """ try: return model.objects.in_bulk(ids) except DatabaseError: pass # if objects.in_bulk fails so will obejcts.filter(pk__in=ids) bulk_dict = {} [bulk_dict.__setitem__(i.id, i) \ for i in model.objects.all() if i.id in ids] return bulk_dict def server_error(request): """ 500 error handler. For now always return the debug response. Mailing isn't appropriate here. """ from django.views import debug return debug.technical_500_response(request, *sys.exc_info()) def timeview(fn): """ Setup a timeview view Handles backend posts from the calendar and converts date pieces into a 'timestamp' parameter """ def _handle_timeview(request, **kwargs): """Send any posts back.""" if request.method == 'POST' and request.POST.get('op', '') == 'timeview': cal_date = request.POST['cal_date'] try: fmt = "%Y/%m/%d" if cal_date.find(' ') > -1: fmt += " %H:%M" timestamp = datetime(*strptime(cal_date, fmt)[0:6]) view, args, kw = resolve(request.META['PATH_INFO']) kw['year'] = "%0.4d" % timestamp.year kw['month'] = "%02.d" % timestamp.month kw['day'] = "%02.d" % timestamp.day if cal_date.find(' ') > -1: kw['hour'] = timestamp.hour kw['minute'] = timestamp.minute return HttpResponseRedirect(reverse(view, args=args, kwargs=kw)) except KeyError: pass except: pass # FIXME - Handle this """Extract timestamp from args.""" timestamp = None try: timestamp = datetime(int(kwargs.pop('year')), int(kwargs.pop('month')), int(kwargs.pop('day')), int(kwargs.pop('hour', 0)), int(kwargs.pop('minute', 0)), 0) kwargs['timestamp'] = timestamp except KeyError: pass except: raise return fn(request, **kwargs) return _handle_timeview def _handle_filters(query, **kwargs): """ Applies standard filters to a query object Returns an updated query object query - query object to filter server -- Filter interactions by server state -- Filter interactions by state group -- Filter interactions by group """ if 'state' in kwargs and kwargs['state']: query = query.filter(state__exact=kwargs['state']) if 'server' in kwargs and kwargs['server']: query = query.filter(server__exact=kwargs['server']) if 'group' in kwargs and kwargs['group']: group = get_object_or_404(Group, name=kwargs['group']) query = query.filter(groups__id=group.pk) return query def config_item(request, pk, entry_type, interaction=None): """ Display a single entry. Displays information about a single entry. 
""" try: cls = BaseEntry.entry_from_name(entry_type) except ValueError: # TODO - handle this raise item = get_object_or_404(cls, pk=pk) # TODO - timestamp if interaction: try: inter = Interaction.objects.get(pk=interaction) except Interaction.DoesNotExist: raise Http404("Not a valid interaction") timestamp = inter.timestamp else: timestamp = datetime.now() ts_start = timestamp.replace(hour=1, minute=0, second=0, microsecond=0) ts_end = ts_start + timedelta(days=1) associated_list = item.interaction_set.select_related('client').filter(\ timestamp__gte=ts_start, timestamp__lt=ts_end) if item.is_failure(): template = 'config_items/item-failure.html' else: template = 'config_items/item.html' return render_to_response(template, {'item': item, 'associated_list': associated_list, 'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview def config_item_list(request, item_state, timestamp=None, **kwargs): """Render a listing of affected elements""" state = convert_entry_type_to_id(item_state.lower()) if state < 0: raise Http404 current_clients = Interaction.objects.recent(timestamp) current_clients = [q['id'] for q in _handle_filters(current_clients, **kwargs).values('id')] lists = [] for etype in ENTRY_TYPES: ldata = etype.objects.filter(state=state, interaction__in=current_clients)\ .annotate(num_entries=Count('id')).select_related() if len(ldata) > 0: # Property doesn't render properly.. lists.append((etype.ENTRY_TYPE, ldata)) return render_to_response('config_items/listing.html', {'item_list': lists, 'item_state': item_state, 'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview def entry_status(request, entry_type, pk, timestamp=None, **kwargs): """Render a listing of affected elements by type and name""" try: cls = BaseEntry.entry_from_name(entry_type) except ValueError: # TODO - handle this raise item = get_object_or_404(cls, pk=pk) current_clients = Interaction.objects.recent(timestamp) current_clients = [i['pk'] for i in _handle_filters(current_clients, **kwargs).values('pk')] # There is no good way to do this... items = [] seen = [] for it in cls.objects.filter(interaction__in=current_clients, name=item.name).select_related(): if it.pk not in seen: items.append((it, it.interaction_set.filter(pk__in=current_clients).order_by('client__name').select_related('client'))) seen.append(it.pk) return render_to_response('config_items/entry_status.html', {'entry': item, 'items': items, 'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview def common_problems(request, timestamp=None, threshold=None, group=None): """Mine config entries""" if request.method == 'POST': try: threshold = int(request.POST['threshold']) view, args, kw = resolve(request.META['PATH_INFO']) kw['threshold'] = threshold return HttpResponseRedirect(reverse(view, args=args, kwargs=kw)) except: pass try: threshold = int(threshold) except: threshold = 10 if group: group_obj = get_object_or_404(Group, name=group) current_clients = [inter[0] for inter in \ Interaction.objects.recent(timestamp)\ .filter(groups=group_obj).values_list('id')] else: current_clients = Interaction.objects.recent_ids(timestamp) lists = [] for etype in ENTRY_TYPES: ldata = etype.objects.exclude(state=TYPE_GOOD).filter( interaction__in=current_clients).annotate(num_entries=Count('id')).filter(num_entries__gte=threshold)\ .order_by('-num_entries', 'name') if len(ldata) > 0: # Property doesn't render properly.. 
lists.append((etype.ENTRY_TYPE, ldata)) return render_to_response('config_items/common.html', {'lists': lists, 'timestamp': timestamp, 'threshold': threshold}, context_instance=RequestContext(request)) @timeview def client_index(request, timestamp=None, **kwargs): """ Render a grid view of active clients. Keyword parameters: timestamp -- datetime object to render from """ list = _handle_filters(Interaction.objects.recent(timestamp), **kwargs).\ select_related('client').order_by("client__name").all() return render_to_response('clients/index.html', {'inter_list': list, 'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview def client_detailed_list(request, timestamp=None, **kwargs): """ Provides a more detailed list view of the clients. Allows for extra filters to be passed in. """ try: sort = request.GET['sort'] if sort[0] == '-': sort_key = sort[1:] else: sort_key = sort if not sort_key in __SORT_FIELDS__: raise ValueError if sort_key == "client": kwargs['orderby'] = "%s__name" % sort elif sort_key in ["good", "bad", "modified", "extra"]: kwargs['orderby'] = "%s_count" % sort else: kwargs['orderby'] = sort kwargs['sort'] = sort except (ValueError, KeyError): kwargs['orderby'] = "client__name" kwargs['sort'] = "client" kwargs['interaction_base'] = \ Interaction.objects.recent(timestamp).select_related() kwargs['page_limit'] = 0 return render_history_view(request, 'clients/detailed-list.html', **kwargs) def client_detail(request, hostname=None, pk=None): context = dict() client = get_object_or_404(Client, name=hostname) if(pk == None): inter = client.current_interaction maxdate = None else: inter = client.interactions.get(pk=pk) maxdate = inter.timestamp etypes = {TYPE_BAD: 'bad', TYPE_MODIFIED: 'modified', TYPE_EXTRA: 'extra'} edict = dict() for label in etypes.values(): edict[label] = [] for ekind in inter.entry_types: if ekind == 'failures': continue for ent in getattr(inter, ekind).all(): edict[etypes[ent.state]].append(ent) context['entry_types'] = edict context['interaction'] = inter return render_history_view(request, 'clients/detail.html', page_limit=5, client=client, maxdate=maxdate, context=context) def client_manage(request): """Manage client expiration""" message = '' if request.method == 'POST': try: client_name = request.POST.get('client_name', None) client_action = request.POST.get('client_action', None) client = Client.objects.get(name=client_name) if client_action == 'expire': client.expiration = datetime.now() client.save() message = "Expiration for %s set to %s." % \ (client_name, client.expiration.strftime("%Y-%m-%d %H:%M:%S")) elif client_action == 'unexpire': client.expiration = None client.save() message = "%s is now active." 
% client_name else: message = "Missing action" except Client.DoesNotExist: if not client_name: client_name = "" message = "Couldn't find client \"%s\"" % client_name return render_to_response('clients/manage.html', {'clients': Client.objects.order_by('name').all(), 'message': message}, context_instance=RequestContext(request)) @timeview def display_summary(request, timestamp=None): """ Display a summary of the bcfg2 world """ recent_data = Interaction.objects.recent(timestamp) \ .select_related() node_count = len(recent_data) if not timestamp: timestamp = datetime.now() collected_data = dict(clean=[], bad=[], modified=[], extra=[], stale=[]) for node in recent_data: if timestamp - node.timestamp > timedelta(hours=24): collected_data['stale'].append(node) # If stale check for uptime if node.bad_count > 0: collected_data['bad'].append(node) else: collected_data['clean'].append(node) if node.modified_count > 0: collected_data['modified'].append(node) if node.extra_count > 0: collected_data['extra'].append(node) # label, header_text, node_list summary_data = [] get_dict = lambda name, label: {'name': name, 'nodes': collected_data[name], 'label': label} if len(collected_data['clean']) > 0: summary_data.append(get_dict('clean', 'nodes are clean.')) if len(collected_data['bad']) > 0: summary_data.append(get_dict('bad', 'nodes are bad.')) if len(collected_data['modified']) > 0: summary_data.append(get_dict('modified', 'nodes were modified.')) if len(collected_data['extra']) > 0: summary_data.append(get_dict('extra', 'nodes have extra configurations.')) if len(collected_data['stale']) > 0: summary_data.append(get_dict('stale', 'nodes did not run within the last 24 hours.')) return render_to_response('displays/summary.html', {'summary_data': summary_data, 'node_count': node_count, 'timestamp': timestamp}, context_instance=RequestContext(request)) @timeview def display_timing(request, timestamp=None): perfs = Performance.objects.filter(interaction__in=Interaction.objects.recent_ids(timestamp))\ .select_related('interaction__client') mdict = dict() for perf in perfs: client = perf.interaction.client.name if client not in mdict: mdict[client] = { 'name': client } mdict[client][perf.metric] = perf.value return render_to_response('displays/timing.html', {'metrics': list(mdict.values()), 'timestamp': timestamp}, context_instance=RequestContext(request)) def render_history_view(request, template='clients/history.html', **kwargs): """ Provides a detailed history of a clients interactions. Renders a detailed history of a clients interactions. Allows for various filters and settings. Automatically sets pagination data into the context. Keyword arguments: interaction_base -- Interaction QuerySet to build on (default Interaction.objects) context -- Additional context data to render with page_number -- Page to display (default 1) page_limit -- Number of results per page, if 0 show all (default 25) client -- Client object to render hostname -- Client hostname to lookup and render. 
Returns a 404 if not found server -- Filter interactions by server state -- Filter interactions by state group -- Filter interactions by group entry_max -- Most recent interaction to display orderby -- Sort results using this field """ context = kwargs.get('context', dict()) max_results = int(kwargs.get('page_limit', 25)) page = int(kwargs.get('page_number', 1)) client = kwargs.get('client', None) if not client and 'hostname' in kwargs: client = get_object_or_404(Client, name=kwargs['hostname']) if client: context['client'] = client entry_max = kwargs.get('maxdate', None) context['entry_max'] = entry_max # Either filter by client or limit by clients iquery = kwargs.get('interaction_base', Interaction.objects) if client: iquery = iquery.filter(client__exact=client) iquery = iquery.select_related('client') if 'orderby' in kwargs and kwargs['orderby']: iquery = iquery.order_by(kwargs['orderby']) if 'sort' in kwargs: context['sort'] = kwargs['sort'] iquery = _handle_filters(iquery, **kwargs) if entry_max: iquery = iquery.filter(timestamp__lte=entry_max) if max_results < 0: max_results = 1 entry_list = [] if max_results > 0: try: rec_start, rec_end = prepare_paginated_list(request, context, iquery, page, max_results) except PaginationError: page_error = sys.exc_info()[1] if isinstance(page_error[0], HttpResponse): return page_error[0] return HttpResponseServerError(page_error) context['entry_list'] = iquery.all()[rec_start:rec_end] else: context['entry_list'] = iquery.all() return render_to_response(template, context, context_instance=RequestContext(request)) def prepare_paginated_list(request, context, paged_list, page=1, max_results=25): """ Prepare context and slice an object for pagination. """ if max_results < 1: raise PaginationError("Max results less then 1") if paged_list == None: raise PaginationError("Invalid object") try: nitems = paged_list.count() except TypeError: nitems = len(paged_list) rec_start = (page - 1) * int(max_results) try: total_pages = (nitems / int(max_results)) + 1 except: total_pages = 1 if page > total_pages: # If we passed beyond the end send back try: view, args, kwargs = resolve(request.META['PATH_INFO']) kwargs['page_number'] = total_pages raise PaginationError(HttpResponseRedirect(reverse(view, kwargs=kwargs))) except (Resolver404, NoReverseMatch, ValueError): raise "Accessing beyond last page. Unable to resolve redirect." 
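    # Worked example (numbers are illustrative): with max_results=25 and
    # page=3, rec_start is (3 - 1) * 25 = 50 and the returned slice is
    # [50:75], i.e. records 51-75; for 60 records, total_pages is
    # (60 / 25) + 1 = 3 under the integer division used above.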
context['total_pages'] = total_pages context['records_per_page'] = max_results return (rec_start, rec_start + int(max_results)) src/lib/Bcfg2/Server/000077500000000000000000000000001303523157100146045ustar00rootroot00000000000000src/lib/Bcfg2/Server/Admin.py000066400000000000000000001353351303523157100162200ustar00rootroot00000000000000""" Subcommands and helpers for bcfg2-admin """ import os import sys import time import glob import stat import random import socket import string import getpass import difflib import tarfile import argparse import lxml.etree import Bcfg2.Logger import Bcfg2.Options import Bcfg2.DBSettings import Bcfg2.Server.Core import Bcfg2.Client.Proxy from Bcfg2.Server.Plugin import PullSource, Generator, MetadataConsistencyError from Bcfg2.Utils import hostnames2ranges, Executor, safe_input import Bcfg2.Server.Plugins.Metadata try: from django.core.exceptions import ImproperlyConfigured from django.core import management import django import django.conf import Bcfg2.Server.models HAS_DJANGO = True if django.VERSION[0] == 1 and django.VERSION[1] >= 7: HAS_REPORTS = True else: try: import south # pylint: disable=W0611 HAS_REPORTS = True except ImportError: HAS_REPORTS = False except ImportError: HAS_DJANGO = False HAS_REPORTS = False class ccolors: # pylint: disable=C0103,W0232 """ ANSI color escapes to make colorizing text easier """ # pylint: disable=W1401 ADDED = '\033[92m' CHANGED = '\033[93m' REMOVED = '\033[91m' ENDC = '\033[0m' # pylint: enable=W1401 @classmethod def disable(cls): """ Disable all coloration """ cls.ADDED = '' cls.CHANGED = '' cls.REMOVED = '' cls.ENDC = '' def gen_password(length): """Generates a random alphanumeric password with length characters.""" chars = string.ascii_letters + string.digits return "".join(random.choice(chars) for i in range(length)) def print_table(rows, justify='left', hdr=True, vdelim=" ", padding=1): """Pretty print a table rows - list of rows ([[row 1], [row 2], ..., [row n]]) hdr - if True the first row is treated as a table header vdelim - vertical delimiter between columns padding - # of spaces around the longest element in the column justify - may be left,center,right """ hdelim = "=" justify = {'left': str.ljust, 'center': str.center, 'right': str.rjust}[justify.lower()] # Calculate column widths (longest item in each column # plus padding on both sides) cols = list(zip(*rows)) col_widths = [max([len(str(item)) + 2 * padding for item in col]) for col in cols] borderline = vdelim.join([w * hdelim for w in col_widths]) # Print out the table print(borderline) for row in rows: print(vdelim.join([justify(str(item), width) for (item, width) in zip(row, col_widths)])) if hdr: print(borderline) hdr = False class AdminCmd(Bcfg2.Options.Subcommand): # pylint: disable=W0223 """ Base class for all bcfg2-admin modes """ def setup(self): """ Perform post-init (post-options parsing), pre-run setup tasks """ pass def errExit(self, emsg): """ exit with an error """ print(emsg) raise SystemExit(1) class _ServerAdminCmd(AdminCmd): # pylint: disable=W0223 """ Base class for admin modes that run a Bcfg2 server. 
""" __plugin_whitelist__ = None __plugin_blacklist__ = None options = AdminCmd.options + Bcfg2.Server.Core.Core.options def __init__(self): AdminCmd.__init__(self) self.metadata = None def setup(self): if self.__plugin_whitelist__ is not None: Bcfg2.Options.setup.plugins = [ p for p in Bcfg2.Options.setup.plugins if p.name in self.__plugin_whitelist__] elif self.__plugin_blacklist__ is not None: Bcfg2.Options.setup.plugins = [ p for p in Bcfg2.Options.setup.plugins if p.name not in self.__plugin_blacklist__] try: self.core = Bcfg2.Server.Core.Core() except Bcfg2.Server.Core.CoreInitError: msg = sys.exc_info()[1] self.errExit("Core load failed: %s" % msg) self.core.load_plugins() self.core.fam.handle_event_set() self.metadata = self.core.metadata def shutdown(self): self.core.shutdown() class _ProxyAdminCmd(AdminCmd): # pylint: disable=W0223 """ Base class for admin modes that proxy to a running Bcfg2 server """ options = AdminCmd.options + Bcfg2.Client.Proxy.ComponentProxy.options def __init__(self): AdminCmd.__init__(self) self.proxy = None def setup(self): self.proxy = Bcfg2.Client.Proxy.ComponentProxy() class Backup(AdminCmd): """ Make a backup of the Bcfg2 repository """ options = AdminCmd.options + [Bcfg2.Options.Common.repository] def run(self, setup): timestamp = time.strftime('%Y%m%d%H%M%S') datastore = setup.repository fmt = 'gz' mode = 'w:' + fmt filename = timestamp + '.tar' + '.' + fmt out = tarfile.open(os.path.join(datastore, filename), mode=mode) out.add(datastore, os.path.basename(datastore)) out.close() print("Archive %s was stored under %s" % (filename, datastore)) class Client(_ServerAdminCmd): """ Create, modify, delete, or list client entries """ __plugin_whitelist__ = ["Metadata"] options = _ServerAdminCmd.options + [ Bcfg2.Options.PositionalArgument( "mode", choices=["add", "del", "delete", "remove", "rm", "up", "update", "list"]), Bcfg2.Options.PositionalArgument("hostname", nargs='?'), Bcfg2.Options.PositionalArgument("attributes", metavar="KEY=VALUE", nargs='*')] valid_attribs = ['profile', 'uuid', 'password', 'floating', 'secure', 'address', 'auth'] def get_attribs(self, setup): """ Get attributes for adding or updating a client from the command line """ attr_d = {} for i in setup.attributes: attr, val = i.split('=', 1) if attr not in self.valid_attribs: print("Attribute %s unknown. 
Valid attributes: %s" % (attr, self.valid_attribs)) raise SystemExit(1) attr_d[attr] = val return attr_d def run(self, setup): if setup.mode != 'list' and not setup.hostname: self.parser.error(" is required in %s mode" % setup.mode) elif setup.mode == 'list' and setup.hostname: self.logger.warning(" is not honored in list mode") if setup.mode == 'list': for client in self.metadata.list_clients(): print(client) else: include_attribs = True if setup.mode == 'add': func = self.metadata.add_client action = "adding" elif setup.mode in ['up', 'update']: func = self.metadata.update_client action = "updating" elif setup.mode in ['del', 'delete', 'rm', 'remove']: func = self.metadata.remove_client include_attribs = False action = "deleting" if include_attribs: args = (setup.hostname, self.get_attribs(setup)) else: args = (setup.hostname,) try: func(*args) except MetadataConsistencyError: err = sys.exc_info()[1] self.errExit("Error %s client %s: %s" % (setup.hostname, action, err)) class Compare(AdminCmd): """ Compare two hosts or two versions of a host specification """ help = "Given two XML files (as produced by bcfg2-info build or bcfg2 " + \ "-qnc) or two directories containing XML files (as produced by " + \ "bcfg2-info buildall or bcfg2-info builddir), output a detailed, " + \ "Bcfg2-centric diff." options = AdminCmd.options + [ Bcfg2.Options.Option( "-d", "--diff-lines", type=int, help="Show only N lines of a diff"), Bcfg2.Options.BooleanOption( "-c", "--color", help="Use colors even if not run from a TTY"), Bcfg2.Options.BooleanOption( "-q", "--quiet", help="Only show that entries differ, not how they differ"), Bcfg2.Options.PathOption("path1", metavar=""), Bcfg2.Options.PathOption("path2", metavar="")] changes = dict() def removed(self, msg, host): """ Record a removed element """ self.record("%sRemoved: %s%s" % (ccolors.REMOVED, msg, ccolors.ENDC), host) def added(self, msg, host): """ Record an removed element """ self.record("%sAdded: %s%s" % (ccolors.ADDED, msg, ccolors.ENDC), host) def changed(self, msg, host): """ Record a changed element """ self.record("%sChanged: %s%s" % (ccolors.CHANGED, msg, ccolors.ENDC), host) def record(self, msg, host): """ Record a new removed/added/changed message for the given host """ if msg not in self.changes: self.changes[msg] = [host] else: self.changes[msg].append(host) def udiff(self, lines1, lines2, **kwargs): """ get a unified diff with control lines stripped """ lines = None if "lines" in kwargs: if kwargs['lines'] is not None: lines = int(kwargs['lines']) del kwargs['lines'] if lines == 0: return [] kwargs['n'] = 0 diff = [] for line in difflib.unified_diff(lines1, lines2, **kwargs): if (line.startswith("--- ") or line.startswith("+++ ") or line.startswith("@@ ")): continue if lines is not None and len(diff) > lines: diff.append(" ...") break if line.startswith("+"): diff.extend(" %s%s%s" % (ccolors.ADDED, l, ccolors.ENDC) for l in line.splitlines()) elif line.startswith("-"): diff.extend(" %s%s%s" % (ccolors.REMOVED, l, ccolors.ENDC) for l in line.splitlines()) return diff def _bundletype(self, el): """ Get a human-friendly representation of the type of the given bundle -- independent or not """ if el.get("tag") == "Independent": return "Independent bundle" else: return "Bundle" def _get_filelists(self, setup): """ Get a list of 2-tuples of files to compare """ files = [] if os.path.isdir(setup.path1) and os.path.isdir(setup.path1): for fpath in glob.glob(os.path.join(setup.path1, '*')): fname = os.path.basename(fpath) if 
os.path.exists(os.path.join(setup.path2, fname)): files.append((os.path.join(setup.path1, fname), os.path.join(setup.path2, fname))) else: if fname.endswith(".xml"): host = fname[0:-4] else: host = fname self.removed(host, '') for fpath in glob.glob(os.path.join(setup.path2, '*')): fname = os.path.basename(fpath) if not os.path.exists(os.path.join(setup.path1, fname)): if fname.endswith(".xml"): host = fname[0:-4] else: host = fname self.added(host, '') elif os.path.isfile(setup.path1) and os.path.isfile(setup.path2): files.append((setup.path1, setup.path2)) else: self.errExit("Cannot diff a file and a directory") return files def run(self, setup): # pylint: disable=R0912,R0914,R0915 if not sys.stdout.isatty() and not setup.color: ccolors.disable() files = self._get_filelists(setup) for file1, file2 in files: host = None if os.path.basename(file1) == os.path.basename(file2): fname = os.path.basename(file1) if fname.endswith(".xml"): host = fname[0:-4] else: host = fname xdata1 = lxml.etree.parse(file1).getroot() xdata2 = lxml.etree.parse(file2).getroot() elements1 = dict() elements2 = dict() bundles1 = [el.get("name") for el in xdata1.iterchildren()] bundles2 = [el.get("name") for el in xdata2.iterchildren()] for el in xdata1.iterchildren(): if el.get("name") not in bundles2: self.removed("%s %s" % (self._bundletype(el), el.get("name")), host) for el in xdata2.iterchildren(): if el.get("name") not in bundles1: self.added("%s %s" % (self._bundletype(el), el.get("name")), host) for bname in bundles1: bundle = xdata1.find("*[@name='%s']" % bname) for el in bundle.getchildren(): elements1["%s:%s" % (el.tag, el.get("name"))] = el for bname in bundles2: bundle = xdata2.find("*[@name='%s']" % bname) for el in bundle.getchildren(): elements2["%s:%s" % (el.tag, el.get("name"))] = el for el in elements1.values(): elid = "%s:%s" % (el.tag, el.get("name")) if elid not in elements2: self.removed("Element %s" % elid, host) else: el2 = elements2[elid] if (el.getparent().get("name") != el2.getparent().get("name")): self.changed( "Element %s was in bundle %s, " "now in bundle %s" % (elid, el.getparent().get("name"), el2.getparent().get("name")), host) attr1 = sorted(["%s=\"%s\"" % (attr, el.get(attr)) for attr in el.attrib]) attr2 = sorted(["%s=\"%s\"" % (attr, el.get(attr)) for attr in el2.attrib]) if attr1 != attr2: err = ["Element %s has different attributes" % elid] if not setup.quiet: err.extend(self.udiff(attr1, attr2)) self.changed("\n".join(err), host) if el.text != el2.text: if el.text is None: self.changed("Element %s content was added" % elid, host) elif el2.text is None: self.changed("Element %s content was removed" % elid, host) else: err = ["Element %s has different content" % elid] if not setup.quiet: err.extend( self.udiff(el.text.splitlines(), el2.text.splitlines(), lines=setup.diff_lines)) self.changed("\n".join(err), host) for el in elements2.values(): elid = "%s:%s" % (el.tag, el.get("name")) if elid not in elements2: self.removed("Element %s" % elid, host) for change, hosts in self.changes.items(): hlist = [h for h in hosts if h is not None] if len(files) > 1 and len(hlist): print("===== %s =====" % "\n ".join(hostnames2ranges(hlist))) print(change) if len(files) > 1 and len(hlist): print("") class ExpireCache(_ProxyAdminCmd): """ Expire the metadata cache """ options = _ProxyAdminCmd.options + [ Bcfg2.Options.PositionalArgument( "hostname", nargs="*", default=[], help="Expire cache for the given host(s)")] def run(self, setup): clients = None if setup.hostname is not None and 
len(setup.hostname) > 0: clients = setup.hostname try: self.proxy.expire_metadata_cache(clients) except Bcfg2.Client.Proxy.ProxyError: self.errExit("Proxy Error: %s" % sys.exc_info()[1]) class Init(AdminCmd): """Interactively initialize a new repository.""" options = AdminCmd.options + [ Bcfg2.Options.Common.repository, Bcfg2.Options.Common.plugins] # default config file config = '''[server] repository = %s plugins = %s # Uncomment the following to listen on all interfaces #listen_all = true [database] #engine = sqlite3 # 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. #name = # Or path to database file if using sqlite3. #/etc/bcfg2.sqlite is default path if left empty #user = # Not used with sqlite3. #password = # Not used with sqlite3. #host = # Not used with sqlite3. #port = [reporting] transport = LocalFilesystem [communication] password = %s certificate = %s key = %s ca = %s [components] bcfg2 = %s ''' # Default groups groups = ''' ''' # Default contents of clients.xml clients = ''' ''' def __init__(self): AdminCmd.__init__(self) self.data = dict() def _set_defaults(self, setup): """Set default parameters.""" self.data['plugins'] = setup.plugins self.data['configfile'] = setup.config self.data['repopath'] = setup.repository self.data['password'] = gen_password(8) self.data['shostname'] = socket.getfqdn() self.data['server_uri'] = "https://%s:6789" % self.data['shostname'] self.data['country'] = 'US' self.data['state'] = 'Illinois' self.data['location'] = 'Argonne' if os.path.exists("/etc/pki/tls"): self.data['keypath'] = "/etc/pki/tls/private/bcfg2.key" self.data['certpath'] = "/etc/pki/tls/certs/bcfg2.crt" elif os.path.exists("/etc/ssl"): self.data['keypath'] = "/etc/ssl/bcfg2.key" self.data['certpath'] = "/etc/ssl/bcfg2.crt" else: basepath = os.path.dirname(self.data['configfile']) self.data['keypath'] = os.path.join(basepath, "bcfg2.key") self.data['certpath'] = os.path.join(basepath, 'bcfg2.crt') def input_with_default(self, msg, default_name): """ Prompt for input with the given message, taking the default from ``self.data`` """ val = safe_input("%s [%s]: " % (msg, self.data[default_name])) if val: self.data[default_name] = val def run(self, setup): self._set_defaults(setup) # Prompt the user for input self._prompt_server() self._prompt_config() self._prompt_repopath() self._prompt_password() self._prompt_keypath() self._prompt_certificate() # Initialize the repository self.init_repo() def _prompt_server(self): """Ask for the server name and URI.""" self.input_with_default("What is the server's hostname", 'shostname') # reset default server URI self.data['server_uri'] = "https://%s:6789" % self.data['shostname'] self.input_with_default("Server location", 'server_uri') def _prompt_config(self): """Ask for the configuration file path.""" self.input_with_default("Path to Bcfg2 configuration", 'configfile') def _prompt_repopath(self): """Ask for the repository path.""" while True: self.input_with_default("Location of Bcfg2 repository", 'repopath') if os.path.isdir(self.data['repopath']): response = safe_input("Directory %s exists. Overwrite? 
[y/N]:" % self.data['repopath']) if response.lower().strip() == 'y': break else: break def _prompt_password(self): """Ask for a password or generate one if none is provided.""" newpassword = getpass.getpass( "Input password used for communication verification " "(without echoing; leave blank for random): ").strip() if len(newpassword) != 0: self.data['password'] = newpassword def _prompt_certificate(self): """Ask for the key details (country, state, and location).""" print("The following questions affect SSL certificate generation.") print("If no data is provided, the default values are used.") self.input_with_default("Country code for certificate", 'country') self.input_with_default("State or Province Name (full name) for " "certificate", 'state') self.input_with_default("Locality Name (e.g., city) for certificate", 'location') def _prompt_keypath(self): """ Ask for the key pair location. Try to use sensible defaults depending on the OS """ self.input_with_default("Path where Bcfg2 server private key will be " "created", 'keypath') self.input_with_default("Path where Bcfg2 server cert will be created", 'certpath') def _init_plugins(self): """Initialize each plugin-specific portion of the repository.""" for plugin in self.data['plugins']: kwargs = dict() if issubclass(plugin, Bcfg2.Server.Plugins.Metadata.Metadata): kwargs.update( dict(groups_xml=self.groups, clients_xml=self.clients % self.data['shostname'])) plugin.init_repo(self.data['repopath'], **kwargs) def create_conf(self): """ create the config file """ confdata = self.config % ( self.data['repopath'], ','.join(p.__name__ for p in self.data['plugins']), self.data['password'], self.data['certpath'], self.data['keypath'], self.data['certpath'], self.data['server_uri']) # Don't overwrite existing bcfg2.conf file if os.path.exists(self.data['configfile']): result = safe_input("\nWarning: %s already exists. " "Overwrite? [y/N]: " % self.data['configfile']) if result not in ['Y', 'y']: print("Leaving %s unchanged" % self.data['configfile']) return try: open(self.data['configfile'], "w").write(confdata) os.chmod(self.data['configfile'], stat.S_IRUSR | stat.S_IWUSR) # 0600 except: # pylint: disable=W0702 self.errExit("Error trying to write configuration file '%s': %s" % (self.data['configfile'], sys.exc_info()[1])) def init_repo(self): """Setup a new repo and create the content of the configuration file.""" # Create the repository path = os.path.join(self.data['repopath'], 'etc') try: os.makedirs(path) self._init_plugins() print("Repository created successfuly in %s" % self.data['repopath']) except OSError: print("Failed to create %s." 
% path) # Create the configuration file and SSL key self.create_conf() self.create_key() def create_key(self): """Creates a bcfg2.key at the directory specifed by keypath.""" cmd = Executor(timeout=120) subject = "/C=%s/ST=%s/L=%s/CN=%s" % ( self.data['country'], self.data['state'], self.data['location'], self.data['shostname']) key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes", "-subj", subject, "-days", "1000", "-newkey", "rsa:2048", "-keyout", self.data['keypath'], "-noout"]) if not key.success: print("Error generating key: %s" % key.error) return os.chmod(self.data['keypath'], stat.S_IRUSR | stat.S_IWUSR) # 0600 csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject, "-key", self.data['keypath']]) if not csr.success: print("Error generating certificate signing request: %s" % csr.error) return cert = cmd.run(["openssl", "x509", "-req", "-days", "1000", "-signkey", self.data['keypath'], "-out", self.data['certpath']], inputdata=csr.stdout) if not cert.success: print("Error signing certificate: %s" % cert.error) return class Minestruct(_ServerAdminCmd): """ Extract extra entry lists from statistics """ options = _ServerAdminCmd.options + [ Bcfg2.Options.PathOption( "-f", "--outfile", type=argparse.FileType('w'), default=sys.stdout, help="Write to the given file"), Bcfg2.Options.Option( "-g", "--groups", help="Only build config for groups", type=Bcfg2.Options.Types.colon_list, default=[]), Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): try: extra = set() for source in self.core.plugins_by_type(PullSource): for item in source.GetExtra(setup.hostname): extra.add(item) except: # pylint: disable=W0702 self.errExit("Failed to find extra entry info for client %s: %s" % (setup.hostname, sys.exc_info()[1])) root = lxml.etree.Element("Base") self.logger.info("Found %d extra entries" % len(extra)) add_point = root for grp in setup.groups: add_point = lxml.etree.SubElement(add_point, "Group", name=grp) for tag, name in extra: self.logger.info("%s: %s" % (tag, name)) lxml.etree.SubElement(add_point, tag, name=name) lxml.etree.ElementTree(root).write(setup.outfile, pretty_print=True) class Perf(_ProxyAdminCmd): """ Get performance data from server """ def run(self, setup): output = [('Name', 'Min', 'Max', 'Mean', 'Count')] data = self.proxy.get_statistics() for key in sorted(data.keys()): output.append( (key, ) + tuple(["%.06f" % item for item in data[key][:-1]] + [data[key][-1]])) print_table(output) class Pull(_ServerAdminCmd): """ Retrieves entries from clients and integrates the information into the repository """ options = _ServerAdminCmd.options + [ Bcfg2.Options.Common.interactive, Bcfg2.Options.BooleanOption( "-s", "--stdin", help="Read lists of from stdin " "instead of the command line"), Bcfg2.Options.PositionalArgument("hostname", nargs='?'), Bcfg2.Options.PositionalArgument("entrytype", nargs='?'), Bcfg2.Options.PositionalArgument("entryname", nargs='?')] def __init__(self): _ServerAdminCmd.__init__(self) self.interactive = False def setup(self): if (not Bcfg2.Options.setup.stdin and not (Bcfg2.Options.setup.hostname and Bcfg2.Options.setup.entrytype and Bcfg2.Options.setup.entryname)): print("You must specify either --stdin or a hostname, entry type, " "and entry name on the command line.") self.errExit(self.usage()) _ServerAdminCmd.setup(self) def run(self, setup): self.interactive = setup.interactive if setup.stdin: for line in sys.stdin: try: self.PullEntry(*line.split(None, 3)) except SystemExit: print(" for %s" % line) except: print("Bad 
entry: %s" % line.strip()) else: self.PullEntry(setup.hostname, setup.entrytype, setup.entryname) def BuildNewEntry(self, client, etype, ename): """Construct a new full entry for given client/entry from statistics. """ new_entry = {'type': etype, 'name': ename} pull_sources = self.core.plugins_by_type(PullSource) for plugin in pull_sources: try: (owner, group, mode, contents) = \ plugin.GetCurrentEntry(client, etype, ename) break except Bcfg2.Server.Plugin.PluginExecutionError: if plugin == pull_sources[-1]: self.errExit("Pull Source failure; could not fetch " "current state") try: data = {'owner': owner, 'group': group, 'mode': mode, 'text': contents} except UnboundLocalError: self.errExit("Unable to build entry") for key, val in list(data.items()): if val: new_entry[key] = val return new_entry def Choose(self, choices): """Determine where to put pull data.""" if self.interactive: for choice in choices: print("Plugin returned choice:") if id(choice) == id(choices[0]): print("(current entry) ") if choice.all: print(" => global entry") elif choice.group: print(" => group entry: %s (prio %d)" % (choice.group, choice.prio)) else: print(" => host entry: %s" % (choice.hostname)) # flush input buffer ans = safe_input("Use this entry? [yN]: ") in ['y', 'Y'] if ans: return choice return False else: if not choices: return False return choices[0] def PullEntry(self, client, etype, ename): """Make currently recorded client state correct for entry.""" new_entry = self.BuildNewEntry(client, etype, ename) meta = self.core.build_metadata(client) # Find appropriate plugin in core glist = [gen for gen in self.core.plugins_by_type(Generator) if ename in gen.Entries.get(etype, {})] if len(glist) != 1: self.errExit("Got wrong numbers of matching generators for entry:" "%s" % ([g.name for g in glist])) plugin = glist[0] if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget): self.errExit("Configuration upload not supported by plugin %s" % plugin.name) try: choices = plugin.AcceptChoices(new_entry, meta) specific = self.Choose(choices) if specific: plugin.AcceptPullData(specific, new_entry, self.logger) except Bcfg2.Server.Plugin.PluginExecutionError: self.errExit("Configuration upload not supported by plugin %s" % plugin.name) # Commit if running under a VCS for vcsplugin in list(self.core.plugins.values()): if isinstance(vcsplugin, Bcfg2.Server.Plugin.Version): files = "%s/%s" % (plugin.data, ename) comment = 'file "%s" pulled from host %s' % (files, client) vcsplugin.commit_data([files], comment) class _ReportsCmd(AdminCmd): # pylint: disable=W0223 """ Base command for all admin modes dealing with the reporting subsystem """ def __init__(self): AdminCmd.__init__(self) self.reports_entries = () self.reports_classes = () def setup(self): # this has to be imported after options are parsed, because # Django finalizes its settings as soon as it's loaded, which # means that if we import this before Bcfg2.DBSettings has # been populated, Django gets a null configuration, and # subsequent updates to Bcfg2.DBSettings won't help. 
import Bcfg2.Reporting.models # pylint: disable=W0621 self.reports_entries = (Bcfg2.Reporting.models.Group, Bcfg2.Reporting.models.Bundle, Bcfg2.Reporting.models.FailureEntry, Bcfg2.Reporting.models.ActionEntry, Bcfg2.Reporting.models.PathEntry, Bcfg2.Reporting.models.PackageEntry, Bcfg2.Reporting.models.PathEntry, Bcfg2.Reporting.models.ServiceEntry) self.reports_classes = self.reports_entries + ( Bcfg2.Reporting.models.Client, Bcfg2.Reporting.models.Interaction, Bcfg2.Reporting.models.Performance) if HAS_DJANGO: class _DjangoProxyCmd(AdminCmd): """ Base for admin modes that proxy a command through the Django management system """ command = None args = [] kwargs = {} def run(self, _): '''Call a django command''' if self.command is not None: command = self.command else: command = self.__class__.__name__.lower() args = [command] + self.args management.call_command(*args, **self.kwargs) class DBShell(_DjangoProxyCmd): """ Call the Django 'dbshell' command on the database """ class Shell(_DjangoProxyCmd): """ Call the Django 'shell' command on the database """ class ValidateDB(_DjangoProxyCmd): """ Call the Django 'validate' command on the database """ command = "validate" class Syncdb(AdminCmd): """ Sync the Django ORM with the configured database """ def run(self, setup): try: Bcfg2.DBSettings.sync_databases( interactive=False, verbosity=setup.verbose + setup.debug) except ImproperlyConfigured: err = sys.exc_info()[1] self.logger.error("Django configuration problem: %s" % err) raise SystemExit(1) except: err = sys.exc_info()[1] self.logger.error("Database update failed: %s" % err) raise SystemExit(1) if django.VERSION[0] == 1 and django.VERSION[1] >= 7: class Makemigrations(_DjangoProxyCmd): """ Call the 'makemigrations' command on the database """ args = ['Reporting'] else: class Schemamigration(_DjangoProxyCmd): """ Call the South 'schemamigration' command on the database """ args = ['Bcfg2.Reporting'] kwargs = {'auto': True} if HAS_REPORTS: import datetime class ScrubReports(_ReportsCmd): """ Perform a thorough scrub and cleanup of the Reporting database """ def setup(self): _ReportsCmd.setup(self) # this has to be imported after options are parsed, # because Django finalizes its settings as soon as it's # loaded, which means that if we import this before # Bcfg2.DBSettings has been populated, Django gets a null # configuration, and subsequent updates to # Bcfg2.DBSettings won't help. 
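            # The transaction import below is deferred for the same reason;
            # wrapping run() in transaction.atomic means the whole scrub is
            # committed or rolled back as one unit, roughly (sketch only):
            #
            #     with transaction.atomic():
            #         self.run(...)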
from Bcfg2.Reporting.Compat import transaction self.run = transaction.atomic(self.run) def run(self, _): # pylint: disable=E0202 # Cleanup unused entries for cls in self.reports_entries: try: start_count = cls.objects.count() cls.prune_orphans() self.logger.info("Pruned %d %s records" % (start_count - cls.objects.count(), cls.__name__)) except: # pylint: disable=W0702 print("Failed to prune %s: %s" % (cls.__name__, sys.exc_info()[1])) class InitReports(AdminCmd): """ Initialize the Reporting database """ def run(self, setup): verbose = setup.verbose + setup.debug try: Bcfg2.DBSettings.sync_databases(interactive=False, verbosity=verbose) Bcfg2.DBSettings.migrate_databases(interactive=False, verbosity=verbose) except: # pylint: disable=W0702 self.errExit("%s failed: %s" % (self.__class__.__name__.title(), sys.exc_info()[1])) class UpdateReports(InitReports): """ Apply updates to the reporting database """ class ReportsStats(_ReportsCmd): """ Print Reporting database statistics """ def run(self, _): for cls in self.reports_classes: print("%s has %s records" % (cls.__name__, cls.objects.count())) class PurgeReports(_ReportsCmd): """ Purge records from the Reporting database """ options = AdminCmd.options + [ Bcfg2.Options.Option("--client", help="Client to operate on"), Bcfg2.Options.Option("--days", type=int, metavar='N', help="Records older than N days"), Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.BooleanOption("--expired", help="Expired clients only"), Bcfg2.Options.Option("--state", help="Purge entries in state", choices=['dirty', 'clean', 'modified']), required=False)] def run(self, setup): if setup.days: maxdate = datetime.datetime.now() - \ datetime.timedelta(days=setup.days) else: maxdate = None starts = {} for cls in self.reports_classes: starts[cls] = cls.objects.count() if setup.expired: self.purge_expired(maxdate) else: self.purge(setup.client, maxdate, setup.state) for cls in self.reports_classes: self.logger.info("Purged %s %s records" % (starts[cls] - cls.objects.count(), cls.__name__)) def purge(self, client=None, maxdate=None, state=None): '''Purge historical data from the database''' # indicates whether or not a client should be deleted filtered = False if not client and not maxdate and not state: self.errExit("Refusing to prune all data. Specify an option " "to %s" % self.__class__.__name__.lower()) ipurge = Bcfg2.Reporting.models.Interaction.objects if client: try: cobj = Bcfg2.Reporting.models.Client.objects.get( name=client) ipurge = ipurge.filter(client=cobj) except Bcfg2.Reporting.models.Client.DoesNotExist: self.errExit("Client %s not in database" % client) self.logger.debug("Filtering by client: %s" % client) if maxdate: filtered = True self.logger.debug("Filtering by maxdate: %s" % maxdate) ipurge = ipurge.filter(timestamp__lt=maxdate) if django.conf.settings.DATABASES['default']['ENGINE'] == \ 'django.db.backends.sqlite3': grp_limit = 100 else: grp_limit = 1000 if state: filtered = True self.logger.debug("Filtering by state: %s" % state) ipurge = ipurge.filter(state=state) count = ipurge.count() rnum = 0 try: while rnum < count: grp = list(ipurge[:grp_limit].values("id")) # just in case... 
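                    # Deleting in slices of grp_limit ids keeps the generated
                    # "id IN (...)" clause small; e.g. (illustrative numbers)
                    # with grp_limit = 100 and 250 matching interactions the
                    # loop issues three deletes covering 100, 100 and 50 rows.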
if not grp: break Bcfg2.Reporting.models.Interaction.objects.filter( id__in=[x['id'] for x in grp]).delete() rnum += len(grp) self.logger.debug("Deleted %s of %s" % (rnum, count)) except: # pylint: disable=W0702 self.logger.error("Failed to remove interactions: %s" % sys.exc_info()[1]) # Prune any orphaned ManyToMany relations for m2m in self.reports_entries: self.logger.debug("Pruning any orphaned %s objects" % m2m.__name__) m2m.prune_orphans() if client and not filtered: # Delete the client, ping data is automatic try: self.logger.debug("Purging client %s" % client) cobj.delete() except: # pylint: disable=W0702 self.logger.error("Failed to delete client %s: %s" % (client, sys.exc_info()[1])) def purge_expired(self, maxdate=None): """ Purge expired clients from the Reporting database """ if maxdate: if not isinstance(maxdate, datetime.datetime): raise TypeError("maxdate is not a DateTime object") self.logger.debug("Filtering by maxdate: %s" % maxdate) clients = Bcfg2.Reporting.models.Client.objects.filter( expiration__lt=maxdate) else: clients = Bcfg2.Reporting.models.Client.objects.filter( expiration__isnull=False) for client in clients: self.logger.debug("Purging client %s" % client) Bcfg2.Reporting.models.Interaction.objects.filter( client=client).delete() client.delete() class ReportsSQLAll(_DjangoProxyCmd): """ Call the Django 'sqlall' command on the Reporting database """ args = ["Reporting"] class Viz(_ServerAdminCmd): """ Produce graphviz diagrams of metadata structures """ options = _ServerAdminCmd.options + [ Bcfg2.Options.BooleanOption( "-H", "--includehosts", help="Include hosts in the viz output"), Bcfg2.Options.BooleanOption( "-b", "--includebundles", help="Include bundles in the viz output"), Bcfg2.Options.BooleanOption( "-k", "--includekey", help="Show a key for different digraph shapes"), Bcfg2.Options.Option( "-c", "--only-client", metavar="", help="Only show groups and bundles for the named client"), Bcfg2.Options.PathOption( "-o", "--outfile", help="Write viz output to an output file")] colors = ['steelblue1', 'chartreuse', 'gold', 'magenta', 'indianred1', 'limegreen', 'orange1', 'lightblue2', 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66'] __plugin_blacklist__ = ['DBStats', 'Cfg', 'Pkgmgr', 'Packages', 'Rules', 'Decisions', 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr', 'Bundler'] def run(self, setup): if setup.outfile: fmt = setup.outfile.split('.')[-1] else: fmt = 'png' exc = Executor() cmd = ["dot", "-T", fmt] if setup.outfile: cmd.extend(["-o", setup.outfile]) inputlist = ["digraph groups {", '\trankdir="LR";', self.metadata.viz(setup.includehosts, setup.includebundles, setup.includekey, setup.only_client, self.colors)] if setup.includekey: inputlist.extend( ["\tsubgraph cluster_key {", '\tstyle="filled";', '\tcolor="lightblue";', '\tBundle [ shape="septagon" ];', '\tGroup [shape="ellipse"];', '\tGroup Category [shape="trapezium"];\n', '\tProfile [style="bold", shape="ellipse"];', '\tHblock [label="Host1|Host2|Host3",shape="record"];', '\tlabel="Key";', "\t}"]) inputlist.append("}") idata = "\n".join(inputlist) try: result = exc.run(cmd, inputdata=idata) except OSError: # on some systems (RHEL 6), you cannot run dot with # shell=True. on others (Gentoo with Python 2.7), you # must. In yet others (RHEL 5), either way works. I have # no idea what the difference is, but it's kind of a PITA. 
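            # Retry the identical dot invocation through a shell; cmd and
            # the generated graphviz source in idata are unchanged, only
            # shell=True differs from the attempt above.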
result = exc.run(cmd, shell=True, inputdata=idata) if not result.success: self.errExit("Error running %s: %s" % (cmd, result.error)) if not setup.outfile: print(result.stdout) class Xcmd(_ProxyAdminCmd): """ XML-RPC Command Interface """ options = _ProxyAdminCmd.options + [ Bcfg2.Options.PositionalArgument("command"), Bcfg2.Options.PositionalArgument("arguments", nargs='*')] def run(self, setup): try: data = getattr(self.proxy, setup.command)(*setup.arguments) except Bcfg2.Client.Proxy.ProxyError: self.errExit("Proxy Error: %s" % sys.exc_info()[1]) if data is not None: print(data) class CLI(Bcfg2.Options.CommandRegistry): """ CLI class for bcfg2-admin """ def __init__(self): Bcfg2.Options.CommandRegistry.__init__(self) self.register_commands(globals().values(), parent=AdminCmd) parser = Bcfg2.Options.get_parser( description="Manage a running Bcfg2 server", components=[self]) parser.add_options(self.subcommand_options) parser.parse() def run(self): """ Run bcfg2-admin """ try: cmd = self.commands[Bcfg2.Options.setup.subcommand] if hasattr(cmd, 'setup'): cmd.setup() return self.runcommand() finally: self.shutdown() src/lib/Bcfg2/Server/BuiltinCore.py000066400000000000000000000142271303523157100174030ustar00rootroot00000000000000""" The core of the builtin Bcfg2 server. """ import os import sys import time import socket import daemon import Bcfg2.Options import Bcfg2.Server.Statistics from Bcfg2.Server.Core import NetworkCore, NoExposedMethod from Bcfg2.Compat import xmlrpclib, urlparse from Bcfg2.Server.SSLServer import XMLRPCServer from lockfile import LockFailed, LockTimeout # pylint: disable=E0611 try: from daemon.pidfile import TimeoutPIDLockFile except ImportError: from daemon.pidlockfile import TimeoutPIDLockFile # pylint: enable=E0611 class BuiltinCore(NetworkCore): """ The built-in server core """ name = 'bcfg2-server' def __init__(self): NetworkCore.__init__(self) #: The :class:`Bcfg2.Server.SSLServer.XMLRPCServer` instance #: powering this server core self.server = None daemon_args = dict(uid=Bcfg2.Options.setup.daemon_uid, gid=Bcfg2.Options.setup.daemon_gid, umask=int(Bcfg2.Options.setup.umask, 8), detach_process=True, files_preserve=self._logfilehandles()) if Bcfg2.Options.setup.daemon: daemon_args['pidfile'] = TimeoutPIDLockFile( Bcfg2.Options.setup.daemon, acquire_timeout=5) #: The :class:`daemon.DaemonContext` used to drop #: privileges, write the PID file (with :class:`PidFile`), #: and daemonize this core. self.context = daemon.DaemonContext(**daemon_args) __init__.__doc__ = NetworkCore.__init__.__doc__.split('.. -----')[0] def _logfilehandles(self, logger=None): """ Get a list of all filehandles logger, that have to be handled with DaemonContext.files_preserve to keep looging working. :param logger: The logger to get the file handles of. By default, self.logger is used. 
:type logger: logging.Logger """ if logger is None: logger = self.logger handles = [handler.stream.fileno() for handler in logger.handlers if hasattr(handler, 'stream')] if logger.parent: handles += self._logfilehandles(logger.parent) return handles def _dispatch(self, method, args, dispatch_dict): """ Dispatch XML-RPC method calls :param method: XML-RPC method name :type method: string :param args: Paramaters to pass to the method :type args: tuple :param dispatch_dict: A dict of method name -> function that can be used to provide custom mappings :type dispatch_dict: dict :returns: The return value of the method call :raises: :exc:`xmlrpclib.Fault` """ if method in dispatch_dict: method_func = dispatch_dict[method] else: try: method_func = self._resolve_exposed_method(method) except NoExposedMethod: self.logger.error("Unknown method %s" % (method)) raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND, "Unknown method %s" % method) try: method_start = time.time() try: return method_func(*args) finally: Bcfg2.Server.Statistics.stats.add_value( method, time.time() - method_start) except xmlrpclib.Fault: raise except Exception: err = sys.exc_info()[1] if getattr(err, "log", True): self.logger.error(err, exc_info=True) raise xmlrpclib.Fault(getattr(err, "fault_code", 1), str(err)) def _daemonize(self): """ Open :attr:`context` to drop privileges, write the PID file, and daemonize the server core. """ # Attempt to ensure lockfile is able to be created and not stale try: self.context.pidfile.acquire() except LockFailed: err = sys.exc_info()[1] self.logger.error("Failed to daemonize %s: %s" % (self.name, err)) return False except LockTimeout: try: # attempt to break the lock os.kill(self.context.pidfile.read_pid(), 0) except (OSError, TypeError): # No process with locked PID self.context.pidfile.break_lock() else: err = sys.exc_info()[1] self.logger.error("Failed to daemonize %s: Failed to acquire" "lock on %s" % (self.name, Bcfg2.Options.setup.daemon)) return False else: self.context.pidfile.release() self.context.open() self.logger.info("%s daemonized" % self.name) return True def _run(self): """ Create :attr:`server` to start the server listening. """ hostname, port = urlparse(Bcfg2.Options.setup.server)[1].split(':') server_address = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][4] try: self.server = XMLRPCServer(Bcfg2.Options.setup.listen_all, server_address, keyfile=Bcfg2.Options.setup.key, certfile=Bcfg2.Options.setup.cert, register=False, ca=Bcfg2.Options.setup.ca, protocol=Bcfg2.Options.setup.protocol) except: # pylint: disable=W0702 err = sys.exc_info()[1] self.logger.error("Server startup failed: %s" % err) self.context.close() return False return True def _block(self): """ Enter the blocking infinite loop. """ self.server.register_instance(self) try: self.server.serve_forever() finally: self.server.server_close() self.context.close() self.shutdown() src/lib/Bcfg2/Server/Cache.py000066400000000000000000000135551303523157100161720ustar00rootroot00000000000000""" ``Bcfg2.Server.Cache`` is an implementation of a simple memory-backed cache. Right now this doesn't provide many features, but more (time-based expiration, etc.) can be added as necessary. The normal workflow is to get a Cache object, which is simply a dict interface to the unified cache that automatically uses a certain tag set. For instance: .. 
code-block:: python groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups") groupcache['foo.example.com'] = ['group1', 'group2'] This would create a Cache object that automatically tags its entries with ``frozenset(["Probes", "probegroups"])``, and store the list ``['group1', 'group1']`` with the *additional* tag ``foo.example.com``. So the unified backend cache would then contain a single entry: .. code-block:: python {frozenset(["Probes", "probegroups", "foo.example.com"]): ['group1', 'group2']} In addition to the dict interface, Cache objects (returned from :func:`Bcfg2.Server.Cache.Cache`) have one additional method, ``expire()``, which is mostly identical to :func:`Bcfg2.Server.Cache.expire`, except that it is specific to the tag set of the cache object. E.g., to expire all ``foo.example.com`` records for a given cache, you could do: .. code-block:: python groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups") groupcache.expire("foo.example.com") This is mostly functionally identical to: .. code-block:: python Bcfg2.Server.Cache.expire("Probes", "probegroups", "foo.example.com") It's not completely identical, though; the first example will expire, at most, exactly one item from the cache. The second example will expire all items that are tagged with a superset of the given tags. To illustrate the difference, consider the following two examples: .. code-block:: python groupcache = Bcfg2.Server.Cache.Cache("Probes") groupcache.expire("probegroups") Bcfg2.Server.Cache.expire("Probes", "probegroups") The former will not expire any data, because there is no single datum tagged with ``"Probes", "probegroups"``. The latter will expire *all* items tagged with ``"Probes", "probegroups"`` -- i.e., the entire cache. In this case, the latter call is equivalent to: .. code-block:: python groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups") groupcache.expire() """ from Bcfg2.Compat import MutableMapping class _Cache(MutableMapping): """ The object returned by :func:`Bcfg2.Server.Cache.Cache` that presents a dict-like interface to the portion of the unified cache that uses the specified tags. """ def __init__(self, registry, tags): self._registry = registry self._tags = tags def __getitem__(self, key): return self._registry[self._tags | set([key])] def __setitem__(self, key, value): self._registry[self._tags | set([key])] = value def __delitem__(self, key): del self._registry[self._tags | set([key])] def __iter__(self): for item in self._registry.iterate(*self._tags): yield list(item.difference(self._tags))[0] def keys(self): """ List cache keys """ return list(iter(self)) def __len__(self): return len(list(iter(self))) def expire(self, key=None): """ expire all items, or a specific item, from the cache """ if key is None: expire(*self._tags) else: tags = self._tags | set([key]) # py 2.5 doesn't support mixing *args and explicit keyword # args kwargs = dict(exact=True) expire(*tags, **kwargs) def __repr__(self): return repr(dict(self)) def __str__(self): return str(dict(self)) class _CacheRegistry(dict): """ The grand unified cache backend which contains all cache items. """ def iterate(self, *tags): """ Iterate over all items that match the given tags *and* have exactly one additional tag. This is used to get items for :class:`Bcfg2.Server.Cache._Cache` objects that have been instantiated via :func:`Bcfg2.Server.Cache.Cache`. 
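        For illustration (the registry contents and the extra ``stale``
        tag are hypothetical), given a backend containing:

        .. code-block:: python

            {frozenset(["Probes", "probegroups", "foo.example.com"]): [...],
             frozenset(["Probes", "probegroups", "foo.example.com",
                        "stale"]): [...]}

        ``iterate("Probes", "probegroups")`` yields only the first key,
        since the second key carries two tags beyond the ones given.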
""" tags = frozenset(tags) for key in self.keys(): if key.issuperset(tags) and len(key.difference(tags)) == 1: yield key def iter_all(self, *tags): """ Iterate over all items that match the given tags, regardless of how many additional tags they have (or don't have). This is used to expire all cache data that matches a set of tags. """ tags = frozenset(tags) for key in list(self.keys()): if key.issuperset(tags): yield key _cache = _CacheRegistry() # pylint: disable=C0103 _hooks = [] # pylint: disable=C0103 def Cache(*tags): # pylint: disable=C0103 """ A dict interface to the cache data tagged with the given tags. """ return _Cache(_cache, frozenset(tags)) def expire(*tags, **kwargs): """ Expire all items, a set of items, or one specific item from the cache. If ``exact`` is set to True, then if the given tag set doesn't match exactly one item in the cache, nothing will be expired. """ exact = kwargs.pop("exact", False) count = 0 if not tags: count = len(_cache) _cache.clear() elif exact: if frozenset(tags) in _cache: count = 1 del _cache[frozenset(tags)] else: for match in _cache.iter_all(*tags): count += 1 del _cache[match] for hook in _hooks: hook(tags, exact, count) def add_expire_hook(func): """ Add a hook that will be called when an item is expired from the cache. The callable passed in must take three options: the first will be the tag set that was expired; the second will be the state of the ``exact`` flag (True or False); and the third will be the number of items that were expired from the cache. """ _hooks.append(func) src/lib/Bcfg2/Server/CherrypyCore.py000066400000000000000000000143171303523157100176020ustar00rootroot00000000000000""" The core of the `CherryPy `_-powered server. """ import sys import time import Bcfg2.Server.Statistics from Bcfg2.Compat import urlparse, xmlrpclib, b64decode from Bcfg2.Server.Core import NetworkCore import cherrypy from cherrypy.lib import xmlrpcutil from cherrypy._cptools import ErrorTool from cherrypy.process.plugins import Daemonizer, DropPrivileges, PIDFile def on_error(*args, **kwargs): # pylint: disable=W0613 """ CherryPy error handler that handles :class:`xmlrpclib.Fault` objects and so allows for the possibility of returning proper error codes. This obviates the need to use :func:`cherrypy.lib.xmlrpc.on_error`, the builtin CherryPy xmlrpc tool, which does not handle xmlrpclib.Fault objects and returns the same error code for every error.""" err = sys.exc_info()[1] if not isinstance(err, xmlrpclib.Fault): err = xmlrpclib.Fault(xmlrpclib.INTERNAL_ERROR, str(err)) xmlrpcutil._set_response(xmlrpclib.dumps(err)) # pylint: disable=W0212 cherrypy.tools.xmlrpc_error = ErrorTool(on_error) class CherrypyCore(NetworkCore): """ The CherryPy-based server core. """ #: Base CherryPy config for this class. We enable the #: ``xmlrpc_error`` tool created from :func:`on_error` and the #: ``bcfg2_authn`` tool created from :func:`do_authn`. _cp_config = {'tools.xmlrpc_error.on': True, 'tools.bcfg2_authn.on': True} def __init__(self): NetworkCore.__init__(self) cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource', self.do_authn) #: List of exposed plugin RMI self.rmi = self._get_rmi() cherrypy.engine.subscribe('stop', self.shutdown) __init__.__doc__ = NetworkCore.__init__.__doc__.split('.. -----')[0] def do_authn(self): """ Perform authentication by calling :func:`Bcfg2.Server.Core.NetworkCore.authenticate`. 
This is implemented as a CherryPy tool.""" try: header = cherrypy.request.headers['Authorization'] except KeyError: self.critical_error("No authentication data presented") auth_content = header.split()[1] auth_content = b64decode(auth_content) try: username, password = auth_content.split(":") except ValueError: username = auth_content password = "" # FIXME: Get client cert cert = None address = (cherrypy.request.remote.ip, cherrypy.request.remote.port) rpcmethod = xmlrpcutil.process_body()[1] if rpcmethod == 'ERRORMETHOD': raise Exception("Unknown error processing XML-RPC request body") if (not self.check_acls(address[0], rpcmethod) or not self.authenticate(cert, username, password, address)): raise cherrypy.HTTPError(401) @cherrypy.expose def default(self, *args, **params): # pylint: disable=W0613 """ Handle all XML-RPC calls. It was necessary to make enough changes to the stock CherryPy :class:`cherrypy._cptools.XMLRPCController` to support plugin RMI and prepending the client address that we just rewrote it. It clearly wasn't written with inheritance in mind.""" rpcparams, rpcmethod = xmlrpcutil.process_body() if rpcmethod == 'ERRORMETHOD': raise Exception("Unknown error processing XML-RPC request body") elif "." not in rpcmethod: address = (cherrypy.request.remote.ip, cherrypy.request.remote.name) rpcparams = (address, ) + rpcparams handler = getattr(self, rpcmethod, None) if not handler or not getattr(handler, "exposed", False): raise Exception('Method "%s" is not supported' % rpcmethod) else: try: handler = self.rmi[rpcmethod] except KeyError: raise Exception('Method "%s" is not supported' % rpcmethod) method_start = time.time() try: body = handler(*rpcparams, **params) finally: Bcfg2.Server.Statistics.stats.add_value(rpcmethod, time.time() - method_start) xmlrpcutil.respond(body, 'utf-8', True) return cherrypy.serving.response.body def _daemonize(self): """ Drop privileges, daemonize with :class:`cherrypy.process.plugins.Daemonizer` and write a PID file with :class:`cherrypy.process.plugins.PIDFile`. """ self._drop_privileges() Daemonizer(cherrypy.engine).subscribe() PIDFile(cherrypy.engine, Bcfg2.Options.setup.daemon).subscribe() return True def _drop_privileges(self): """ Drop privileges with :class:`cherrypy.process.plugins.DropPrivileges` """ DropPrivileges(cherrypy.engine, uid=Bcfg2.Options.setup.daemon_uid, gid=Bcfg2.Options.setup.daemon_gid, umask=int(Bcfg2.Options.setup.umask, 8)).subscribe() def _run(self): """ Start the server listening. """ hostname, port = urlparse(Bcfg2.Options.setup.server)[1].split(':') if Bcfg2.Options.setup.listen_all: hostname = '0.0.0.0' config = {'engine.autoreload.on': False, 'server.socket_port': int(port), 'server.socket_host': hostname} if Bcfg2.Options.setup.cert and Bcfg2.Options.setup.key: config.update({'server.ssl_module': 'pyopenssl', 'server.ssl_certificate': Bcfg2.Options.setup.cert, 'server.ssl_private_key': Bcfg2.Options.setup.key}) if Bcfg2.Options.setup.debug: config['log.screen'] = True cherrypy.config.update(config) cherrypy.tree.mount(self, '/', {'/': Bcfg2.Options.setup}) cherrypy.engine.start() return True def _block(self): """ Enter the blocking infinite server loop. :func:`Bcfg2.Server.Core.NetworkCore.shutdown` is called on exit by a :meth:`subscription ` on the top-level CherryPy engine.""" cherrypy.engine.block() src/lib/Bcfg2/Server/Core.py000066400000000000000000001714511303523157100160570ustar00rootroot00000000000000""" Bcfg2.Server.Core provides the base core object that server core implementations inherit from. 
""" import os import pwd import atexit import logging import select import sys import threading import time import inspect import lxml.etree import daemon import Bcfg2.Server import Bcfg2.Logger import Bcfg2.Options import Bcfg2.DBSettings import Bcfg2.Server.Statistics import Bcfg2.Server.FileMonitor from itertools import chain from Bcfg2.Server.Cache import Cache from Bcfg2.Compat import xmlrpclib, wraps # pylint: disable=W0622 from Bcfg2.Server.Plugin.exceptions import * # pylint: disable=W0401,W0614 from Bcfg2.Server.Plugin.interfaces import * # pylint: disable=W0401,W0614 from Bcfg2.Server.Statistics import track_statistics try: from django.core.exceptions import ImproperlyConfigured import django import django.conf HAS_DJANGO = True except ImportError: HAS_DJANGO = False try: import psyco psyco.full() except ImportError: pass def exposed(func): """ Decorator that sets the ``exposed`` attribute of a function to ``True`` expose it via XML-RPC. This currently works for both the builtin and CherryPy cores, although if other cores are added this may need to be made a core-specific function. :param func: The function to decorate :type func: callable :returns: callable - the decorated function""" func.exposed = True return func def sort_xml(node, key=None): """ Recursively sort an XML document in a deterministic fashion. This shouldn't be used to perform a *useful* sort, merely to put XML in a deterministic, replicable order. The document is sorted in-place. :param node: The root node of the XML tree to sort :type node: lxml.etree._Element or lxml.etree.ElementTree :param key: The key to sort by :type key: callable :returns: None """ for child in node: sort_xml(child, key) try: sorted_children = sorted(node, key=key) except TypeError: sorted_children = node node[:] = sorted_children def close_db_connection(func): """ Decorator that closes the Django database connection at the end of the function. This should decorate any exposed function that might open a database connection. """ @wraps(func) def inner(self, *args, **kwargs): """ The decorated function """ rv = func(self, *args, **kwargs) if self._database_available: # pylint: disable=W0212 self.logger.debug("%s: Closing database connection" % threading.current_thread().getName()) if django.VERSION[0] == 1 and django.VERSION[1] >= 7: for connection in django.db.connections.all(): connection.close() else: django.db.close_connection() # pylint: disable=E1101 return rv return inner class CoreInitError(Exception): """ Raised when the server core cannot be initialized. """ pass class NoExposedMethod (Exception): """ Raised when an XML-RPC method is called, but there is no method exposed with the given name. """ class DefaultACL(Plugin, ClientACLs): """ Default ACL 'plugin' that provides security by default. This is only loaded if no other ClientACLs plugin is enabled. """ create = False def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.ClientACLs.__init__(self) def check_acl_ip(self, address, rmi): return (("." not in rmi and not rmi.endswith("_debug") and rmi != 'get_statistics' and rmi != 'expire_metadata_cache') or address[0] == "127.0.0.1") # in core we frequently want to catch all exceptions, regardless of # type, so disable the pylint rule that catches that. # pylint: disable=W0702 class Core(object): """ The server core is the container for all Bcfg2 server logic and modules. All core implementations must inherit from ``Core``. 
""" options = [ Bcfg2.Options.Common.plugins, Bcfg2.Options.Common.repository, Bcfg2.Options.Common.filemonitor, Bcfg2.Options.BooleanOption( "--no-fam-blocking", cf=('server', 'fam_blocking'), dest="fam_blocking", default=True, help='FAM blocks on startup until all events are processed'), Bcfg2.Options.BooleanOption( cf=('logging', 'performance'), dest="perflog", help="Periodically log performance statistics"), Bcfg2.Options.Option( cf=('logging', 'performance_interval'), default=300.0, type=Bcfg2.Options.Types.timeout, help="Performance statistics logging interval in seconds"), Bcfg2.Options.Option( cf=('caching', 'client_metadata'), dest='client_metadata_cache', default='off', choices=['off', 'on', 'initial', 'cautious', 'aggressive'])] #: The name of this server core. This can be overridden by core #: implementations to provide a more specific name. name = "Core" def __init__(self): # pylint: disable=R0912,R0915 """ .. automethod:: _run .. automethod:: _block .. ----- .. automethod:: _file_monitor_thread .. automethod:: _perflog_thread """ # Record the core as a module variable Bcfg2.Server.core = self #: A :class:`logging.Logger` object for use by the core self.logger = logging.getLogger('bcfg2-server') #: Log levels for the various logging handlers with debug True #: and False. Each loglevel dict is a dict of ``logger name #: => log level``; the logger names are set in #: :mod:`Bcfg2.Logger`. The logger name ``default`` is #: special, and will be used for any log handlers whose name #: does not appear elsewhere in the dict. At a minimum, #: ``default`` must be provided. self._loglevels = { True: dict(default=logging.DEBUG), False: dict(console=logging.INFO, default=Bcfg2.Logger.default_log_level())} #: Used to keep track of the current debug state of the core. self.debug_flag = False # enable debugging on the core now. debugging is enabled on # everything else later if Bcfg2.Options.setup.debug: self.set_core_debug(None, Bcfg2.Options.setup.debug) try: #: The :class:`Bcfg2.Server.FileMonitor.FileMonitor` #: object used by the core to monitor for Bcfg2 data #: changes. self.fam = Bcfg2.Server.FileMonitor.get_fam() except IOError: msg = "Failed to instantiate fam driver %s" % \ Bcfg2.Options.setup.filemonitor self.logger.error(msg, exc_info=1) raise CoreInitError(msg) #: Path to bcfg2.conf self.cfile = Bcfg2.Options.setup.config #: Dict of plugins that are enabled. Keys are the plugin #: names (just the plugin name, in the correct case; e.g., #: "Cfg", not "Bcfg2.Server.Plugins.Cfg"), and values are #: plugin objects. self.plugins = {} #: Blacklist of plugins that conflict with enabled plugins. #: If two plugins are loaded that conflict with each other, #: the first one loaded wins. self.plugin_blacklist = {} #: The Metadata plugin self.metadata = None #: Revision of the Bcfg2 specification. This will be sent to #: the client in the configuration, and can be set by a #: :class:`Bcfg2.Server.Plugin.interfaces.Version` plugin. self.revision = '-1' atexit.register(self.shutdown) #: if :func:`Bcfg2.Server.Core.shutdown` is called explicitly, #: then :mod:`atexit` calls it *again*, so it gets called #: twice. This is potentially bad, so we use #: :attr:`Bcfg2.Server.Core._running` as a flag to determine #: if the core needs to be shutdown, and only do it once. 
self._running = True #: Threading event to signal worker threads (e.g., #: :attr:`fam_thread`) to shutdown self.terminate = threading.Event() #: RLock to be held on writes to the backend db self.db_write_lock = threading.RLock() # mapping of group name => plugin name to record where groups # that are created by Connector plugins came from self._dynamic_groups = dict() #: The FAM :class:`threading.Thread`, #: :func:`_file_monitor_thread` self.fam_thread = \ threading.Thread(name="%sFAMThread" % Bcfg2.Options.setup.filemonitor.__name__, target=self._file_monitor_thread) #: The :class:`threading.Thread` that reports performance #: statistics to syslog. self.perflog_thread = None if Bcfg2.Options.setup.perflog: self.perflog_thread = \ threading.Thread(name="PerformanceLoggingThread", target=self._perflog_thread) #: A :func:`threading.Lock` for use by #: :func:`Bcfg2.Server.FileMonitor.FileMonitor.handle_event_set` self.lock = threading.Lock() #: A :class:`Bcfg2.Server.Cache.Cache` object for caching client #: metadata self.metadata_cache = Cache("Metadata") #: Whether or not it's possible to use the Django database #: backend for plugins that have that capability self._database_available = False if HAS_DJANGO: try: Bcfg2.DBSettings.sync_databases(interactive=False, verbosity=0) self._database_available = True except ImproperlyConfigured: self.logger.error("Django configuration problem: %s" % sys.exc_info()[1]) except: self.logger.error("Updating database %s failed: %s" % (Bcfg2.Options.setup.db_name, sys.exc_info()[1])) def __str__(self): return self.__class__.__name__ def plugins_by_type(self, base_cls): """ Return a list of loaded plugins that match the passed type. The returned list is sorted in ascending order by the plugins' ``sort_order`` value. The :attr:`Bcfg2.Server.Plugin.base.Plugin.sort_order` defaults to 500, but can be overridden by individual plugins. Plugins with the same numerical sort_order value are sorted in alphabetical order by their name. :param base_cls: The base plugin interface class to match (see :mod:`Bcfg2.Server.Plugin.interfaces`) :type base_cls: type :returns: list of :attr:`Bcfg2.Server.Plugin.base.Plugin` objects """ return sorted([plugin for plugin in self.plugins.values() if isinstance(plugin, base_cls)], key=lambda p: (p.sort_order, p.name)) def _perflog_thread(self): """ The thread that periodically logs performance statistics to syslog. """ self.logger.debug("Performance logging thread starting") while not self.terminate.isSet(): self.terminate.wait(Bcfg2.Options.setup.performance_interval) if not self.terminate.isSet(): for name, stats in self.get_statistics(None).items(): self.logger.info("Performance statistics: " "%s min=%.06f, max=%.06f, average=%.06f, " "count=%d" % ((name, ) + stats)) self.logger.info("Performance logging thread terminated") def _file_monitor_thread(self): """ The thread that runs the :class:`Bcfg2.Server.FileMonitor.FileMonitor`. This also queries :class:`Bcfg2.Server.Plugin.interfaces.Version` plugins for the current revision of the Bcfg2 repo. 
""" self.logger.debug("File monitor thread starting") famfd = self.fam.fileno() terminate = self.terminate while not terminate.isSet(): if famfd: select.select([famfd], [], [], 2) elif not self.fam.pending(): terminate.wait(15) if self.fam.pending(): try: self._update_vcs_revision() except: self.logger.error("Error updating VCS revision: %s" % sys.exc_info()[1]) try: self.fam.handle_event_set(self.lock) except: self.logger.error("Error handling event set: %s" % sys.exc_info()[1]) self.logger.info("File monitor thread terminated") @track_statistics() def _update_vcs_revision(self): """ Update the revision of the current configuration on-disk from the VCS plugin """ for plugin in self.plugins_by_type(Version): try: newrev = plugin.get_revision() if newrev != self.revision: self.logger.debug("Updated to revision %s" % newrev) self.revision = newrev break except: self.logger.warning("Error getting revision from %s: %s" % (plugin.name, sys.exc_info()[1])) self.revision = '-1' def load_plugins(self): """ Load all plugins, setting :attr:`Bcfg2.Server.Core.BaseCore.plugins` and :attr:`Bcfg2.Server.Core.BaseCore.metadata` as side effects. This does not start plugin threads; that is done later, in :func:`Bcfg2.Server.Core.BaseCore.run` """ for plugin in Bcfg2.Options.setup.plugins: if plugin not in self.plugins: self.init_plugin(plugin) # Remove blacklisted plugins for plugin, blacklist in list(self.plugin_blacklist.items()): if len(blacklist) > 0: self.logger.error("The following plugins conflict with %s;" "Unloading %s" % (plugin, blacklist)) for plug in blacklist: del self.plugins[plug] # Log deprecated and experimental plugins expl = [] depr = [] for plug in list(self.plugins.values()): if plug.experimental: expl.append(plug) if plug.deprecated: depr.append(plug) if expl: self.logger.info("Loading experimental plugin(s): %s" % (" ".join([x.name for x in expl]))) self.logger.info("NOTE: Interfaces subject to change") if depr: self.logger.info("Loading deprecated plugin(s): %s" % (" ".join([x.name for x in depr]))) # Find the metadata plugin and set self.metadata mlist = self.plugins_by_type(Metadata) if len(mlist) >= 1: self.metadata = mlist[0] if len(mlist) > 1: self.logger.error("Multiple Metadata plugins loaded; using %s" % self.metadata) else: self.logger.error("No Metadata plugin loaded; " "failed to instantiate Core") raise CoreInitError("No Metadata Plugin") # ensure that an ACL plugin is loaded if not self.plugins_by_type(Bcfg2.Server.Plugin.ClientACLs): self.init_plugin(DefaultACL) def init_plugin(self, plugin): """ Import and instantiate a single plugin. The plugin is stored to :attr:`plugins`. :param plugin: The plugin class to load. :type plugin: type :returns: None """ self.logger.debug("Loading plugin %s" % plugin.name) # Blacklist conflicting plugins cplugs = [conflict for conflict in plugin.conflicts if conflict in self.plugins] self.plugin_blacklist[plugin.name] = cplugs try: self.plugins[plugin.name] = plugin(self) except PluginInitError: self.logger.error("Failed to instantiate plugin %s" % plugin, exc_info=1) except OSError: err = sys.exc_info()[1] self.logger.error("Failed to add a file monitor while " "instantiating plugin %s: %s" % (plugin, err)) except: self.logger.error("Unexpected instantiation failure for plugin %s" % plugin, exc_info=1) @close_db_connection def shutdown(self): """ Perform plugin and FAM shutdown tasks. """ if not self._running: self.logger.debug("%s: Core already shut down" % self.name) return self.logger.info("%s: Shutting down core..." 
% self.name) if not self.terminate.isSet(): self.terminate.set() self._running = False self.fam.shutdown() self.logger.info("%s: FAM shut down" % self.name) for plugin in list(self.plugins.values()): plugin.shutdown() self.logger.info("%s: All plugins shut down" % self.name) @property def metadata_cache_mode(self): """ Get the client :attr:`metadata_cache` mode. Options are off, initial, cautious, aggressive, on (synonym for cautious). See :ref:`server-caching` for more details. """ mode = Bcfg2.Options.setup.client_metadata_cache if mode == "on": return "cautious" else: return mode def client_run_hook(self, hook, metadata): """ Invoke hooks from :class:`Bcfg2.Server.Plugin.interfaces.ClientRunHooks` plugins for a given stage. :param hook: The name of the stage to run hooks for. A stage can be any abstract function defined in the :class:`Bcfg2.Server.Plugin.interfaces.ClientRunHooks` interface. :type hook: string :param metadata: Client metadata to run the hook for. This will be passed as the sole argument to each hook. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ self.logger.debug("Running %s hooks for %s" % (hook, metadata.hostname)) start = time.time() try: for plugin in self.plugins_by_type(ClientRunHooks): try: getattr(plugin, hook)(metadata) except AttributeError: err = sys.exc_info()[1] self.logger.error("Unknown attribute: %s" % err) raise except: err = sys.exc_info()[1] self.logger.error("%s: Error invoking hook %s: %s" % (plugin, hook, err)) finally: Bcfg2.Server.Statistics.stats.add_value( "%s:client_run_hook:%s" % (self.__class__.__name__, hook), time.time() - start) @track_statistics() def validate_structures(self, metadata, data): """ Checks the data structures by calling the :func:`Bcfg2.Server.Plugin.interfaces.StructureValidator.validate_structures` method of :class:`Bcfg2.Server.Plugin.interfaces.StructureValidator` plugins. :param metadata: Client metadata to validate structures for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The list of structures (i.e., bundles) for this client :type data: list of lxml.etree._Element objects """ self.logger.debug("Validating structures for %s" % metadata.hostname) for plugin in self.plugins_by_type(StructureValidator): try: plugin.validate_structures(metadata, data) except ValidationError: err = sys.exc_info()[1] self.logger.error("Plugin %s structure validation failed: %s" % (plugin.name, err)) raise except: self.logger.error("Plugin %s: unexpected structure validation " "failure" % plugin.name, exc_info=1) @track_statistics() def validate_goals(self, metadata, data): """ Checks that the config matches the goals enforced by :class:`Bcfg2.Server.Plugin.interfaces.GoalValidator` plugins by calling :func:`Bcfg2.Server.Plugin.interfaces.GoalValidator.validate_goals`. 
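        As a purely illustrative sketch (``NoUnnamedPaths`` is
        hypothetical, and assumes the usual ``Bcfg2.Server.Plugin``
        re-exports), a goal validator signals a bad configuration by
        raising ``ValidationError``:

        .. code-block:: python

            import Bcfg2.Server.Plugin

            class NoUnnamedPaths(Bcfg2.Server.Plugin.Plugin,
                                 Bcfg2.Server.Plugin.GoalValidator):
                # Hypothetical example plugin
                def validate_goals(self, metadata, config):
                    for entry in config.iter('Path'):
                        if not entry.get('name'):
                            raise Bcfg2.Server.Plugin.ValidationError(
                                "Unnamed Path entry for %s" %
                                metadata.hostname)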
:param metadata: Client metadata to validate goals for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The list of structures (i.e., bundles) for this client :type data: list of lxml.etree._Element objects """ self.logger.debug("Validating goals for %s" % metadata.hostname) for plugin in self.plugins_by_type(GoalValidator): try: plugin.validate_goals(metadata, data) except ValidationError: err = sys.exc_info()[1] self.logger.error("Plugin %s goal validation failed: %s" % (plugin.name, err.message)) raise except: self.logger.error("Plugin %s: unexpected goal validation " "failure" % plugin.name, exc_info=1) @track_statistics() def GetStructures(self, metadata): """ Get all structures (i.e., bundles) for the given client :param metadata: Client metadata to get structures for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: list of :class:`lxml.etree._Element` objects """ self.logger.debug("Getting structures for %s" % metadata.hostname) structures = list( chain(*[struct.BuildStructures(metadata) for struct in self.plugins_by_type(Structure)])) sbundles = [b.get('name') for b in structures if b.tag == 'Bundle' or b.tag == 'Independent'] missing = [b for b in metadata.bundles if b not in sbundles] if missing: self.logger.error("Client %s configuration missing bundles: %s" % (metadata.hostname, ':'.join(missing))) return structures @track_statistics() def BindStructures(self, structures, metadata, config): """ Given a list of structures (i.e. bundles), bind all the entries in them and add the structures to the config. :param structures: The list of structures for this client :type structures: list of lxml.etree._Element objects :param metadata: Client metadata to bind structures for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param config: The configuration document to add fully-bound structures to. Modified in-place. :type config: lxml.etree._Element """ self.logger.debug("Binding structures for %s" % metadata.hostname) for astruct in structures: try: self.BindStructure(astruct, metadata) config.append(astruct) except: self.logger.error("error in BindStructure", exc_info=1) @track_statistics() def BindStructure(self, structure, metadata): """ Bind all elements in a single structure (i.e., bundle). :param structure: The structure to bind. Modified in-place. :type structures: lxml.etree._Element :param metadata: Client metadata to bind structure for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ self.logger.debug("Binding structure %s for %s" % (structure.get("name", "unknown"), metadata.hostname)) for entry in structure.getchildren(): if entry.tag.startswith("Bound"): entry.tag = entry.tag[5:] continue try: self.Bind(entry, metadata) except: exc = sys.exc_info()[1] if 'failure' not in entry.attrib: entry.set('failure', 'bind error: %s' % exc) if isinstance(exc, PluginExecutionError): msg = "Failed to bind entry" else: msg = "Unexpected failure binding entry" self.logger.error("%s %s:%s: %s" % (msg, entry.tag, entry.get('name'), exc)) def Bind(self, entry, metadata): """ Bind a single entry using the appropriate generator. :param entry: The entry to bind. Modified in-place. 
:type entry: lxml.etree._Element :param metadata: Client metadata to bind structure for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ start = time.time() if 'altsrc' in entry.attrib: oldname = entry.get('name') entry.set('name', entry.get('altsrc')) entry.set('realname', oldname) del entry.attrib['altsrc'] try: ret = self.Bind(entry, metadata) entry.set('name', oldname) del entry.attrib['realname'] return ret except: self.logger.error( "Failed binding entry %s:%s with altsrc %s: %s" % (entry.tag, entry.get('realname'), entry.get('name'), sys.exc_info()[1])) entry.set('name', oldname) self.logger.error("Falling back to %s:%s" % (entry.tag, entry.get('name'))) generators = self.plugins_by_type(Generator) glist = [gen for gen in generators if entry.get('name') in gen.Entries.get(entry.tag, {})] if len(glist) == 1: return glist[0].Entries[entry.tag][entry.get('name')](entry, metadata) elif len(glist) > 1: generators = ", ".join([gen.name for gen in glist]) self.logger.error("%s %s served by multiple generators: %s" % (entry.tag, entry.get('name'), generators)) g2list = [gen for gen in generators if gen.HandlesEntry(entry, metadata)] try: if len(g2list) == 1: return g2list[0].HandleEntry(entry, metadata) entry.set('failure', 'no matching generator') raise PluginExecutionError("No matching generator: %s:%s" % (entry.tag, entry.get('name'))) finally: Bcfg2.Server.Statistics.stats.add_value("%s:Bind:%s" % (self.__class__.__name__, entry.tag), time.time() - start) def BuildConfiguration(self, client): """ Build the complete configuration for a client. :param client: The hostname of the client to build the configuration for :type client: string :returns: :class:`lxml.etree._Element` - A complete Bcfg2 configuration document """ self.logger.debug("Building configuration for %s" % client) start = time.time() config = lxml.etree.Element("Configuration", version='2.0', revision=str(self.revision)) try: meta = self.build_metadata(client) except MetadataConsistencyError: self.logger.error("Metadata consistency error for client %s" % client) return lxml.etree.Element("error", type='metadata error') self.client_run_hook("start_client_run", meta) try: structures = self.GetStructures(meta) except: self.logger.error("Error in GetStructures", exc_info=1) return lxml.etree.Element("error", type='structure error') self.validate_structures(meta, structures) # Perform altsrc consistency checking esrcs = {} for struct in structures: for entry in struct: key = (entry.tag, entry.get('name')) if key in esrcs: if esrcs[key] != entry.get('altsrc'): self.logger.error("Found inconsistent altsrc mapping " "for entry %s:%s" % key) else: esrcs[key] = entry.get('altsrc', None) del esrcs self.BindStructures(structures, meta, config) self.validate_goals(meta, config) self.client_run_hook("end_client_run", meta) sort_xml(config, key=lambda e: e.get('name')) self.logger.info("Generated config for %s in %.03f seconds" % (client, time.time() - start)) return config def HandleEvent(self, event): """ Handle a change in the Bcfg2 config file. :param event: The event to handle :type event: Bcfg2.Server.FileMonitor.Event """ if event.filename != self.cfile: self.logger.error("Got event for unknown file: %s" % event.filename) return if event.code2str() in ['deleted', 'exists']: # ignore config file deletion, and ignore the initial # 'exists' event as well. we've already parsed options on # startup, we don't need to parse them twice. 
return Bcfg2.Options.get_parser().reparse() self.metadata_cache.expire() def block_for_fam_events(self, handle_events=False): """ Block until all fam events have been handleed, optionally handling events as well. (Setting ``handle_events=True`` is useful for local server cores that don't spawn an event handling thread.)""" slept = 0 log_interval = 3 if handle_events: self.fam.handle_events_in_interval(1) slept += 1 if Bcfg2.Options.setup.fam_blocking: time.sleep(1) slept += 1 while self.fam.pending() != 0: time.sleep(1) slept += 1 if slept % log_interval == 0: self.logger.debug("Sleeping to handle FAM events...") self.logger.debug("Slept %s seconds while handling FAM events" % slept) def run(self): """ Run the server core. This calls :func:`_run`, starts the :attr:`fam_thread`, and calls :func:`_block`, but note that it is the responsibility of the server core implementation to call :func:`shutdown` under normal operation. This also handles creation of the directory containing the pidfile, if necessary.""" if not self._run(): self.shutdown() return False try: self.load_plugins() self.fam.start() self.fam_thread.start() self.fam.AddMonitor(self.cfile, self) if self.perflog_thread is not None: self.perflog_thread.start() for plug in self.plugins_by_type(Threaded): plug.start_threads() self.block_for_fam_events() self._block() except: self.shutdown() raise def _run(self): """ Start up the server; this method should return immediately. This must be overridden by a core implementation. """ raise NotImplementedError def _block(self): """ Enter the infinite loop. This method should not return until the server is killed. This must be overridden by a core implementation. """ raise NotImplementedError def GetDecisions(self, metadata, mode): """ Get the decision list for a client. :param metadata: Client metadata to get the decision list for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param mode: The decision mode ("whitelist" or "blacklist") :type mode: string :returns: list of Decision tuples ``(, )`` """ self.logger.debug("Getting decision list for %s" % metadata.hostname) result = [] for plugin in self.plugins_by_type(Decision): try: result.extend(plugin.GetDecisions(metadata, mode)) except: self.logger.error("Plugin: %s failed to generate decision list" % plugin.name, exc_info=1) return result @track_statistics() def check_acls(self, address, rmi): """ Check client IP address and metadata object against all :class:`Bcfg2.Server.Plugin.interfaces.ClientACLs` plugins. If any ACL plugin denies access, then access is denied. ACLs are checked in two phases: First, with the client IP address; and second, with the client metadata object. This lets an ACL interface do a quick rejection based on IP before metadata is ever built. 
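        A minimal, hypothetical ACL plugin (``LocalOnly`` is
        illustrative, and assumes the usual ``Bcfg2.Server.Plugin``
        re-exports) that allows loopback clients outright and defers
        everyone else to the metadata phase:

        .. code-block:: python

            import Bcfg2.Server.Plugin

            class LocalOnly(Bcfg2.Server.Plugin.Plugin,
                            Bcfg2.Server.Plugin.ClientACLs):
                # Hypothetical example plugin
                def check_acl_ip(self, address, rmi):
                    if address[0] == "127.0.0.1":
                        return True   # allow without building metadata
                    return None       # defer to the metadata check

                def check_acl_metadata(self, metadata, rmi):
                    # deny unless the client is in a (hypothetical) group
                    return "trusted" in metadata.groups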
:param address: The address pair of the client to check ACLs for :type address: tuple of (, ) :param rmi: The fully-qualified name of the RPC call :param rmi: string :returns: bool """ plugins = self.plugins_by_type(Bcfg2.Server.Plugin.ClientACLs) try: ip_checks = [p.check_acl_ip(address, rmi) for p in plugins] except: self.logger.error("Unexpected error checking ACLs for %s for %s: " "%s" % (address[0], rmi, sys.exc_info()[1])) return False # failsafe if all(ip_checks): # if all ACL plugins return True (allow), then allow self.logger.debug("Client %s passed IP-based ACL checks for %s" % (address[0], rmi)) return True elif False in ip_checks: # if any ACL plugin returned False (deny), then deny self.logger.warning("Client %s failed IP-based ACL checks for %s" % (address[0], rmi)) return False # else, no plugins returned False, but not all plugins # returned True, so some plugin returned None (defer), so # defer. client, metadata = self.resolve_client(address) try: rv = all(p.check_acl_metadata(metadata, rmi) for p in plugins) if rv: self.logger.debug( "Client %s passed metadata ACL checks for %s" % (metadata.hostname, rmi)) else: self.logger.warning( "Client %s failed metadata ACL checks for %s" % (metadata.hostname, rmi)) return rv except: self.logger.error("Unexpected error checking ACLs for %s for %s: " "%s" % (client, rmi, sys.exc_info()[1])) return False # failsafe @track_statistics() def build_metadata(self, client_name): """ Build initial client metadata for a client :param client_name: The name of the client to build metadata for :type client_name: string :returns: :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` """ if not hasattr(self, 'metadata'): # some threads start before metadata is even loaded raise MetadataRuntimeError("Metadata not loaded yet") if self.metadata_cache_mode == 'initial': # the Metadata plugin handles loading the cached data if # we're only caching the initial metadata object imd = None else: imd = self.metadata_cache.get(client_name, None) if not imd: self.logger.debug("Building metadata for %s" % client_name) try: imd = self.metadata.get_initial_metadata(client_name) except MetadataConsistencyError: self.critical_error( "Client metadata resolution error for %s: %s" % (client_name, sys.exc_info()[1])) connectors = self.plugins_by_type(Connector) for conn in connectors: groups = conn.get_additional_groups(imd) groupnames = [] for group in groups: if hasattr(group, "name"): groupname = group.name if groupname in self._dynamic_groups: if self._dynamic_groups[groupname] == conn.name: self.metadata.groups[groupname] = group else: self.logger.warning( "Refusing to clobber dynamic group %s " "defined by %s" % (self._dynamic_groups[groupname], groupname)) elif groupname in self.metadata.groups: # not recorded as a dynamic group, but # present in metadata.groups -- i.e., a # static group self.logger.warning( "Refusing to clobber predefined group %s" % groupname) else: self.metadata.groups[groupname] = group self._dynamic_groups[groupname] = conn.name groupnames.append(groupname) else: groupnames.append(group) self.metadata.merge_additional_groups(imd, groupnames) for conn in connectors: data = conn.get_additional_data(imd) self.metadata.merge_additional_data(imd, conn.name, data) imd.query.by_name = self.build_metadata if self.metadata_cache_mode in ['cautious', 'aggressive']: self.metadata_cache[client_name] = imd else: self.logger.debug("Using cached metadata object for %s" % client_name) return imd def process_statistics(self, client_name, statistics): """ 
Process uploaded statistics for client. :param client_name: The name of the client to process statistics for :type client_name: string :param statistics: The statistics document to process :type statistics: lxml.etree._Element """ self.logger.debug("Processing statistics for %s" % client_name) meta = self.build_metadata(client_name) state = statistics.find(".//Statistics") if state.get('version') >= '2.0': for plugin in self.plugins_by_type(Statistics): try: plugin.process_statistics(meta, statistics) except: self.logger.error("Plugin %s failed to process stats from " "%s" % (plugin.name, meta.hostname), exc_info=1) self.logger.info("Client %s reported state %s" % (client_name, state.get('state'))) self.client_run_hook("end_statistics", meta) @track_statistics() def resolve_client(self, address, cleanup_cache=False, metadata=True): """ Given a client address, get the client hostname and optionally metadata. :param address: The address pair of the client to get the canonical hostname for. :type address: tuple of (, ) :param cleanup_cache: Tell the :class:`Bcfg2.Server.Plugin.interfaces.Metadata` plugin in :attr:`metadata` to clean up any client or session cache it might keep :type cleanup_cache: bool :param metadata: Build a :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` object for this client as well. This is offered for convenience. :type metadata: bool :returns: tuple - If ``metadata`` is False, returns ``(, None)``; if ``metadata`` is True, returns ``(, )`` """ try: client = self.metadata.resolve_client(address, cleanup_cache=cleanup_cache) if metadata: meta = self.build_metadata(client) else: meta = None except MetadataConsistencyError: err = sys.exc_info()[1] self.critical_error("Client metadata resolution error for %s: %s" % (address[0], err)) except MetadataRuntimeError: err = sys.exc_info()[1] self.critical_error('Metadata system runtime failure for %s: %s' % (address[0], err)) return (client, meta) def critical_error(self, message): """ Log an error with its traceback and return an XML-RPC fault to the client. :param message: The message to log and return to the client :type message: string :raises: :exc:`xmlrpclib.Fault` """ self.logger.error(message, exc_info=1) raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR, "Critical failure: %s" % message) def _get_rmi_objects(self): """ Get a dict (name: object) of all objects that may have RMI calls. Currently, that includes all plugins and the FAM. """ rv = {self.fam.__class__.__name__: self.fam} rv.update(self.plugins) return rv def _get_rmi(self): """ Get a list of RMI calls exposed by plugins """ rmi = dict() for pname, pinst in self._get_rmi_objects().items(): for mname in pinst.__rmi__: rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname) return rmi def _resolve_exposed_method(self, method_name): """ Resolve a method name to the callable that implements that method. :param method_name: Name of the method to resolve :type method_name: string :returns: callable """ try: func = getattr(self, method_name) except AttributeError: raise NoExposedMethod(method_name) if not getattr(func, "exposed", False): raise NoExposedMethod(method_name) return func # XMLRPC handlers start here @exposed def listMethods(self, address): # pylint: disable=W0613 """ List all exposed methods, including plugin RMI. 
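        Plugin RMI names are qualified with the plugin name, so the
        result mixes plain core method names with dotted RMI names.
        Illustrative only (``SomePlugin.refresh`` is hypothetical):

        .. code-block:: python

            ["DeclareVersion", "GetConfig", "GetProbes",
             "SomePlugin.refresh"]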
:param address: Client (address, port) pair :type address: tuple :returns: list of exposed method names """ methods = [name for name, func in inspect.getmembers(self, callable) if (getattr(func, "exposed", False) and self.check_acls(address, name))] methods.extend([m for m in self._get_rmi().keys() if self.check_acls(address, m)]) return methods @exposed def methodHelp(self, address, method_name): # pylint: disable=W0613 """ Get help from the docstring of an exposed method :param address: Client (address, port) pair :type address: tuple :param method_name: The name of the method to get help on :type method_name: string :returns: string - The help message from the method's docstring """ try: func = self._resolve_exposed_method(method_name) except NoExposedMethod: return "" return func.__doc__ @exposed @track_statistics() @close_db_connection def DeclareVersion(self, address, version): """ Declare the client version. :param address: Client (address, port) pair :type address: tuple :param version: The client's declared version :type version: string :returns: bool - True on success :raises: :exc:`xmlrpclib.Fault` """ client = self.resolve_client(address, metadata=False)[0] self.logger.debug("%s is running Bcfg2 client version %s" % (client, version)) try: self.metadata.set_version(client, version) except (MetadataConsistencyError, MetadataRuntimeError): err = sys.exc_info()[1] self.critical_error("Unable to set version for %s: %s" % (client, err)) return True @exposed @close_db_connection def GetProbes(self, address): """ Fetch probes for the client. :param address: Client (address, port) pair :type address: tuple :returns: lxml.etree._Element - XML tree describing probes for this client :raises: :exc:`xmlrpclib.Fault` """ resp = lxml.etree.Element('probes') client, metadata = self.resolve_client(address, cleanup_cache=True) self.logger.debug("Getting probes for %s" % client) try: for plugin in self.plugins_by_type(Probing): for probe in plugin.GetProbes(metadata): resp.append(probe) self.logger.debug("Sending probe list to %s" % client) return lxml.etree.tostring(resp, xml_declaration=False).decode('UTF-8') except: err = sys.exc_info()[1] self.critical_error("Error determining probes for %s: %s" % (client, err)) @exposed @close_db_connection def RecvProbeData(self, address, probedata): """ Receive probe data from clients. :param address: Client (address, port) pair :type address: tuple :returns: bool - True on success :raises: :exc:`xmlrpclib.Fault` """ client, metadata = self.resolve_client(address) self.logger.debug("Receiving probe data from %s" % client) if self.metadata_cache_mode == 'cautious': # clear the metadata cache right after building the # metadata object; that way the cache is cleared for any # new probe data that's received, but the metadata object # that's created for RecvProbeData doesn't get cached. # I.e., the next metadata object that's built, after probe # data is processed, is cached. 
self.metadata_cache.expire(client) try: xpdata = lxml.etree.XML(probedata.encode('utf-8'), parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: err = sys.exc_info()[1] self.critical_error("Failed to parse probe data from client %s: %s" % (client, err)) sources = [] for data in xpdata: source = data.get('source') if source not in sources: if source not in self.plugins: self.logger.warning("Failed to locate plugin %s" % source) continue sources.append(source) for source in sources: datalist = [data for data in xpdata if data.get('source') == source] try: self.plugins[source].ReceiveData(metadata, datalist) except: err = sys.exc_info()[1] self.critical_error("Failed to process probe data from client " "%s: %s" % (client, err)) return True @exposed @close_db_connection def AssertProfile(self, address, profile): """ Set profile for a client. :param address: Client (address, port) pair :type address: tuple :returns: bool - True on success :raises: :exc:`xmlrpclib.Fault` """ client = self.resolve_client(address, metadata=False)[0] self.logger.debug("%s sets its profile to %s" % (client, profile)) try: self.metadata.set_profile(client, profile, address) except (MetadataConsistencyError, MetadataRuntimeError): err = sys.exc_info()[1] self.critical_error("Unable to assert profile for %s: %s" % (client, err)) return True @exposed @close_db_connection def GetConfig(self, address): """ Build config for a client by calling :func:`BuildConfiguration`. :param address: Client (address, port) pair :type address: tuple :returns: lxml.etree._Element - The full configuration document for the client :raises: :exc:`xmlrpclib.Fault` """ client = self.resolve_client(address)[0] try: config = self.BuildConfiguration(client) return lxml.etree.tostring(config, xml_declaration=False).decode('UTF-8') except MetadataConsistencyError: self.critical_error("Metadata consistency failure for %s" % client) @exposed @close_db_connection def RecvStats(self, address, stats): """ Act on statistics upload with :func:`process_statistics`. :param address: Client (address, port) pair :type address: tuple :returns: bool - True on success :raises: :exc:`xmlrpclib.Fault` """ client = self.resolve_client(address)[0] sdata = lxml.etree.XML(stats.encode('utf-8'), parser=Bcfg2.Server.XMLParser) self.process_statistics(client, sdata) return True @exposed @close_db_connection def GetDecisionList(self, address, mode): """ Get the decision list for the client with :func:`GetDecisions`. :param address: Client (address, port) pair :type address: tuple :returns: list of decision tuples :raises: :exc:`xmlrpclib.Fault` """ metadata = self.resolve_client(address)[1] return self.GetDecisions(metadata, mode) @property def database_available(self): """ True if the database is configured and available, False otherwise. """ return self._database_available @exposed def get_statistics(self, _): """ Get current statistics about component execution from :attr:`Bcfg2.Server.Statistics.stats`. 
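        Each statistic name maps to a ``(min, max, average, count)``
        tuple, the same structure consumed by the performance logging
        thread. Illustrative (hypothetical) shape:

        .. code-block:: python

            {"BuiltinCore:Bind:Path": (0.001, 0.31, 0.012, 1423)}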
:returns: dict - The statistics data as returned by :func:`Bcfg2.Server.Statistics.Statistics.display` """ return Bcfg2.Server.Statistics.stats.display() @exposed def toggle_debug(self, address): """ Toggle debug status of the FAM and all plugins :param address: Client (address, port) pair :type address: tuple :returns: bool - The new debug state of the FAM """ return self.set_debug(address, not self.debug_flag) @exposed def toggle_core_debug(self, address): """ Toggle debug status of the server core :param address: Client (address, hostname) pair :type address: tuple :returns: bool - The new debug state of the FAM """ return self.set_core_debug(address, not self.debug_flag) @exposed def toggle_fam_debug(self, address): """ Toggle debug status of the FAM :returns: bool - The new debug state of the FAM """ self.logger.warning("Deprecated method set_fam_debug called by %s" % address[0]) return "This method is deprecated and will be removed in a future " + \ "release\n%s" % self.fam.toggle_debug() @exposed def set_debug(self, address, debug): """ Explicitly set debug status of the FAM and all plugins :param address: Client (address, hostname) pair :type address: tuple :param debug: The new debug status. This can either be a boolean, or a string describing the state (e.g., "true" or "false"; case-insensitive) :type debug: bool or string :returns: bool - The new debug state """ if debug not in [True, False]: debug = debug.lower() == "true" for plugin in self.plugins.values(): plugin.set_debug(debug) rv = self.set_core_debug(address, debug) return self.fam.set_debug(debug) and rv @exposed def set_core_debug(self, _, debug): """ Explicity set debug status of the server core :param debug: The new debug status. This can either be a boolean, or a string describing the state (e.g., "true" or "false"; case-insensitive) :type debug: bool or string :returns: bool - The new debug state of the FAM """ if debug not in [True, False]: debug = debug.lower() == "true" self.debug_flag = debug self.logger.info("Core: debug = %s" % debug) levels = self._loglevels[self.debug_flag] for handler in logging.root.handlers: try: level = levels.get(handler.name, levels['default']) self.logger.debug("Setting %s log handler to %s" % (handler.name, logging.getLevelName(level))) except AttributeError: level = levels['default'] self.logger.debug("Setting unknown log handler %s to %s" % (handler, logging.getLevelName(level))) handler.setLevel(level) return self.debug_flag @exposed def set_fam_debug(self, address, debug): """ Explicitly set debug status of the FAM :param debug: The new debug status of the FAM. This can either be a boolean, or a string describing the state (e.g., "true" or "false"; case-insensitive) :type debug: bool or string :returns: bool - The new debug state of the FAM """ if debug not in [True, False]: debug = debug.lower() == "true" self.logger.warning("Deprecated method set_fam_debug called by %s" % address[0]) return "This method is deprecated and will be removed in a future " + \ "release\n%s" % self.fam.set_debug(debug) @exposed def expire_metadata_cache(self, _, hostnames=None): """ Expire the metadata cache for one or all clients :param hostnames: A list of hostnames to expire the metadata cache for or None. If None the cache of all clients will be expired. 
:type hostnames: None or list of strings """ if hostnames is not None: for hostname in hostnames: self.metadata_cache.expire(hostname) else: self.metadata_cache.expire() class NetworkCore(Core): """ A server core that actually listens on the network, can be daemonized, etc.""" options = Core.options + [ Bcfg2.Options.Common.daemon, Bcfg2.Options.Common.syslog, Bcfg2.Options.Common.location, Bcfg2.Options.Common.ssl_ca, Bcfg2.Options.Common.protocol, Bcfg2.Options.PathOption( '--ssl-key', cf=('communication', 'key'), dest="key", help='Path to SSL key', default="/etc/pki/tls/private/bcfg2.key"), Bcfg2.Options.PathOption( cf=('communication', 'certificate'), dest="cert", help='Path to SSL certificate', default="/etc/pki/tls/certs/bcfg2.crt"), Bcfg2.Options.BooleanOption( '--listen-all', cf=('server', 'listen_all'), default=False, help="Listen on all interfaces"), Bcfg2.Options.Option( cf=('server', 'umask'), default='0077', help='Server umask', type=Bcfg2.Options.Types.octal), Bcfg2.Options.Option( cf=('server', 'user'), default=0, dest='daemon_uid', type=Bcfg2.Options.Types.username, help="User to run the server daemon as"), Bcfg2.Options.Option( cf=('server', 'group'), default=0, dest='daemon_gid', type=Bcfg2.Options.Types.groupname, help="Group to run the server daemon as")] def __init__(self): Core.__init__(self) #: The CA that signed the server cert self.ca = Bcfg2.Options.setup.ca if self._database_available: db_settings = django.conf.settings.DATABASES['default'] if (Bcfg2.Options.setup.daemon and Bcfg2.Options.setup.daemon_uid and db_settings['ENGINE'].endswith(".sqlite3") and not os.path.exists(db_settings['NAME'])): # syncdb will create the sqlite database, and we're # going to daemonize, dropping privs to a non-root # user, so we need to chown the database after # creating it try: os.chown(db_settings['NAME'], Bcfg2.Options.setup.daemon_uid, Bcfg2.Options.setup.daemon_gid) except OSError: err = sys.exc_info()[1] self.logger.error("Failed to set ownership of database " "at %s: %s" % (db_settings['NAME'], err)) __init__.__doc__ = Core.__init__.__doc__.split(".. -----")[0] + \ "\n.. automethod:: _daemonize\n" def __str__(self): if hasattr(Bcfg2.Options.setup, "server"): return "%s(%s)" % (self.__class__.__name__, Bcfg2.Options.setup.server) else: return Core.__str__(self) def run(self): """ Run the server core. This calls :func:`_daemonize` before calling :func:`Bcfg2.Server.Core.Core.run` to run the server core. """ if Bcfg2.Options.setup.daemon: # if we're dropping privs, then the pidfile is likely # /var/run/bcfg2-server/bcfg2-server.pid or similar. # since some OSes clean directories out of /var/run on # reboot, we need to ensure that the directory containing # the pidfile exists and has the appropriate permissions piddir = os.path.dirname(Bcfg2.Options.setup.daemon) if not os.path.exists(piddir): os.makedirs(piddir) os.chown(piddir, Bcfg2.Options.setup.daemon_uid, Bcfg2.Options.setup.daemon_gid) os.chmod(piddir, 493) # 0775 if not self._daemonize(): return False # rewrite $HOME. pulp stores its auth creds in ~/.pulp, so # this is necessary to make that work when privileges are # dropped os.environ['HOME'] = \ pwd.getpwuid(Bcfg2.Options.setup.daemon_uid)[5] else: os.umask(int(Bcfg2.Options.setup.umask, 8)) Core.run(self) def authenticate(self, cert, user, password, address): """ Authenticate a client connection with :func:`Bcfg2.Server.Plugin.interfaces.Metadata.AuthenticateConnection`. 
:param cert: an x509 certificate :type cert: dict :param user: The username of the user trying to authenticate :type user: string :param password: The password supplied by the client :type password: string :param address: An address pair of ``(, )`` :type address: tuple :return: bool - True if the authenticate succeeds, False otherwise """ if self.ca: acert = cert else: # No ca, so no cert validation can be done acert = None return self.metadata.AuthenticateConnection(acert, user, password, address) def _daemonize(self): """ Daemonize the server and write the pidfile. This must be overridden by a core implementation. """ raise NotImplementedError def _drop_privileges(self): """ This is called if not daemonized and running as root to drop the privileges to the configured daemon_uid and daemon_gid. """ daemon.daemon.change_process_owner( Bcfg2.Options.setup.daemon_uid, Bcfg2.Options.setup.daemon_gid) self.logger.debug("Dropped privileges to %s:%s." % (os.getuid(), os.getgid())) src/lib/Bcfg2/Server/Encryption.py000077500000000000000000000632641303523157100173260ustar00rootroot00000000000000""" Bcfg2.Server.Encryption provides a number of convenience methods for handling encryption in Bcfg2. See :ref:`server-encryption` for more details. """ import os import sys import copy import logging import lxml.etree import Bcfg2.Logger import Bcfg2.Options from M2Crypto import Rand from M2Crypto.EVP import Cipher, EVPError from Bcfg2.Utils import safe_input from Bcfg2.Server import XMLParser from Bcfg2.Compat import md5, b64encode, b64decode, StringIO #: Constant representing the encryption operation for #: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This #: makes our code more readable. ENCRYPT = 1 #: Constant representing the decryption operation for #: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This #: makes our code more readable. DECRYPT = 0 #: Default initialization vector. For best security, you should use a #: unique IV for each message. :func:`ssl_encrypt` does this in an #: automated fashion. IV = r'\0' * 16 class _OptionContainer(object): """ Container for options loaded at import-time to configure encryption """ options = [ Bcfg2.Options.BooleanOption( cf=("encryption", "lax_decryption"), help="Decryption failures should cause warnings, not errors"), Bcfg2.Options.Option( cf=("encryption", "algorithm"), default="aes_256_cbc", type=lambda v: v.lower().replace("-", "_"), help="The encryption algorithm to use"), Bcfg2.Options.Option( cf=("encryption", "*"), dest='passphrases', default=dict(), help="Encryption passphrases")] Bcfg2.Options.get_parser().add_component(_OptionContainer) Rand.rand_seed(os.urandom(1024)) def _cipher_filter(cipher, instr): """ M2Crypto reads and writes file-like objects, so this uses StringIO to pass data through it """ inbuf = StringIO(instr) outbuf = StringIO() while 1: buf = inbuf.read() if not buf: break outbuf.write(cipher.update(buf)) outbuf.write(cipher.final()) rv = outbuf.getvalue() inbuf.close() outbuf.close() return rv def str_encrypt(plaintext, key, iv=IV, algorithm=None, salt=None): """ Encrypt a string with a key. For a higher-level encryption interface, see :func:`ssl_encrypt`. 
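    A minimal sketch of direct use (the 32-byte key and 16-byte IV shown
    here are illustrative placeholders, not values Bcfg2 would produce;
    real callers should derive them as :func:`ssl_encrypt` does and use a
    unique IV per message -- the algorithm is passed explicitly so the
    sketch does not depend on the parsed ``[encryption]`` options)::

        crypted = str_encrypt("secret", key="k" * 32, iv="i" * 16,
                              algorithm="aes_256_cbc")
        assert str_decrypt(crypted, key="k" * 32, iv="i" * 16,
                           algorithm="aes_256_cbc") == "secret"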
:param plaintext: The plaintext data to encrypt :type plaintext: string :param key: The key to encrypt the data with :type key: string :param iv: The initialization vector :type iv: string :param algorithm: The cipher algorithm to use :type algorithm: string :param salt: The salt to use :type salt: string :returns: string - The decrypted data """ if algorithm is None: algorithm = Bcfg2.Options.setup.algorithm cipher = Cipher(alg=algorithm, key=key, iv=iv, op=ENCRYPT, salt=salt) return _cipher_filter(cipher, plaintext) def str_decrypt(crypted, key, iv=IV, algorithm=None): """ Decrypt a string with a key. For a higher-level decryption interface, see :func:`ssl_decrypt`. :param crypted: The raw binary encrypted data :type crypted: string :param key: The encryption key to decrypt with :type key: string :param iv: The initialization vector :type iv: string :param algorithm: The cipher algorithm to use :type algorithm: string :returns: string - The decrypted data """ if algorithm is None: algorithm = Bcfg2.Options.setup.algorithm cipher = Cipher(alg=algorithm, key=key, iv=iv, op=DECRYPT) return _cipher_filter(cipher, crypted) def ssl_decrypt(data, passwd, algorithm=None): """ Decrypt openssl-encrypted data. This can decrypt data encrypted by :func:`ssl_encrypt`, or ``openssl enc``. It performs a base64 decode first if the data is base64 encoded, and automatically determines the salt and initialization vector (both of which are embedded in the encrypted data). :param data: The encrypted data (either base64-encoded or raw binary) to decrypt :type data: string :param passwd: The password to use to decrypt the data :type passwd: string :param algorithm: The cipher algorithm to use :type algorithm: string :returns: string - The decrypted data """ # base64-decode the data data = b64decode(data) salt = data[8:16] # pylint: disable=E1101,E1121 hashes = [md5(passwd + salt).digest()] for i in range(1, 3): hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) # pylint: enable=E1101,E1121 key = hashes[0] + hashes[1] iv = hashes[2] return str_decrypt(data[16:], key=key, iv=iv, algorithm=algorithm) def ssl_encrypt(plaintext, passwd, algorithm=None, salt=None): """ Encrypt data in a format that is openssl compatible. :param plaintext: The plaintext data to encrypt :type plaintext: string :param passwd: The password to use to encrypt the data :type passwd: string :param algorithm: The cipher algorithm to use :type algorithm: string :param salt: The salt to use. If none is provided, one will be randomly generated. :type salt: bytes :returns: string - The base64-encoded, salted, encrypted string. The string includes a trailing newline to make it fully compatible with openssl command-line tools. """ if salt is None: salt = Rand.rand_bytes(8) # pylint: disable=E1101,E1121 hashes = [md5(passwd + salt).digest()] for i in range(1, 3): hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) # pylint: enable=E1101,E1121 key = hashes[0] + hashes[1] iv = hashes[2] crypted = str_encrypt(plaintext, key=key, salt=salt, iv=iv, algorithm=algorithm) return b64encode("Salted__" + salt + crypted) + "\n" def is_encrypted(val): """ Make a best guess if the value is encrypted or not. This just checks to see if ``val`` is a base64-encoded string whose content starts with "Salted\\_\\_", so it may have (rare) false positives. It will not have false negatives. 
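    A minimal usage sketch (``data`` and ``passphrase`` are illustrative
    names, not part of this module)::

        if is_encrypted(data):
            plaintext = ssl_decrypt(data, passphrase)
        else:
            plaintext = data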
""" try: return b64decode(val).startswith("Salted__") except: # pylint: disable=W0702 return False def bruteforce_decrypt(crypted, passphrases=None, algorithm=None): """ Convenience method to decrypt the given encrypted string by trying the given passphrases or all passphrases sequentially until one is found that works. :param crypted: The data to decrypt :type crypted: string :param passphrases: The passphrases to try. :type passphrases: list :param algorithm: The cipher algorithm to use :type algorithm: string :returns: string - The decrypted data :raises: :class:`M2Crypto.EVP.EVPError`, if the data cannot be decrypted """ if passphrases is None: passphrases = Bcfg2.Options.setup.passphrases.values() for passwd in passphrases: try: return ssl_decrypt(crypted, passwd, algorithm=algorithm) except EVPError: pass raise EVPError("Failed to decrypt") def print_xml(element, keep_text=False): """ Render an XML element for error output. This prefixes the line number and removes children for nicer display. :param element: The element to render :type element: lxml.etree._Element :param keep_text: Do not discard text content from the element for display :type keep_text: boolean """ xml = None if len(element) or element.text: el = copy.copy(element) if el.text and not keep_text: el.text = '...' for child in el.iterchildren(): el.remove(child) xml = lxml.etree.tostring( el, xml_declaration=False).decode("UTF-8").strip() else: xml = lxml.etree.tostring( element, xml_declaration=False).decode("UTF-8").strip() return "%s (line %s)" % (xml, element.sourceline) class PassphraseError(Exception): """ Exception raised when there's a problem determining the passphrase to encrypt or decrypt with """ class DecryptError(Exception): """ Exception raised when decryption fails. """ class EncryptError(Exception): """ Exception raised when encryption fails. 
""" class CryptoTool(object): """ Generic decryption/encryption interface base object """ def __init__(self, filename): self.logger = logging.getLogger(self.__class__.__name__) self.filename = filename self.data = open(self.filename).read() self.pname, self.passphrase = self._get_passphrase() def _get_passphrase(self): """ get the passphrase for the current file """ if not Bcfg2.Options.setup.passphrases: raise PassphraseError("No passphrases available in %s" % Bcfg2.Options.setup.config) pname = None if Bcfg2.Options.setup.passphrase: pname = Bcfg2.Options.setup.passphrase if pname: try: passphrase = Bcfg2.Options.setup.passphrases[pname] self.logger.debug("Using passphrase %s specified on command " "line" % pname) return (pname, passphrase) except KeyError: raise PassphraseError("Could not find passphrase %s in %s" % (pname, Bcfg2.Options.setup.config)) else: if len(Bcfg2.Options.setup.passphrases) == 1: pname, passphrase = Bcfg2.Options.setup.passphrases.items()[0] self.logger.info("Using passphrase %s" % pname) return (pname, passphrase) elif len(Bcfg2.Options.setup.passphrases) > 1: return (None, None) raise PassphraseError("No passphrase could be determined") def get_destination_filename(self, original_filename): """ Get the filename where data should be written """ return original_filename def write(self, data): """ write data to disk """ new_fname = self.get_destination_filename(self.filename) try: self._write(new_fname, data) self.logger.info("Wrote data to %s" % new_fname) return True except IOError: err = sys.exc_info()[1] self.logger.error("Error writing data from %s to %s: %s" % (self.filename, new_fname, err)) return False def _write(self, filename, data): """ Perform the actual write of data. This is separate from :func:`CryptoTool.write` so it can be easily overridden. 
""" open(filename, "wb").write(data) class Decryptor(CryptoTool): """ Decryptor interface """ def decrypt(self): """ decrypt the file, returning the encrypted data """ raise NotImplementedError class Encryptor(CryptoTool): """ encryptor interface """ def encrypt(self): """ encrypt the file, returning the encrypted data """ raise NotImplementedError class CfgEncryptor(Encryptor): """ encryptor class for Cfg files """ def __init__(self, filename): Encryptor.__init__(self, filename) if self.passphrase is None: raise PassphraseError("Multiple passphrases found in %s, " "specify one on the command line with -p" % Bcfg2.Options.setup.config) def encrypt(self): if is_encrypted(self.data): raise EncryptError("Data is alraedy encrypted") return ssl_encrypt(self.data, self.passphrase) def get_destination_filename(self, original_filename): return original_filename + ".crypt" class CfgDecryptor(Decryptor): """ Decrypt Cfg files """ def decrypt(self): """ decrypt the given file, returning the plaintext data """ if self.passphrase: try: return ssl_decrypt(self.data, self.passphrase) except EVPError: raise DecryptError("Could not decrypt %s with the " "specified passphrase" % self.filename) except: raise DecryptError("Error decrypting %s: %s" % (self.filename, sys.exc_info()[1])) else: # no passphrase given, brute force try: return bruteforce_decrypt(self.data) except EVPError: raise DecryptError("Could not decrypt %s with any passphrase" % self.filename) def get_destination_filename(self, original_filename): if original_filename.endswith(".crypt"): return original_filename[:-6] else: return Decryptor.get_destination_filename(self, original_filename) class PropertiesCryptoMixin(object): """ Mixin to provide some common methods for Properties crypto """ default_xpath = '//*[@encrypted]' def _get_elements(self, xdata): """ Get the list of elements to encrypt or decrypt """ if Bcfg2.Options.setup.xpath: elements = xdata.xpath(Bcfg2.Options.setup.xpath) if not elements: self.logger.warning("XPath expression %s matched no elements" % Bcfg2.Options.setup.xpath) else: elements = xdata.xpath(self.default_xpath) if not elements: elements = list(xdata.getiterator(tag=lxml.etree.Element)) # filter out elements without text data for el in elements[:]: if not el.text: elements.remove(el) if Bcfg2.Options.setup.interactive: for element in elements[:]: if len(element): elt = copy.copy(element) for child in elt.iterchildren(): elt.remove(child) else: elt = element print(lxml.etree.tostring( elt, xml_declaration=False).decode("UTF-8").strip()) ans = safe_input("Encrypt this element? 
[y/N] ") if not ans.lower().startswith("y"): elements.remove(element) return elements def _get_element_passphrase(self, element): """ Get the passphrase to use to encrypt or decrypt a given element """ pname = element.get("encrypted") if pname in Bcfg2.Options.setup.passphrases: passphrase = Bcfg2.Options.setup.passphrases[pname] else: if pname: self.logger.warning("Passphrase %s not found in %s, " "using passphrase given on command line" % (pname, Bcfg2.Options.setup.config)) if self.passphrase: passphrase = self.passphrase pname = self.pname else: self.logger.warning("No passphrase specified for %s element" % element.tag) raise PassphraseError("Multiple passphrases found in %s, " "specify one on the command line with " "-p" % Bcfg2.Options.setup.config) return (pname, passphrase) def _write(self, filename, data): """ Write the data """ data.getroottree().write(filename, xml_declaration=False, pretty_print=True) class PropertiesEncryptor(Encryptor, PropertiesCryptoMixin): """ encryptor class for Properties files """ def encrypt(self): xdata = lxml.etree.XML(self.data, parser=XMLParser) for elt in self._get_elements(xdata): if is_encrypted(elt.text): raise EncryptError("Element is already encrypted: %s" % print_xml(elt)) try: pname, passphrase = self._get_element_passphrase(elt) except PassphraseError: raise EncryptError(str(sys.exc_info()[1])) self.logger.debug("Encrypting %s" % print_xml(elt)) elt.text = ssl_encrypt(elt.text, passphrase).strip() elt.set("encrypted", pname) return xdata def _write(self, filename, data): PropertiesCryptoMixin._write(self, filename, data) class PropertiesDecryptor(Decryptor, PropertiesCryptoMixin): """ decryptor class for Properties files """ def decrypt(self): decrypted_any = False xdata = lxml.etree.XML(self.data, parser=XMLParser) for elt in self._get_elements(xdata): try: pname, passphrase = self._get_element_passphrase(elt) except PassphraseError: raise DecryptError(str(sys.exc_info()[1])) self.logger.debug("Decrypting %s" % print_xml(elt)) try: decrypted = ssl_decrypt(elt.text, passphrase).strip() elt.text = decrypted.encode('ascii', 'xmlcharrefreplace') elt.set("encrypted", pname) decrypted_any = True except (EVPError, TypeError): self.logger.error("Could not decrypt %s, skipping" % print_xml(elt)) except UnicodeDecodeError: # we managed to decrypt the value, but it contains # content that can't even be encoded into xml # entities. what probably happened here is that we # coincidentally could decrypt a value encrypted with # a different key, and wound up with gibberish. 
self.logger.warning("Decrypted %s to gibberish, skipping" % elt.tag) if decrypted_any: return xdata else: raise DecryptError("Failed to decrypt any data in %s" % self.filename) def _write(self, filename, data): PropertiesCryptoMixin._write(self, filename, data) class CLI(object): """ The bcfg2-crypt CLI """ options = [ Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.BooleanOption( "--encrypt", help='Encrypt the specified file'), Bcfg2.Options.BooleanOption( "--decrypt", help='Decrypt the specified file')), Bcfg2.Options.BooleanOption( "--stdout", help='Decrypt or encrypt the specified file to stdout'), Bcfg2.Options.Option( "-p", "--passphrase", metavar="NAME", help='Encryption passphrase name'), Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.BooleanOption( "--properties", help='Encrypt the specified file as a Properties file'), Bcfg2.Options.BooleanOption( "--cfg", help='Encrypt the specified file as a Cfg file')), Bcfg2.Options.OptionGroup( Bcfg2.Options.Common.interactive, Bcfg2.Options.Option( "--xpath", help='XPath expression to select elements to encrypt'), title="Options for handling Properties files"), Bcfg2.Options.OptionGroup( Bcfg2.Options.BooleanOption( "--remove", help='Remove the plaintext file after encrypting'), title="Options for handling Cfg files"), Bcfg2.Options.PathOption( "files", help="File(s) to encrypt or decrypt", nargs='+')] def __init__(self, argv=None): parser = Bcfg2.Options.get_parser( description="Encrypt and decrypt Bcfg2 data", components=[self, _OptionContainer]) parser.parse(argv=argv) self.logger = logging.getLogger(parser.prog) if Bcfg2.Options.setup.decrypt: if Bcfg2.Options.setup.remove: self.logger.error("--remove cannot be used with --decrypt, " "ignoring --remove") Bcfg2.Options.setup.remove = False elif Bcfg2.Options.setup.interactive: self.logger.error("Cannot decrypt interactively") Bcfg2.Options.setup.interactive = False def _is_properties(self, filename): """ Determine if a given file is a Properties file or not """ if Bcfg2.Options.setup.properties: return True elif Bcfg2.Options.setup.cfg: return False elif filename.endswith(".xml"): try: xroot = lxml.etree.parse(filename).getroot() return xroot.tag == "Properties" except lxml.etree.XMLSyntaxError: return False else: return False def run(self): # pylint: disable=R0912,R0915 """ Run bcfg2-crypt """ for fname in Bcfg2.Options.setup.files: if not os.path.exists(fname): self.logger.error("%s does not exist, skipping" % fname) continue # figure out if we need to encrypt this as a Properties file # or as a Cfg file try: props = self._is_properties(fname) except IOError: err = sys.exc_info()[1] self.logger.error("Error reading %s, skipping: %s" % (fname, err)) continue if props: ftype = "Properties" if Bcfg2.Options.setup.remove: self.logger.info("Cannot use --remove with Properties " "file %s, ignoring for this file" % fname) tools = (PropertiesEncryptor, PropertiesDecryptor) else: ftype = "Cfg" if Bcfg2.Options.setup.xpath: self.logger.error("Specifying --xpath with --cfg is " "nonsensical, ignoring --xpath") Bcfg2.Options.setup.xpath = None if Bcfg2.Options.setup.interactive: self.logger.error("Cannot use interactive mode with " "--cfg, ignoring --interactive") Bcfg2.Options.setup.interactive = False tools = (CfgEncryptor, CfgDecryptor) data = None mode = None if Bcfg2.Options.setup.encrypt: try: tool = tools[0](fname) except PassphraseError: self.logger.error(str(sys.exc_info()[1])) continue except IOError: self.logger.error("Error reading %s, skipping: %s" % (fname, err)) continue mode = 
"encrypt" self.logger.debug("Encrypting %s file %s" % (ftype, fname)) elif Bcfg2.Options.setup.decrypt: try: tool = tools[1](fname) except PassphraseError: self.logger.error(str(sys.exc_info()[1])) continue except IOError: self.logger.error("Error reading %s, skipping: %s" % (fname, err)) continue mode = "decrypt" self.logger.debug("Decrypting %s file %s" % (ftype, fname)) else: self.logger.info("Neither --encrypt nor --decrypt specified, " "determining mode") try: tool = tools[1](fname) except PassphraseError: self.logger.error(str(sys.exc_info()[1])) continue except IOError: self.logger.error("Error reading %s, skipping: %s" % (fname, err)) continue try: self.logger.debug("Trying to decrypt %s file %s" % (ftype, fname)) data = tool.decrypt() mode = "decrypt" self.logger.debug("Decrypted %s file %s" % (ftype, fname)) except DecryptError: self.logger.info("Failed to decrypt %s, trying encryption" % fname) try: tool = tools[0](fname) except PassphraseError: self.logger.error(str(sys.exc_info()[1])) continue except IOError: self.logger.error("Error reading %s, skipping: %s" % (fname, err)) continue mode = "encrypt" self.logger.debug("Encrypting %s file %s" % (ftype, fname)) if data is None: try: data = getattr(tool, mode)() except (EncryptError, DecryptError): self.logger.error("Failed to %s %s, skipping: %s" % (mode, fname, sys.exc_info()[1])) continue if Bcfg2.Options.setup.stdout: if len(Bcfg2.Options.setup.files) > 1: print("----- %s -----" % fname) print(data) if len(Bcfg2.Options.setup.files) > 1: print("") else: tool.write(data) if (Bcfg2.Options.setup.remove and tool.get_destination_filename(fname) != fname): try: os.unlink(fname) except IOError: err = sys.exc_info()[1] self.logger.error("Error removing %s: %s" % (fname, err)) continue src/lib/Bcfg2/Server/FileMonitor/000077500000000000000000000000001303523157100170335ustar00rootroot00000000000000src/lib/Bcfg2/Server/FileMonitor/Gamin.py000066400000000000000000000073141303523157100204450ustar00rootroot00000000000000""" File monitor backend with `Gamin `_ support. """ import os import stat from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \ GAMChanged, GAMDeleted from Bcfg2.Server.FileMonitor import Event, FileMonitor class GaminEvent(Event): """ This class maps Gamin event constants to FAM :ref:`event codes `. """ #: The map of gamin event constants (which mirror FAM event names #: closely) to :ref:`event codes ` action_map = {GAMCreated: 'created', GAMExists: 'exists', GAMChanged: 'changed', GAMDeleted: 'deleted', GAMEndExist: 'endExist'} def __init__(self, request_id, filename, code): Event.__init__(self, request_id, filename, code) if code in self.action_map: self.action = self.action_map[code] __init__.__doc__ = Event.__init__.__doc__ class Gamin(FileMonitor): """ File monitor backend with `Gamin `_ support. **Deprecated.** """ #: The Gamin backend is deprecated, but better than pseudo, so it #: has a medium priority. __priority__ = 50 def __init__(self): FileMonitor.__init__(self) #: The :class:`Gamin.WatchMonitor` object for this monitor. self.mon = None #: The counter used to produce monotonically increasing #: monitor handle IDs self.counter = 0 #: The queue used to record monitors that are added before #: :func:`start` has been called and :attr:`mon` is created. self.add_q = [] self.logger.warning("The Gamin file monitor backend is deprecated. 
" "Please switch to a supported file monitor.") __init__.__doc__ = FileMonitor.__init__.__doc__ def start(self): """ The Gamin watch monitor in :attr:`mon` must be created by the daemonized process, so is created in ``start()``. Before the :class:`Gamin.WatchMonitor` object is created, monitors are added to :attr:`add_q`, and are created once the watch monitor is created.""" FileMonitor.start(self) self.mon = WatchMonitor() for monitor in self.add_q: self.AddMonitor(*monitor) self.add_q = [] def fileno(self): if self.started: return self.mon.get_fd() else: return None fileno.__doc__ = FileMonitor.fileno.__doc__ def queue(self, path, action, request_id): """ Create a new :class:`GaminEvent` and add it to the :attr:`events` queue for later handling. """ self.events.append(GaminEvent(request_id, path, action)) def AddMonitor(self, path, obj, handle=None): if handle is None: handle = self.counter self.counter += 1 if not self.started: self.add_q.append((path, obj, handle)) return handle mode = os.stat(path)[stat.ST_MODE] # Flush queued gamin events while self.mon.event_pending(): self.mon.handle_one_event() if stat.S_ISDIR(mode): self.mon.watch_directory(path, self.queue, handle) else: self.mon.watch_file(path, self.queue, handle) self.handles[handle] = obj return handle AddMonitor.__doc__ = FileMonitor.AddMonitor.__doc__ def pending(self): return FileMonitor.pending(self) or self.mon.event_pending() pending.__doc__ = FileMonitor.pending.__doc__ def get_event(self): if self.mon.event_pending(): self.mon.handle_one_event() return FileMonitor.get_event(self) get_event.__doc__ = FileMonitor.get_event.__doc__ src/lib/Bcfg2/Server/FileMonitor/Inotify.py000066400000000000000000000234431303523157100210340ustar00rootroot00000000000000"""File monitor backend with `inotify `_ support. """ import os import errno import pyinotify from Bcfg2.Compat import reduce # pylint: disable=W0622 from Bcfg2.Server.FileMonitor import Event from Bcfg2.Server.FileMonitor.Pseudo import Pseudo class Inotify(Pseudo, pyinotify.ProcessEvent): """ File monitor backend with `inotify `_ support. """ __rmi__ = Pseudo.__rmi__ + ["list_watches", "list_paths"] #: Inotify is the best FAM backend, so it gets a very high #: priority __priority__ = 99 # pylint: disable=E1101 #: Map pyinotify event constants to FAM :ref:`event codes #: `. The mapping is not #: terrifically exact. action_map = {pyinotify.IN_CREATE: 'created', pyinotify.IN_DELETE: 'deleted', pyinotify.IN_MODIFY: 'changed', pyinotify.IN_MOVED_FROM: 'deleted', pyinotify.IN_MOVED_TO: 'created'} # pylint: enable=E1101 #: The pyinotify event mask. We only ask for events that are #: listed in :attr:`action_map` mask = reduce(lambda x, y: x | y, action_map.keys()) def __init__(self): Pseudo.__init__(self) pyinotify.ProcessEvent.__init__(self) #: inotify can't set useful monitors directly on files, only #: on directories, so when a monitor is added on a file we add #: its parent directory to ``event_filter`` and then only #: produce events on a file in that directory if the file is #: listed in ``event_filter``. Keys are directories -- the #: parent directories of individual files that are monitored #: -- and values are lists of full paths to files in each #: directory that events *should* be produced for. An event #: on a file whose parent directory is in ``event_filter`` but #: which is not itself listed will be silently suppressed. 
self.event_filter = dict() #: inotify doesn't like monitoring a path twice, so we keep a #: dict of :class:`pyinotify.Watch` objects, keyed by monitor #: path, to avoid trying to create duplicate monitors. #: (Duplicates can happen if an object accidentally requests #: duplicate monitors, or if two files in a single directory #: are both individually monitored, since inotify can't set #: monitors on the files but only on the parent directories.) self.watches_by_path = dict() #: The :class:`pyinotify.ThreadedNotifier` object. This is #: created in :func:`start` after the server is done #: daemonizing. self.notifier = None #: The :class:`pyinotify.WatchManager` object. This is created #: in :func:`start` after the server is done daemonizing. self.watchmgr = None #: The queue used to record monitors that are added before #: :func:`start` has been called and :attr:`notifier` and #: :attr:`watchmgr` are created. self.add_q = [] def start(self): """ The inotify notifier and manager objects in :attr:`notifier` and :attr:`watchmgr` must be created by the daemonized process, so they are created in ``start()``. Before those objects are created, monitors are added to :attr:`add_q`, and are created once the :class:`pyinotify.ThreadedNotifier` and :class:`pyinotify.WatchManager` objects are created.""" Pseudo.start(self) self.watchmgr = pyinotify.WatchManager() self.notifier = pyinotify.ThreadedNotifier(self.watchmgr, self) self.notifier.start() for monitor in self.add_q: self.AddMonitor(*monitor) self.add_q = [] def fileno(self): if self.started: return self.watchmgr.get_fd() else: return None fileno.__doc__ = Pseudo.fileno.__doc__ def process_default(self, ievent): """ Process all inotify events received. This process a :class:`pyinotify._Event` object, creates a :class:`Bcfg2.Server.FileMonitor.Event` object from it, and adds that event to :attr:`events`. :param ievent: Event to be processed :type ievent: pyinotify._Event """ action = ievent.maskname for amask, aname in self.action_map.items(): if ievent.mask & amask: action = aname break else: # event action is not in the mask, and thus is not # something we care about self.debug_log("Ignoring event %s for %s" % (action, ievent.pathname)) return try: watch = self.watchmgr.watches[ievent.wd] except KeyError: self.logger.error("Error handling event %s for %s: " "Watch %s not found" % (action, ievent.pathname, ievent.wd)) return # FAM-style file monitors return the full path to the parent # directory that is being watched, relative paths to anything # contained within the directory. since we can't use inotify # to watch files directly, we have to sort of guess at whether # this watch was actually added on a file (and thus is in # self.event_filter because we're filtering out other events # on the directory) or was added directly on a directory. if (watch.path == ievent.pathname or ievent.wd in self.event_filter): path = ievent.pathname else: # relative path path = os.path.basename(ievent.pathname) # figure out the handleID. start with the path of the event; # that should catch events on files that are watched directly. # (we have to watch the directory that a file is in, so this # lets us handle events on different files in the same # directory -- and thus under the same watch -- with different # objects.) If the path to the event doesn't have a handler, # use the path of the watch itself. 
handleID = ievent.pathname if handleID not in self.handles: handleID = watch.path evt = Event(handleID, path, action) if (ievent.wd not in self.event_filter or ievent.pathname in self.event_filter[ievent.wd]): self.events.append(evt) def AddMonitor(self, path, obj, handleID=None): # strip trailing slashes path = path.rstrip("/") if not self.started: self.add_q.append((path, obj)) return path if not os.path.isdir(path): # inotify is a little wonky about watching files. for # instance, if you watch /tmp/foo, and then do 'mv # /tmp/bar /tmp/foo', it processes that as a deletion of # /tmp/foo (which it technically _is_, but that's rather # useless -- we care that /tmp/foo changed, not that it # was first deleted and then created). In order to # effectively watch a file, we have to watch the directory # it's in, and filter out events for other files in the # same directory that are not similarly watched. # watch_transient_file requires a Processor _class_, not # an object, so we can't have this object handle events, # which is Wrong, so we can't use that function. watch_path = os.path.dirname(path) is_dir = False else: watch_path = path is_dir = True # see if this path is already being watched try: watchdir = self.watches_by_path[watch_path] except KeyError: if not os.path.exists(watch_path): raise OSError(errno.ENOENT, "No such file or directory: '%s'" % path) watchdir = self.watchmgr.add_watch(watch_path, self.mask, quiet=False)[watch_path] self.watches_by_path[watch_path] = watchdir produce_exists = True if not is_dir: if watchdir not in self.event_filter: self.event_filter[watchdir] = [path] elif path not in self.event_filter[watchdir]: self.event_filter[watchdir].append(path) else: # we've been asked to watch a file that we're already # watching, so we don't need to produce 'exists' # events produce_exists = False # inotify doesn't produce initial 'exists' events, so we # inherit from Pseudo to produce those if produce_exists: return Pseudo.AddMonitor(self, path, obj, handleID=path) else: self.handles[path] = obj return path AddMonitor.__doc__ = Pseudo.AddMonitor.__doc__ def shutdown(self): if self.started and self.notifier: self.notifier.stop() Pseudo.shutdown(self) shutdown.__doc__ = Pseudo.shutdown.__doc__ def list_watches(self): """ XML-RPC that returns a list of current inotify watches for debugging purposes. """ return list(self.watches_by_path.keys()) def list_paths(self): """ XML-RPC that returns a list of paths that are handled for debugging purposes. Because inotify doesn't like watching files, but prefers to watch directories, this will be different from :func:`Bcfg2.Server.FileMonitor.Inotify.Inotify.ListWatches`. For instance, if a plugin adds a monitor to ``/var/lib/bcfg2/Plugin/foo.xml``, :func:`ListPaths` will return ``/var/lib/bcfg2/Plugin/foo.xml``, while :func:`ListWatches` will return ``/var/lib/bcfg2/Plugin``. """ return list(self.handles.keys()) src/lib/Bcfg2/Server/FileMonitor/Pseudo.py000066400000000000000000000020761303523157100206510ustar00rootroot00000000000000""" Pseudo provides static monitor support for file alteration events. That is, it only produces "exists" and "endExist" events and does not monitor for ongoing changes. """ import os from Bcfg2.Server.FileMonitor import FileMonitor, Event class Pseudo(FileMonitor): """ File monitor that only produces events on server startup and doesn't actually monitor for ongoing changes at all. """ #: The ``Pseudo`` monitor should only be used if no other FAM #: backends are available. 
__priority__ = 1 def AddMonitor(self, path, obj, handleID=None): if handleID is None: handleID = len(list(self.handles.keys())) if os.path.exists(path): self.events.append(Event(handleID, path, 'exists')) if os.path.isdir(path): dirlist = os.listdir(path) for fname in dirlist: self.events.append(Event(handleID, fname, 'exists')) self.events.append(Event(handleID, path, 'endExist')) if obj is not None: self.handles[handleID] = obj return handleID src/lib/Bcfg2/Server/FileMonitor/__init__.py000066400000000000000000000345451303523157100211570ustar00rootroot00000000000000""" Bcfg2.Server.FileMonitor provides the support for monitoring files. The FAM acts as a dispatcher for events: An event is detected on a file (e.g., the file content is changed), and then that event is dispatched to the ``HandleEvent`` method of an object that knows how to handle the event. Consequently, :func:`Bcfg2.Server.FileMonitor.FileMonitor.AddMonitor` takes two arguments: the path to monitor, and the object that handles events detected on that event. ``HandleEvent`` is called with a single argument, the :class:`Bcfg2.Server.FileMonitor.Event` object to be handled. Assumptions ----------- The FAM API Bcfg2 uses is based on the API of SGI's `File Alteration Monitor `_ (also called "FAM"). Consequently, a few assumptions apply: * When a file or directory is monitored for changes, we call that a "monitor"; other backends my use the term "watch," but for consistency we will use "monitor." * Monitors can be set on files or directories. * A monitor set on a directory monitors all files within that directory, non-recursively. If the object that requested the monitor wishes to monitor recursively, it must implement that itself. * Setting a monitor immediately produces "exists" and "endExist" events for the monitored file or directory and all files or directories contained within it (non-recursively). * An event on a file or directory that is monitored directly yields the full path to the file or directory. * An event on a file or directory that is *only* contained within a monitored directory yields the relative path to the file or directory within the monitored parent. It is the responsibility of the handler to reconstruct full paths as necessary. * Each monitor that is set must have a unique ID that identifies it, in order to make it possible to reconstruct full paths as necessary. This ID will be stored in :attr:`Bcfg2.Server.FileMonitor.FileMonitor.handles`. It may be any hashable value; some FAM backends use monotonically increasing integers, while others use the path to the monitor. Base Classes ------------ """ import os import sys import fnmatch import Bcfg2.Options from time import sleep, time from Bcfg2.Logger import Debuggable class Event(object): """ Base class for all FAM events. """ def __init__(self, request_id, filename, code): """ :param request_id: The handler ID of the monitor that produced this event :type request_id: Varies :param filename: The file or directory on which the event was detected. An event on a file or directory that is monitored directly yields the full path to the file or directory; an event on a file or directory that is *only* contained within a monitored directory yields the relative path to the file or directory within the monitored parent. :type filename: string :param code: The :ref:`event code ` produced. I.e., the type of event. 
:type code: string """ #: The handler ID of the monitor that produced this event self.requestID = request_id #: The file or directory on which the event was detected. An #: event on a file or directory that is monitored directly #: yields the full path to the file or directory; an event on #: a file or directory that is *only* contained within a #: monitored directory yields the relative path to the file or #: directory within the monitored parent. self.filename = filename #: The :ref:`event code ` #: produced. I.e., the type of event. self.action = code def code2str(self): """ Return the :ref:`event code ` for this event. This is just an alias for :attr:`action`. """ return self.action def __str__(self): return "%s: %s %s" % (self.__class__.__name__, self.filename, self.action) def __repr__(self): return "%s (request ID %s)" % (str(self), self.requestID) class FileMonitor(Debuggable): """ The base class that all FAM implementions must inherit. The simplest instance of a FileMonitor subclass needs only to add monitor objects to :attr:`handles` and received events to :attr:`events`; the basic interface will handle the rest. """ options = [ Bcfg2.Options.Option( cf=('server', 'ignore_files'), help='File globs to ignore', type=Bcfg2.Options.Types.comma_list, default=['*~', '*#', '.#*', '*.swp', '*.swpx', '.*.swx', 'SCCS', '.svn', '4913', '.gitignore'])] #: The relative priority of this FAM backend. Better backends #: should have higher priorities. __priority__ = -1 #: List of names of methods to be exposed as XML-RPC functions __rmi__ = Debuggable.__rmi__ + ["list_event_handlers"] def __init__(self): """ :param ignore: A list of filename globs describing events that should be ignored (i.e., not processed by any object) :type ignore: list of strings (filename globs) :param debug: Produce debugging information about the events received and handled. :type debug: bool .. ----- .. autoattribute:: __priority__ """ Debuggable.__init__(self) #: A dict that records which objects handle which events. #: Keys are monitor handle IDs and values are objects whose #: ``HandleEvent`` method will be called to handle an event self.handles = dict() #: Queue of events to handle self.events = [] #: List of filename globs to ignore events for. For events #: that include the full path, both the full path and the bare #: filename will be checked against ``ignore``. self.ignore = Bcfg2.Options.setup.ignore_files #: Whether or not the FAM has been started. See :func:`start`. self.started = False def __str__(self): return "%s: %s" % (__name__, self.__class__.__name__) def __repr__(self): return "%s (%s events, fd %s)" % (self.__class__.__name__, len(self.events), self.fileno()) def start(self): """ Start threads or anything else that needs to be done after the server forks and daemonizes. Note that monitors may (and almost certainly will) be added before ``start()`` is called, so if a backend depends on being started to add monitors, those requests will need to be enqueued and added after ``start()``. See :class:`Bcfg2.Server.FileMonitor.Inotify.Inotify` for an example of this. """ self.started = True def should_ignore(self, event): """ Returns True if an event should be ignored, False otherwise. For events that include the full path, both the full path and the bare filename will be checked against :attr:`ignore`. If the event is ignored, a debug message will be logged with :func:`debug_log`. 
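        For example, with the default ignore globs an editor swap file is
        skipped (the path and the ``fam`` instance are illustrative)::

            evt = Event(1, "/var/lib/bcfg2/Cfg/motd/.motd.swp", "created")
            fam.should_ignore(evt)   # True: the basename matches '*.swp'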
:param event: Check if this event matches :attr:`ignore` :type event: Bcfg2.Server.FileMonitor.Event :returns: bool - Whether not to ignore the event """ for pattern in self.ignore: if (fnmatch.fnmatch(event.filename, pattern) or fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)): self.debug_log("Ignoring %s" % event) return True return False def pending(self): """ Returns True if there are pending events (i.e., events in :attr:`events` that have not been processed), False otherwise. """ return bool(self.events) def get_event(self): """ Get the oldest pending event in :attr:`events`. :returns: :class:`Bcfg2.Server.FileMonitor.Event` """ return self.events.pop(0) def fileno(self): """ Get the file descriptor of the file monitor thread. :returns: int - The FD number """ return 0 def handle_one_event(self, event): """ Handle the given event by dispatching it to the object that handles it. This is only called by :func:`handle_event_set`, so if a backend overrides that method it does not necessarily need to implement this function. :param event: The event to handle. :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ if not self.started: self.start() if self.should_ignore(event): return if event.requestID not in self.handles: self.logger.info("Got event for unexpected id %s, file %s" % (event.requestID, event.filename)) return self.debug_log("Dispatching event %s %s to obj %s" % (event.code2str(), event.filename, self.handles[event.requestID])) try: self.handles[event.requestID].HandleEvent(event) except KeyboardInterrupt: raise except: # pylint: disable=W0702 err = sys.exc_info()[1] self.logger.error("Error in handling of event %s for %s: %s" % (event.code2str(), event.filename, err)) def handle_event_set(self, lock=None): """ Handle all pending events. :param lock: A thread lock to use while handling events. If None, then no thread locking will be performed. This can possibly lead to race conditions in event handling, although it's unlikely to cause any real problems. :type lock: threading.Lock :returns: None """ if not self.started: self.start() count = 0 start = time() if lock: lock.acquire() while self.pending(): self.handle_one_event(self.get_event()) count += 1 if lock: lock.release() end = time() if count > 0: self.logger.info("Handled %d events in %.03fs" % (count, (end - start))) def handle_events_in_interval(self, interval): """ Handle events for the specified period of time (in seconds). This call will block for ``interval`` seconds and handle all events received during that period by calling :func:`handle_event_set`. :param interval: The interval, in seconds, during which events should be handled. Any events that are already pending when :func:`handle_events_in_interval` is called will also be handled. :type interval: int :returns: None """ if not self.started: self.start() end = time() + interval while time() < end: if self.pending(): self.handle_event_set() end = time() + interval else: sleep(0.5) def shutdown(self): """ Handle any tasks required to shut down the monitor. """ self.debug_log("Shutting down %s file monitor" % self.__class__.__name__) self.started = False def AddMonitor(self, path, obj, handleID=None): """ Monitor the specified path, alerting obj to events. This method must be overridden by a subclass of :class:`Bcfg2.Server.FileMonitor.FileMonitor`. :param path: The path to monitor :type path: string :param obj: The object whose ``HandleEvent`` method will be called when an event is produced. 
:type obj: Varies :param handleID: The handle ID to use for the monitor. This is useful when requests to add a monitor must be enqueued and the actual monitors added after :func:`start` is called. :type handleID: Varies :returns: Varies - The handler ID for the newly created monitor """ raise NotImplementedError def list_event_handlers(self): """ XML-RPC that returns :attr:`Bcfg2.Server.FileMonitor.FileMonitor.handles` for debugging purposes. """ rv = dict() for watch, handler in self.handles.items(): rv[watch] = getattr(handler, "name", handler.__class__.__name__) return rv #: A module-level FAM object that all plugins, etc., can use. This #: should not be used directly, but retrieved via :func:`get_fam`. _FAM = None def get_fam(): """ Get a :class:`Bcfg2.Server.FileMonitor.FileMonitor` object. If :attr:`_FAM` has not been populated, then a new default FileMonitor will be created. :returns: :class:`Bcfg2.Server.FileMonitor.FileMonitor` """ global _FAM # pylint: disable=W0603 if _FAM is None: _FAM = Bcfg2.Options.setup.filemonitor() return _FAM #: A dict of all available FAM backends. Keys are the human-readable #: names of the backends, which are used in bcfg2.conf to select a #: backend; values are the backend classes. In addition, the #: ``default`` key will be set to the best FAM backend as determined #: by :attr:`Bcfg2.Server.FileMonitor.FileMonitor.__priority__` available = dict() # pylint: disable=C0103 # TODO: loading the monitor drivers should be automatic try: from Bcfg2.Server.FileMonitor.Pseudo import Pseudo available['pseudo'] = Pseudo except ImportError: pass try: from Bcfg2.Server.FileMonitor.Gamin import Gamin available['gamin'] = Gamin except ImportError: pass try: from Bcfg2.Server.FileMonitor.Inotify import Inotify available['inotify'] = Inotify except ImportError: pass for fdrv in reversed(sorted(available.keys(), key=lambda k: available[k].__priority__)): if fdrv in available: available['default'] = available[fdrv] break src/lib/Bcfg2/Server/Info.py000066400000000000000000000771251303523157100160650ustar00rootroot00000000000000""" Subcommands and helpers for bcfg2-info """ # -*- coding: utf-8 -*- import os import sys import cmd import math import time import copy import pipes import fnmatch import argparse import operator import lxml.etree import traceback from code import InteractiveConsole import Bcfg2.Logger import Bcfg2.Options import Bcfg2.Server.Core import Bcfg2.Server.Plugin import Bcfg2.Client.Tools.POSIX from Bcfg2.Compat import any # pylint: disable=W0622 try: try: import cProfile as profile except ImportError: import profile import pstats HAS_PROFILE = True except ImportError: HAS_PROFILE = False def print_tabular(rows): """Print data in tabular format.""" cmax = tuple([max([len(str(row[index])) for row in rows]) + 1 for index in range(len(rows[0]))]) fstring = (" %%-%ss |" * len(cmax)) % cmax fstring = ('|'.join([" %%-%ss "] * len(cmax))) % cmax print(fstring % rows[0]) print((sum(cmax) + (len(cmax) * 2) + (len(cmax) - 1)) * '=') for row in rows[1:]: print(fstring % row) def display_trace(trace): """ display statistics from a profile trace """ stats = pstats.Stats(trace) stats.sort_stats('cumulative', 'calls', 'time') stats.print_stats(200) def load_interpreters(): """ Load a dict of available Python interpreters """ interpreters = dict(python=lambda v: InteractiveConsole(v).interact()) default = "python" try: import bpython.cli interpreters["bpython"] = lambda v: bpython.cli.main(args=[], locals_=v) default = "bpython" except ImportError: pass try: # 
whether ipython is actually better than bpython is # up for debate, but this is the behavior that existed # before --interpreter was added, so we call IPython # better import IPython # pylint: disable=E1101 if hasattr(IPython, "Shell"): interpreters["ipython"] = lambda v: \ IPython.Shell.IPShell(argv=[], user_ns=v).mainloop() default = "ipython" elif hasattr(IPython, "embed"): interpreters["ipython"] = lambda v: IPython.embed(user_ns=v) default = "ipython" else: print("Unknown IPython API version") # pylint: enable=E1101 except ImportError: pass return (interpreters, default) class InfoCmd(Bcfg2.Options.Subcommand): # pylint: disable=W0223 """ Base class for bcfg2-info subcommands """ def _expand_globs(self, globs, candidates): """ Given a list of globs, select the items from candidates that match the globs """ # special cases to speed things up: if not globs or '*' in globs: return candidates has_wildcards = False for glob in globs: # check if any wildcard characters are in the string if set('*?[]') & set(glob): has_wildcards = True break if not has_wildcards: return globs rv = set() cset = set(candidates) for glob in globs: rv.update(c for c in cset if fnmatch.fnmatch(c, glob)) cset.difference_update(rv) return list(rv) def get_client_list(self, globs): """ given a list of host globs, get a list of clients that match them """ return self._expand_globs(globs, self.core.metadata.clients) def get_group_list(self, globs): """ given a list of group glob, get a list of groups that match them""" # special cases to speed things up: return self._expand_globs(globs, list(self.core.metadata.groups.keys())) class Debug(InfoCmd): """ Shell out to a Python interpreter """ interpreters, default_interpreter = load_interpreters() options = [ Bcfg2.Options.BooleanOption( "-n", "--non-interactive", help="Do not enter the interactive debugger"), Bcfg2.Options.PathOption( "-f", dest="cmd_list", type=argparse.FileType('r'), help="File containing commands to run"), Bcfg2.Options.Option( "--interpreter", cf=("bcfg2-info", "interpreter"), env="BCFG2_INFO_INTERPRETER", choices=interpreters.keys(), default=default_interpreter)] def run(self, setup): if setup.cmd_list: console = InteractiveConsole(locals()) for command in setup.cmd_list.readlines(): command = command.strip() if command: console.push(command) if not setup.non_interactive: print("Dropping to interpreter; press ^D to resume") self.interpreters[setup.interpreter](self.core.get_locals()) class Build(InfoCmd): """ Build config for hostname, writing to filename """ options = [Bcfg2.Options.PositionalArgument("hostname"), Bcfg2.Options.PositionalArgument("filename", nargs='?', default=sys.stdout, type=argparse.FileType('w'))] def run(self, setup): etree = lxml.etree.ElementTree( self.core.BuildConfiguration(setup.hostname)) try: etree.write( setup.filename, encoding='UTF-8', xml_declaration=True, pretty_print=True) except IOError: err = sys.exc_info()[1] print("Failed to write %s: %s" % (setup.filename, err)) class Builddir(InfoCmd): """ Build config for hostname, writing separate files to directory """ # don't try to isntall these types of entries blacklisted_types = ["nonexistent", "permissions"] options = Bcfg2.Client.Tools.POSIX.POSIX.options + [ Bcfg2.Options.PositionalArgument("hostname"), Bcfg2.Options.PathOption("directory")] help = """Generates a config for client and writes the individual configuration files out separately in a tree under . This only handles file entries, and does not respect 'owner' or 'group' attributes unless run as root. 
""" def run(self, setup): setup.paranoid = False client_config = self.core.BuildConfiguration(setup.hostname) if client_config.tag == 'error': print("Building client configuration failed.") return 1 entries = [] for struct in client_config: for entry in struct: if (entry.tag == 'Path' and entry.get("type") not in self.blacklisted_types): failure = entry.get("failure") if failure is not None: print("Skipping entry %s:%s with bind failure: %s" % (entry.tag, entry.get("name"), failure)) continue entry.set('name', os.path.join(setup.directory, entry.get('name').lstrip("/"))) entries.append(entry) Bcfg2.Client.Tools.POSIX.POSIX(client_config).Install(entries) class Buildfile(InfoCmd): """ Build config file for hostname """ options = [ Bcfg2.Options.Option("-f", "--outfile", metavar="", type=argparse.FileType('w'), default=sys.stdout), Bcfg2.Options.PathOption("--altsrc"), Bcfg2.Options.PathOption("filename"), Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): entry = lxml.etree.Element('Path', name=setup.filename) if setup.altsrc: entry.set("altsrc", setup.altsrc) try: self.core.Bind(entry, self.core.build_metadata(setup.hostname)) except: # pylint: disable=W0702 print("Failed to build entry %s for host %s" % (setup.filename, setup.hostname)) raise try: setup.outfile.write( lxml.etree.tostring(entry, xml_declaration=False).decode('UTF-8')) setup.outfile.write("\n") except IOError: err = sys.exc_info()[1] print("Failed to write %s: %s" % (setup.outfile.name, err)) class BuildAllMixin(object): """ InfoCmd mixin to make a version of an existing command that applies to multiple hosts""" directory_arg = Bcfg2.Options.PathOption("directory") hostname_arg = Bcfg2.Options.PositionalArgument("hostname", nargs='*', default=[]) options = [directory_arg, hostname_arg] @property def _parent(self): """ the parent command """ for cls in self.__class__.__mro__: if (cls != InfoCmd and cls != self.__class__ and issubclass(cls, InfoCmd)): return cls def run(self, setup): """ Run the command """ try: os.makedirs(setup.directory) except OSError: err = sys.exc_info()[1] if err.errno != 17: print("Could not create %s: %s" % (setup.directory, err)) return 1 clients = self.get_client_list(setup.hostname) for client in clients: csetup = self._get_setup(client, copy.copy(setup)) csetup.hostname = client self._parent.run(self, csetup) # pylint: disable=E1101 def _get_setup(self, client, setup): """ This can be overridden by children to populate individual setup options on a per-client basis """ raise NotImplementedError class Buildallfile(Buildfile, BuildAllMixin): """ Build config file for all clients in directory """ options = [BuildAllMixin.directory_arg, Bcfg2.Options.PathOption("--altsrc"), Bcfg2.Options.PathOption("filename"), BuildAllMixin.hostname_arg] def run(self, setup): BuildAllMixin.run(self, setup) def _get_setup(self, client, setup): setup.outfile = open(os.path.join(setup.directory, client), 'w') return setup class Buildall(Build, BuildAllMixin): """ Build configs for all clients in directory """ options = BuildAllMixin.options def run(self, setup): BuildAllMixin.run(self, setup) def _get_setup(self, client, setup): setup.filename = os.path.join(setup.directory, client + ".xml") return setup class Buildbundle(InfoCmd): """ Render a templated bundle for hostname """ options = [Bcfg2.Options.PositionalArgument("bundle"), Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): bundler = self.core.plugins['Bundler'] bundle = None if setup.bundle in bundler.entries: bundle = 
bundler.entries[setup.bundle] elif not setup.bundle.endswith(".xml"): fname = setup.bundle + ".xml" if fname in bundler.entries: bundle = bundler.entries[bundle] if not bundle: print("No such bundle %s" % setup.bundle) return 1 try: metadata = self.core.build_metadata(setup.hostname) print(lxml.etree.tostring(bundle.XMLMatch(metadata), xml_declaration=False, pretty_print=True).decode('UTF-8')) except: # pylint: disable=W0702 print("Failed to render bundle %s for host %s: %s" % (setup.bundle, setup.hostname, sys.exc_info()[1])) raise class Automatch(InfoCmd): """ Perform automatch on a Properties file """ options = [ Bcfg2.Options.BooleanOption( "-f", "--force", help="Force automatch even if it's disabled"), Bcfg2.Options.PositionalArgument("propertyfile"), Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): try: props = self.core.plugins['Properties'] except KeyError: print("Properties plugin not enabled") return 1 pfile = props.entries[setup.propertyfile] if (not Bcfg2.Options.setup.force and not Bcfg2.Options.setup.automatch and pfile.xdata.get("automatch", "false").lower() != "true"): print("Automatch not enabled on %s" % setup.propertyfile) else: metadata = self.core.build_metadata(setup.hostname) print(lxml.etree.tostring(pfile.XMLMatch(metadata), xml_declaration=False, pretty_print=True).decode('UTF-8')) class ExpireCache(InfoCmd): """ Expire the metadata cache """ only_interactive = True options = [ Bcfg2.Options.PositionalArgument( "hostname", nargs="*", default=[], help="Expire cache for the given host(s)")] def run(self, setup): if setup.hostname: for client in self.get_client_list(setup.hostname): self.core.metadata_cache.expire(client) else: self.core.metadata_cache.expire() class EventDebug(InfoCmd): """ Enable debugging output for FAM events """ only_interactive = True aliases = ['event_debug'] def run(self, _): self.core.fam.set_debug(True) class Bundles(InfoCmd): """ Print out group/bundle info """ options = [Bcfg2.Options.PositionalArgument("group", nargs='*')] def run(self, setup): data = [('Group', 'Bundles')] groups = self.get_group_list(setup.group) groups.sort() for group in groups: data.append((group, ','.join(self.core.metadata.groups[group][0]))) print_tabular(data) class Clients(InfoCmd): """ Print out client/profile info """ options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*', default=[])] def run(self, setup): data = [('Client', 'Profile')] for client in sorted(self.get_client_list(setup.hostname)): imd = self.core.metadata.get_initial_metadata(client) data.append((client, imd.profile)) print_tabular(data) class Config(InfoCmd): """ Print out the current configuration of Bcfg2""" options = [ Bcfg2.Options.BooleanOption( "--raw", help="Produce more accurate but less readable raw output")] def run(self, setup): parser = Bcfg2.Options.get_parser() data = [('Description', 'Value')] for option in parser.option_list: if hasattr(setup, option.dest): value = getattr(setup, option.dest) if any(issubclass(a.__class__, Bcfg2.Options.ComponentAction) for a in option.actions.values()): if not setup.raw: try: if option.action.islist: value = [v.__name__ for v in value] else: value = value.__name__ except AttributeError: # just use the value as-is pass if setup.raw: value = repr(value) data.append((getattr(option, "help", option.dest), value)) print_tabular(data) class Probes(InfoCmd): """ Get probes for the given host """ options = [ Bcfg2.Options.BooleanOption("-p", "--pretty", help="Human-readable output"), 
Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): if setup.pretty: probes = [] else: probes = lxml.etree.Element('probes') metadata = self.core.build_metadata(setup.hostname) for plugin in self.core.plugins_by_type(Bcfg2.Server.Plugin.Probing): for probe in plugin.GetProbes(metadata): probes.append(probe) if setup.pretty: for probe in probes: pname = probe.get("name") print("=" * (len(pname) + 2)) print(" %s" % pname) print("=" * (len(pname) + 2)) print("") print(probe.text) print("") else: print(lxml.etree.tostring(probes, xml_declaration=False, pretty_print=True).decode('UTF-8')) class Showentries(InfoCmd): """ Show abstract configuration entries for a given host """ options = [Bcfg2.Options.PositionalArgument("hostname"), Bcfg2.Options.PositionalArgument("type", nargs='?')] def run(self, setup): try: metadata = self.core.build_metadata(setup.hostname) except Bcfg2.Server.Plugin.MetadataConsistencyError: print("Unable to build metadata for %s: %s" % (setup.hostname, sys.exc_info()[1])) structures = self.core.GetStructures(metadata) output = [('Entry Type', 'Name')] etypes = None if setup.type: etypes = [setup.type, "Bound%s" % setup.type] for item in structures: output.extend((child.tag, child.get('name')) for child in item.getchildren() if not etypes or child.tag in etypes) print_tabular(output) class Groups(InfoCmd): """ Print out group info """ options = [Bcfg2.Options.PositionalArgument("group", nargs='*')] def _profile_flag(self, group): """ Whether or not the group is a profile group """ if self.core.metadata.groups[group].is_profile: return 'yes' else: return 'no' def run(self, setup): data = [("Groups", "Profile", "Category")] groups = self.get_group_list(setup.group) groups.sort() for group in groups: data.append((group, self._profile_flag(group), self.core.metadata.groups[group].category)) print_tabular(data) class Showclient(InfoCmd): """ Show metadata for the given hosts """ options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*')] def run(self, setup): for client in self.get_client_list(setup.hostname): try: metadata = self.core.build_metadata(client) except Bcfg2.Server.Plugin.MetadataConsistencyError: print("Could not build metadata for %s: %s" % (client, sys.exc_info()[1])) continue fmt = "%-10s %s" print(fmt % ("Hostname:", metadata.hostname)) print(fmt % ("Profile:", metadata.profile)) group_fmt = "%-10s %-30s %s" header = False for group in sorted(list(metadata.groups)): category = "" for cat, grp in metadata.categories.items(): if grp == group: category = "Category: %s" % cat break if not header: print(group_fmt % ("Groups:", group, category)) header = True else: print(group_fmt % ("", group, category)) if metadata.bundles: sorted_bundles = sorted(list(metadata.bundles)) print(fmt % ("Bundles:", sorted_bundles[0])) for bnd in sorted_bundles[1:]: print(fmt % ("", bnd)) if metadata.connectors: print("Connector data") print("=" * 80) for conn in metadata.connectors: if getattr(metadata, conn): print(fmt % (conn + ":", getattr(metadata, conn))) print("=" * 80) class Mappings(InfoCmd): """ Print generator mappings for optional type and name """ options = [Bcfg2.Options.PositionalArgument("type", nargs='?'), Bcfg2.Options.PositionalArgument("name", nargs='?')] def run(self, setup): data = [('Plugin', 'Type', 'Name')] for generator in self.core.plugins_by_type( Bcfg2.Server.Plugin.Generator): etypes = setup.type or list(generator.Entries.keys()) if setup.name: interested = [(etype, [setup.name]) for etype in etypes] else: interested = [(etype, 
generator.Entries[etype]) for etype in etypes if etype in generator.Entries] for etype, names in interested: data.extend((generator.name, etype, name) for name in names if name in generator.Entries.get(etype, {})) print_tabular(data) class PackageResolve(InfoCmd): """ Resolve packages for the given host""" options = [Bcfg2.Options.PositionalArgument("hostname"), Bcfg2.Options.PositionalArgument("package", nargs="*")] def run(self, setup): try: pkgs = self.core.plugins['Packages'] except KeyError: print("Packages plugin not enabled") return 1 metadata = self.core.build_metadata(setup.hostname) indep = lxml.etree.Element("Independent", name=self.__class__.__name__.lower()) if setup.package: structures = [lxml.etree.Element("Bundle", name="packages")] for package in setup.package: lxml.etree.SubElement(structures[0], "Package", name=package) else: structures = self.core.GetStructures(metadata) pkgs._build_packages(metadata, indep, # pylint: disable=W0212 structures) print("%d new packages added" % len(indep.getchildren())) if len(indep.getchildren()): print(" %s" % "\n ".join(lxml.etree.tostring(p) for p in indep.getchildren())) class Packagesources(InfoCmd): """ Show package sources """ options = [Bcfg2.Options.PositionalArgument("hostname")] def run(self, setup): try: pkgs = self.core.plugins['Packages'] except KeyError: print("Packages plugin not enabled") return 1 try: metadata = self.core.build_metadata(setup.hostname) except Bcfg2.Server.Plugin.MetadataConsistencyError: print("Unable to build metadata for %s: %s" % (setup.hostname, sys.exc_info()[1])) return 1 print(pkgs.get_collection(metadata).sourcelist()) class Query(InfoCmd): """ Query clients """ options = [ Bcfg2.Options.ExclusiveOptionGroup( Bcfg2.Options.Option( "-g", "--group", metavar="", dest="querygroups", type=Bcfg2.Options.Types.comma_list), Bcfg2.Options.Option( "-p", "--profile", metavar="", dest="queryprofiles", type=Bcfg2.Options.Types.comma_list), Bcfg2.Options.Option( "-b", "--bundle", metavar="", dest="querybundles", type=Bcfg2.Options.Types.comma_list), required=True)] def run(self, setup): if setup.queryprofiles: res = self.core.metadata.get_client_names_by_profiles( setup.queryprofiles) elif setup.querygroups: res = self.core.metadata.get_client_names_by_groups( setup.querygroups) elif setup.querybundles: res = self.core.metadata.get_client_names_by_bundles( setup.querybundles) print("\n".join(res)) class Quit(InfoCmd): """ Exit program """ only_interactive = True aliases = ['exit', 'EOF'] def run(self, _): raise SystemExit(0) class Shell(InfoCmd): """ Open an interactive shell to run multiple bcfg2-info commands """ interactive = False def run(self, setup): try: self.core.cli.cmdloop('Welcome to bcfg2-info\n' 'Type "help" for more information') except KeyboardInterrupt: print("\nCtrl-C pressed, exiting...") class Update(InfoCmd): """ Process pending filesystem events """ only_interactive = True def run(self, _): self.core.fam.handle_events_in_interval(0.1) class ProfileTemplates(InfoCmd): """ Benchmark template rendering times """ options = [ Bcfg2.Options.Option( "--clients", type=Bcfg2.Options.Types.comma_list, help="Benchmark templates for the named clients"), Bcfg2.Options.Option( "--runs", help="Number of rendering passes per template", default=5, type=int), Bcfg2.Options.PositionalArgument( "templates", nargs="*", default=[], help="Profile the named templates instead of all templates")] def profile_entry(self, entry, metadata, runs=5): """ Profile a single entry """ times = [] for i in range(runs): # 
pylint: disable=W0612 start = time.time() try: self.core.Bind(entry, metadata) times.append(time.time() - start) except: # pylint: disable=W0702 break if times: avg = sum(times) / len(times) if avg: self.logger.debug(" %s: %.02f sec" % (metadata.hostname, avg)) return times def profile_struct(self, struct, metadata, templates=None, runs=5): """ Profile all entries in a given structure """ times = dict() entries = struct.xpath("//Path") entry_count = 0 for entry in entries: entry_count += 1 if templates is None or entry.get("name") in templates: self.logger.info("Rendering Path:%s (%s/%s)..." % (entry.get("name"), entry_count, len(entries))) times.setdefault(entry.get("name"), self.profile_entry(entry, metadata, runs=runs)) return times def profile_client(self, metadata, templates=None, runs=5): """ Profile all structures for a given client """ structs = self.core.GetStructures(metadata) struct_count = 0 times = dict() for struct in structs: struct_count += 1 self.logger.info("Rendering templates from structure %s:%s " "(%s/%s)" % (struct.tag, struct.get("name"), struct_count, len(structs))) times.update(self.profile_struct(struct, metadata, templates=templates, runs=runs)) return times def stdev(self, nums): """ Calculate the standard deviation of a list of numbers """ mean = float(sum(nums)) / len(nums) return math.sqrt(sum((n - mean) ** 2 for n in nums) / float(len(nums))) def run(self, setup): clients = self.get_client_list(setup.clients) times = dict() client_count = 0 for client in clients: client_count += 1 self.logger.info("Rendering templates for client %s (%s/%s)" % (client, client_count, len(clients))) times.update(self.profile_client(self.core.build_metadata(client), templates=setup.templates, runs=setup.runs)) # print out per-file results tmpltimes = [] for tmpl, ptimes in times.items(): try: mean = float(sum(ptimes)) / len(ptimes) except ZeroDivisionError: continue ptimes.sort() median = ptimes[len(ptimes) / 2] std = self.stdev(ptimes) if mean > 0.01 or median > 0.01 or std > 1 or setup.templates: tmpltimes.append((tmpl, mean, median, std)) print("%-50s %-9s %-11s %6s" % ("Template", "Mean Time", "Median Time", "σ")) for info in reversed(sorted(tmpltimes, key=operator.itemgetter(1))): print("%-50s %9.02f %11.02f %6.02f" % info) if HAS_PROFILE: class Profile(InfoCmd): """ Profile a single bcfg2-info command """ options = [Bcfg2.Options.PositionalArgument("command"), Bcfg2.Options.PositionalArgument("args", nargs="*")] def run(self, setup): prof = profile.Profile() cls = self.core.commands[setup.command] prof.runcall(cls, " ".join(pipes.quote(a) for a in setup.args)) display_trace(prof) class InfoCore(Bcfg2.Server.Core.Core): """Main class for bcfg2-info.""" def __init__(self, cli): Bcfg2.Server.Core.Core.__init__(self) self.cli = cli def get_locals(self): """ Expose the local variables of the core to subcommands that need to reference them (i.e., the interactive interpreter) """ return locals() def run(self): self.load_plugins() self.block_for_fam_events(handle_events=True) def _run(self): pass def _block(self): pass def shutdown(self): Bcfg2.Server.Core.Core.shutdown(self) class CLI(cmd.Cmd, Bcfg2.Options.CommandRegistry): """ The bcfg2-info CLI """ options = [Bcfg2.Options.BooleanOption("-p", "--profile", help="Profile")] def __init__(self): cmd.Cmd.__init__(self) Bcfg2.Options.CommandRegistry.__init__(self) self.prompt = 'bcfg2-info> ' self.register_commands(globals().values(), parent=InfoCmd) parser = Bcfg2.Options.get_parser( description="Inspect a running Bcfg2 server", 
components=[self, InfoCore]) parser.add_options(self.subcommand_options) parser.parse() if Bcfg2.Options.setup.profile and HAS_PROFILE: prof = profile.Profile() self.core = prof.runcall(InfoCore) display_trace(prof) else: if Bcfg2.Options.setup.profile: print("Profiling functionality not available.") self.core = InfoCore(self) for command in self.commands.values(): command.core = self.core def run(self): """ Run bcfg2-info """ try: if Bcfg2.Options.setup.subcommand != 'help': self.core.run() return self.runcommand() finally: self.shutdown() def shutdown(self): Bcfg2.Options.CommandRegistry.shutdown(self) self.core.shutdown() def get_names(self): """ Overwrite cmd.Cmd.get_names to use the instance to get the methods and not the class, because the CommandRegistry dynamically adds methods for the registed subcommands. """ return dir(self) def onecmd(self, line): """ Overwrite cmd.Cmd.onecmd to catch all exceptions (except SystemExit) print an error message and continue cmdloop. """ try: cmd.Cmd.onecmd(self, line) except SystemExit: raise except: # pylint: disable=W0702 exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) self.stdout.write(''.join(lines)) src/lib/Bcfg2/Server/Lint/000077500000000000000000000000001303523157100155125ustar00rootroot00000000000000src/lib/Bcfg2/Server/Lint/AWSTags.py000066400000000000000000000021571303523157100173420ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin to check all given :ref:`AWSTags ` patterns for validity.""" import re import sys import Bcfg2.Server.Lint class AWSTags(Bcfg2.Server.Lint.ServerPlugin): """ ``bcfg2-lint`` plugin to check all given :ref:`AWSTags ` patterns for validity. """ __serverplugin__ = 'AWSTags' def Run(self): cfg = self.core.plugins['AWSTags'].config for entry in cfg.xdata.xpath('//Tag'): self.check(entry, "name") if entry.get("value"): self.check(entry, "value") @classmethod def Errors(cls): return {"pattern-fails-to-initialize": "error"} def check(self, entry, attr): """ Check a single attribute (``name`` or ``value``) of a single entry for validity. """ try: re.compile(entry.get(attr)) except re.error: self.LintError("pattern-fails-to-initialize", "'%s' regex could not be compiled: %s\n %s" % (attr, sys.exc_info()[1], entry.get("name"))) src/lib/Bcfg2/Server/Lint/Bundler.py000066400000000000000000000044021303523157100174570ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`Bundler ` """ from Bcfg2.Server.Lint import ServerPlugin class Bundler(ServerPlugin): """ Perform various :ref:`Bundler ` checks. """ __serverplugin__ = 'Bundler' def Run(self): self.missing_bundles() for bundle in self.core.plugins['Bundler'].entries.values(): if self.HandlesFile(bundle.name): self.bundle_names(bundle) @classmethod def Errors(cls): return {"bundle-not-found": "error", "unused-bundle": "warning", "explicit-bundle-name": "error", "genshi-extension-bundle": "error"} def missing_bundles(self): """ Find bundles listed in Metadata but not implemented in Bundler. 
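    A condensed sketch of the comparison this check performs, assuming
    ``groupdata`` is the parsed ``groups.xml`` tree and ``allbundles`` the
    bundle names known to Bundler (names here mirror the code below)::

        referenced = set(b.get("name") for b in groupdata.findall("//Bundle"))
        defined = set(allbundles)
        missing = referenced - defined   # reported as bundle-not-found
        unused = defined - referenced    # reported as unused-bundle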
""" if self.files is None: # when given a list of files on stdin, this check is # useless, so skip it groupdata = self.metadata.groups_xml.xdata ref_bundles = set([b.get("name") for b in groupdata.findall("//Bundle")]) allbundles = self.core.plugins['Bundler'].bundles.keys() for bundle in ref_bundles: if bundle not in allbundles: self.LintError("bundle-not-found", "Bundle %s referenced, but does not exist" % bundle) for bundle in allbundles: if bundle not in ref_bundles: self.LintError("unused-bundle", "Bundle %s defined, but is not referenced " "in Metadata" % bundle) def bundle_names(self, bundle): """ Verify that deprecated bundle .genshi bundles and explicit bundle names aren't used """ if bundle.xdata.get('name'): self.LintError("explicit-bundle-name", "Deprecated explicit bundle name in %s" % bundle.name) if bundle.name.endswith(".genshi"): self.LintError("genshi-extension-bundle", "Bundle %s uses deprecated .genshi extension" % bundle.name) src/lib/Bcfg2/Server/Lint/Cfg.py000066400000000000000000000112421303523157100165630ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`Cfg ` """ import os import Bcfg2.Options from fnmatch import fnmatch from Bcfg2.Server.Lint import ServerPlugin from Bcfg2.Server.Plugins.Cfg import CfgGenerator class Cfg(ServerPlugin): """ warn about Cfg issues """ __serverplugin__ = 'Cfg' def Run(self): for basename, entry in list(self.core.plugins['Cfg'].entries.items()): self.check_pubkey(basename, entry) self.check_missing_files() self.check_conflicting_handlers() @classmethod def Errors(cls): return {"no-pubkey-xml": "warning", "unknown-cfg-files": "error", "extra-cfg-files": "error", "multiple-global-handlers": "error"} def check_conflicting_handlers(self): """ Check that a single entryset doesn't have multiple non-specific (i.e., 'all') handlers. """ cfg = self.core.plugins['Cfg'] for eset in cfg.entries.values(): alls = [e for e in eset.entries.values() if (e.specific.all and issubclass(e.__class__, CfgGenerator))] if len(alls) > 1: self.LintError("multiple-global-handlers", "%s has multiple global handlers: %s" % (eset.path, ", ".join(os.path.basename(e.name) for e in alls))) def check_pubkey(self, basename, entry): """ check that privkey.xml files have corresponding pubkey.xml files """ if "privkey.xml" not in entry.entries: return privkey = entry.entries["privkey.xml"] if not self.HandlesFile(privkey.name): return pubkey = basename + ".pub" if pubkey not in self.core.plugins['Cfg'].entries: self.LintError("no-pubkey-xml", "%s has no corresponding pubkey.xml at %s" % (basename, pubkey)) else: pubset = self.core.plugins['Cfg'].entries[pubkey] if "pubkey.xml" not in pubset.entries: self.LintError("no-pubkey-xml", "%s has no corresponding pubkey.xml at %s" % (basename, pubkey)) def _list_path_components(self, path): """ Get a list of all components of a path. E.g., ``self._list_path_components("/foo/bar/foobaz")`` would return ``["foo", "bar", "foo", "baz"]``. 
The list is not guaranteed to be in order.""" rv = [] remaining, component = os.path.split(path) while component != '': rv.append(component) remaining, component = os.path.split(remaining) return rv def check_missing_files(self): """ check that all files on the filesystem are known to Cfg """ cfg = self.core.plugins['Cfg'] # first, collect ignore patterns from handlers ignore = set() for hdlr in Bcfg2.Options.setup.cfg_handlers: ignore.update(hdlr.__ignore__) # next, get a list of all non-ignored files on the filesystem all_files = set() for root, _, files in os.walk(cfg.data): for fname in files: fpath = os.path.join(root, fname) # check against the handler ignore patterns and the # global FAM ignore list if (not any(fname.endswith("." + i) for i in ignore) and not any(fnmatch(fpath, p) for p in Bcfg2.Options.setup.ignore_files) and not any(fnmatch(c, p) for p in Bcfg2.Options.setup.ignore_files for c in self._list_path_components(fpath))): all_files.add(fpath) # next, get a list of all files known to Cfg cfg_files = set() for root, eset in cfg.entries.items(): cfg_files.update(os.path.join(cfg.data, root.lstrip("/"), fname) for fname in eset.entries.keys()) # finally, compare the two unknown_files = all_files - cfg_files extra_files = cfg_files - all_files if unknown_files: self.LintError( "unknown-cfg-files", "Files on the filesystem could not be understood by Cfg: %s" % "; ".join(unknown_files)) if extra_files: self.LintError( "extra-cfg-files", "Cfg has entries for files that do not exist on the " "filesystem: %s\nThis is probably a bug." % "; ".join(extra_files)) src/lib/Bcfg2/Server/Lint/Comments.py000066400000000000000000000363071303523157100176620ustar00rootroot00000000000000""" Check files for various required comments. """ import os import lxml.etree import Bcfg2.Options import Bcfg2.Server.Lint from Bcfg2.Server import XI_NAMESPACE from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator \ import CfgPlaintextGenerator from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML class Comments(Bcfg2.Server.Lint.ServerPlugin): """ The Comments lint plugin checks files for header comments that give information about the files. For instance, you can require SVN keywords in a comment, or require the name of the maintainer of a Genshi template, and so on. 
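    As an illustration (the keyword value and address are hypothetical), a
    file header like the following would satisfy a required ``Id`` keyword
    and a required ``Maintainer:`` comment::

        # $Id: motd 1234 2013-01-01 00:00:00Z someuser $
        # Maintainer: someuser@example.com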
""" options = Bcfg2.Server.Lint.ServerPlugin.options + [ Bcfg2.Options.Option( cf=("Comments", "global_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for all file types"), Bcfg2.Options.Option( cf=("Comments", "global_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for all file types"), Bcfg2.Options.Option( cf=("Comments", "bundler_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for non-templated bundles"), Bcfg2.Options.Option( cf=("Comments", "bundler_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for non-templated bundles"), Bcfg2.Options.Option( cf=("Comments", "genshibundler_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for templated bundles"), Bcfg2.Options.Option( cf=("Comments", "genshibundler_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for templated bundles"), Bcfg2.Options.Option( cf=("Comments", "properties_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for Properties files"), Bcfg2.Options.Option( cf=("Comments", "properties_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for Properties files"), Bcfg2.Options.Option( cf=("Comments", "cfg_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for non-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "cfg_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for non-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "genshi_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for Genshi-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "genshi_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for Genshi-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "cheetah_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for Cheetah-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "cheetah_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for Cheetah-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "jinja2_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for Jinja2-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "jinja2_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for Jinja2-templated Cfg files"), Bcfg2.Options.Option( cf=("Comments", "infoxml_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for info.xml files"), Bcfg2.Options.Option( cf=("Comments", "infoxml_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for info.xml files"), Bcfg2.Options.Option( cf=("Comments", "probes_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for probes"), Bcfg2.Options.Option( cf=("Comments", "probes_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for probes"), Bcfg2.Options.Option( cf=("Comments", "metadata_keywords"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required keywords for metadata files"), Bcfg2.Options.Option( cf=("Comments", "metadata_comments"), type=Bcfg2.Options.Types.comma_list, default=[], help="Required comments for metadata files")] def 
__init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) self.config_cache = {} def Run(self): self.check_bundles() self.check_properties() self.check_metadata() self.check_cfg() self.check_probes() @classmethod def Errors(cls): return {"unexpanded-keywords": "warning", "keywords-not-found": "warning", "comments-not-found": "warning", "broken-xinclude-chain": "warning"} def required_keywords(self, rtype): """ Given a file type, fetch the list of required VCS keywords from the bcfg2-lint config. Valid file types are documented in :manpage:`bcfg2-lint.conf(5)`. :param rtype: The file type :type rtype: string :returns: list - the required items """ return self.required_items(rtype, "keyword") def required_comments(self, rtype): """ Given a file type, fetch the list of required comments from the bcfg2-lint config. Valid file types are documented in :manpage:`bcfg2-lint.conf(5)`. :param rtype: The file type :type rtype: string :returns: list - the required items """ return self.required_items(rtype, "comment") def required_items(self, rtype, itype): """ Given a file type and item type (``comment`` or ``keyword``), fetch the list of required items from the bcfg2-lint config. Valid file types are documented in :manpage:`bcfg2-lint.conf(5)`. :param rtype: The file type :type rtype: string :param itype: The item type (``comment`` or ``keyword``) :type itype: string :returns: list - the required items """ if itype not in self.config_cache: self.config_cache[itype] = {} if rtype not in self.config_cache[itype]: rv = [] rv.extend(getattr(Bcfg2.Options.setup, "global_%ss" % itype)) local_reqs = getattr(Bcfg2.Options.setup, "%s_%ss" % (rtype.lower(), itype)) if local_reqs == ['']: # explicitly specified as empty rv = [] else: rv.extend(local_reqs) self.config_cache[itype][rtype] = rv return self.config_cache[itype][rtype] def check_bundles(self): """ Check bundle files for required comments. """ if 'Bundler' in self.core.plugins: for bundle in self.core.plugins['Bundler'].entries.values(): xdata = None rtype = "" try: xdata = lxml.etree.XML(bundle.data) rtype = "bundler" except (lxml.etree.XMLSyntaxError, AttributeError): xdata = \ lxml.etree.parse(bundle.template.filepath).getroot() rtype = "genshibundler" self.check_xml(bundle.name, xdata, rtype) def check_properties(self): """ Check Properties files for required comments. """ if 'Properties' in self.core.plugins: props = self.core.plugins['Properties'] for propfile, pdata in props.entries.items(): if os.path.splitext(propfile)[1] == ".xml": self.check_xml(pdata.name, pdata.xdata, 'properties') def has_all_xincludes(self, mfile): """ Return True if :attr:`Bcfg2.Server.Lint.Plugin.files` includes all XIncludes listed in the specified metadata type, false otherwise. In other words, this returns True if bcfg2-lint is dealing with complete metadata. :param mfile: The metadata file ("clients.xml" or "groups.xml") to check for XIncludes :type mfile: string :returns: bool """ if self.files is None: return True else: path = os.path.join(self.metadata.data, mfile) if path in self.files: xdata = lxml.etree.parse(path) for el in xdata.findall('./%sinclude' % XI_NAMESPACE): if not self.has_all_xincludes(el.get('href')): self.LintError("broken-xinclude-chain", "Broken XInclude chain: could not " "include %s" % path) return False return True def check_metadata(self): """ Check Metadata files for required comments. 
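        For example, with ``metadata_comments = Maintainer`` set in the
        ``[Comments]`` section of the lint configuration, ``groups.xml``
        would need a header comment along these lines (the address is
        hypothetical)::

            <!-- Maintainer: ops-team@example.com -->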
""" if self.has_all_xincludes("groups.xml"): self.check_xml(os.path.join(self.metadata.data, "groups.xml"), self.metadata.groups_xml.data, "metadata") if hasattr(self.metadata, "clients_xml"): if self.has_all_xincludes("clients.xml"): self.check_xml(os.path.join(self.metadata.data, "clients.xml"), self.metadata.clients_xml.data, "metadata") def check_cfg(self): """ Check Cfg files and ``info.xml`` files for required comments. """ if 'Cfg' in self.core.plugins: for entryset in self.core.plugins['Cfg'].entries.values(): for entry in entryset.entries.values(): rtype = None if isinstance(entry, CfgGenshiGenerator): rtype = "genshi" elif isinstance(entry, CfgPlaintextGenerator): rtype = "cfg" elif isinstance(entry, CfgCheetahGenerator): rtype = "cheetah" elif isinstance(entry, CfgJinja2Generator): rtype = "jinja2" elif isinstance(entry, CfgInfoXML): self.check_xml(entry.infoxml.name, entry.infoxml.xdata, "infoxml") continue if rtype: self.check_plaintext(entry.name, entry.data, rtype) def check_probes(self): """ Check Probes for required comments """ if 'Probes' in self.core.plugins: for probe in self.core.plugins['Probes'].probes.entries.values(): self.check_plaintext(probe.name, probe.data, "probes") def check_xml(self, filename, xdata, rtype): """ Generic check to check an XML file for required comments. :param filename: The filename :type filename: string :param xdata: The file data :type xdata: lxml.etree._Element :param rtype: The type of file. Available types are documented in :manpage:`bcfg2-lint.conf(5)`. :type rtype: string """ self.check_lines(filename, [str(el) for el in xdata.getiterator(lxml.etree.Comment)], rtype) def check_plaintext(self, filename, data, rtype): """ Generic check to check a plain text file for required comments. :param filename: The filename :type filename: string :param data: The file data :type data: string :param rtype: The type of file. Available types are documented in :manpage:`bcfg2-lint.conf(5)`. :type rtype: string """ self.check_lines(filename, data.splitlines(), rtype) def check_lines(self, filename, lines, rtype): """ Generic header check for a set of lines. :param filename: The filename :type filename: string :param lines: The data to check :type lines: list of strings :param rtype: The type of file. Available types are documented in :manpage:`bcfg2-lint.conf(5)`. :type rtype: string """ if self.HandlesFile(filename): # found is trivalent: # False == keyword not found # None == keyword found but not expanded # True == keyword found and expanded found = dict((k, False) for k in self.required_keywords(rtype)) for line in lines: # we check for both '$:' and '$$' to see # if the keyword just hasn't been expanded for (keyword, status) in found.items(): if not status: if '$%s:' % keyword in line: found[keyword] = True elif '$%s$' % keyword in line: found[keyword] = None unexpanded = [keyword for (keyword, status) in found.items() if status is None] if unexpanded: self.LintError("unexpanded-keywords", "%s: Required keywords(s) found but not " "expanded: %s" % (filename, ", ".join(unexpanded))) missing = [keyword for (keyword, status) in found.items() if status is False] if missing: self.LintError("keywords-not-found", "%s: Required keywords(s) not found: $%s$" % (filename, "$, $".join(missing))) # next, check for required comments. 
found is just # boolean found = dict((k, False) for k in self.required_comments(rtype)) for line in lines: for (comment, status) in found.items(): if not status: found[comment] = comment in line missing = [comment for (comment, status) in found.items() if status is False] if missing: self.LintError("comments-not-found", "%s: Required comments(s) not found: %s" % (filename, ", ".join(missing))) src/lib/Bcfg2/Server/Lint/Crypto.py000066400000000000000000000051631303523157100173510ustar00rootroot00000000000000""" Check for data that claims to be encrypted, but is not. """ import os import lxml.etree import Bcfg2.Options from Bcfg2.Server.Lint import ServerlessPlugin from Bcfg2.Server.Encryption import is_encrypted class Crypto(ServerlessPlugin): """ Check for templated scripts or executables. """ def Run(self): if os.path.exists(os.path.join(Bcfg2.Options.setup.repository, "Cfg")): self.check_cfg() if os.path.exists(os.path.join(Bcfg2.Options.setup.repository, "Properties")): self.check_properties() # TODO: check all XML files @classmethod def Errors(cls): return {"unencrypted-cfg": "error", "empty-encrypted-properties": "error", "unencrypted-properties": "error"} def check_cfg(self): """ Check for Cfg files that end in .crypt but aren't encrypted """ for root, _, files in os.walk( os.path.join(Bcfg2.Options.setup.repository, "Cfg")): for fname in files: fpath = os.path.join(root, fname) if self.HandlesFile(fpath) and fname.endswith(".crypt"): if not is_encrypted(open(fpath).read()): self.LintError( "unencrypted-cfg", "%s is a .crypt file, but it is not encrypted" % fpath) def check_properties(self): """ Check for Properties data that has an ``encrypted`` attribute but aren't encrypted """ for root, _, files in os.walk( os.path.join(Bcfg2.Options.setup.repository, "Properties")): for fname in files: fpath = os.path.join(root, fname) if self.HandlesFile(fpath) and fname.endswith(".xml"): xdata = lxml.etree.parse(fpath) for elt in xdata.xpath('//*[@encrypted]'): if not elt.text: self.LintError( "empty-encrypted-properties", "Element in %s has an 'encrypted' attribute, " "but no text content: %s" % (fpath, self.RenderXML(elt))) elif not is_encrypted(elt.text): self.LintError( "unencrypted-properties", "Element in %s has an 'encrypted' attribute, " "but is not encrypted: %s" % (fpath, self.RenderXML(elt))) src/lib/Bcfg2/Server/Lint/Genshi.py000066400000000000000000000040531303523157100173030ustar00rootroot00000000000000""" Check Genshi templates for syntax errors. """ import sys import Bcfg2.Server.Lint from genshi.template import TemplateLoader, NewTextTemplate, MarkupTemplate, \ TemplateSyntaxError from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator class Genshi(Bcfg2.Server.Lint.ServerPlugin): """ Check Genshi templates for syntax errors. """ def Run(self): if 'Cfg' in self.core.plugins: self.check_cfg() if 'Bundler' in self.core.plugins: self.check_bundler() @classmethod def Errors(cls): return {"genshi-syntax-error": "error", "unknown-genshi-error": "error"} def check_template(self, loader, fname, cls=None): """ Generic check for all genshi templates (XML and text) """ try: loader.load(fname, cls=cls) except TemplateSyntaxError: err = sys.exc_info()[1] self.LintError("genshi-syntax-error", "Genshi syntax error in %s: %s" % (fname, err)) except: err = sys.exc_info()[1] self.LintError("unknown-genshi-error", "Unknown Genshi error in %s: %s" % (fname, err)) def check_cfg(self): """ Check genshi templates in Cfg for syntax errors. 
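        A rough standalone equivalent for a single text template (the path is
        illustrative)::

            from genshi.template import TemplateLoader, NewTextTemplate

            loader = TemplateLoader()
            # raises TemplateSyntaxError if the template is malformed
            loader.load('/var/lib/bcfg2/Cfg/etc/motd/motd.genshi',
                        cls=NewTextTemplate)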
""" for entryset in self.core.plugins['Cfg'].entries.values(): for entry in entryset.entries.values(): if (self.HandlesFile(entry.name) and isinstance(entry, CfgGenshiGenerator) and not entry.template): self.check_template(entry.loader, entry.name, cls=NewTextTemplate) def check_bundler(self): """ Check templates in Bundler for syntax errors. """ loader = TemplateLoader() for entry in self.core.plugins['Bundler'].entries.values(): if (self.HandlesFile(entry.name) and entry.template is not None): self.check_template(loader, entry.name, cls=MarkupTemplate) src/lib/Bcfg2/Server/Lint/GroupNames.py000066400000000000000000000076561303523157100201620ustar00rootroot00000000000000""" Ensure that all named groups are valid group names. """ import os import re import Bcfg2.Server.Lint class GroupNames(Bcfg2.Server.Lint.ServerPlugin): """ Ensure that all named groups are valid group names. """ #: A string regex that matches only valid group names. Currently, #: a group name is considered valid if it contains only #: non-whitespace characters. pattern = r'\S+$' #: A compiled regex for #: :attr:`Bcfg2.Server.Lint.GroupNames.GroupNames.pattern` valid = re.compile(r'^' + pattern) def Run(self): self.check_metadata() if 'Rules' in self.core.plugins: self.check_rules() if 'Bundler' in self.core.plugins: self.check_bundles() if 'GroupPatterns' in self.core.plugins: self.check_grouppatterns() if 'Cfg' in self.core.plugins: self.check_cfg() @classmethod def Errors(cls): return {"invalid-group-name": "error"} def check_rules(self): """ Check groups used in the Rules plugin for validity. """ for rules in self.core.plugins['Rules'].entries.values(): if not self.HandlesFile(rules.name): continue xdata = rules.pnode.data self.check_entries(xdata.xpath("//Group"), os.path.join(Bcfg2.Options.setup.repository, rules.name)) def check_bundles(self): """ Check groups used in the Bundler plugin for validity. """ for bundle in self.core.plugins['Bundler'].entries.values(): if self.HandlesFile(bundle.name) and bundle.template is None: self.check_entries(bundle.xdata.xpath("//Group"), bundle.name) def check_metadata(self): """ Check groups used or declared in the Metadata plugin for validity. """ self.check_entries(self.metadata.groups_xml.xdata.xpath("//Group"), os.path.join(Bcfg2.Options.setup.repository, self.metadata.groups_xml.name)) def check_grouppatterns(self): """ Check groups used in the GroupPatterns plugin for validity """ cfg = self.core.plugins['GroupPatterns'].config if not self.HandlesFile(cfg.name): return for grp in cfg.xdata.xpath('//GroupPattern/Group'): if not self.valid.search(grp.text): self.LintError("invalid-group-name", "Invalid group name in %s: %s" % (cfg.name, self.RenderXML(grp, keep_text=True))) def check_cfg(self): """ Check groups used in group-specific files in the Cfg plugin for validity. """ for root, _, files in os.walk(self.core.plugins['Cfg'].data): for fname in files: basename = os.path.basename(root) if (re.search(r'^%s\.G\d\d_' % basename, fname) and not re.search(r'^%s\.G\d\d_' % basename + self.pattern, fname)): self.LintError("invalid-group-name", "Invalid group name referenced in %s" % os.path.join(root, fname)) def check_entries(self, entries, fname): """ Check a generic list of XML entries for tags with invalid name attributes. :param entries: A list of XML tags whose ``name`` attributes will be validated. 
:type entries: list of lxml.etree._Element :param fname: The filename the entry list came from :type fname: string """ for grp in entries: if not self.valid.search(grp.get("name")): self.LintError("invalid-group-name", "Invalid group name in %s: %s" % (fname, self.RenderXML(grp))) src/lib/Bcfg2/Server/Lint/GroupPatterns.py000066400000000000000000000032741303523157100207070ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`GroupPatterns ` """ import sys from Bcfg2.Server.Lint import ServerPlugin from Bcfg2.Server.Plugins.GroupPatterns import PatternMap, \ PatternInitializationError class GroupPatterns(ServerPlugin): """ ``bcfg2-lint`` plugin to check all given :ref:`GroupPatterns ` patterns for validity. This is simply done by trying to create a :class:`Bcfg2.Server.Plugins.GroupPatterns.PatternMap` object for each pattern, and catching exceptions and presenting them as ``bcfg2-lint`` errors.""" __serverplugin__ = 'GroupPatterns' def Run(self): cfg = self.core.plugins['GroupPatterns'].config for entry in cfg.xdata.xpath('//GroupPattern'): groups = [g.text for g in entry.findall('Group')] self.check(entry, groups, ptype='NamePattern') self.check(entry, groups, ptype='NameRange') @classmethod def Errors(cls): return {"pattern-fails-to-initialize": "error"} def check(self, entry, groups, ptype="NamePattern"): """ Check a single pattern for validity """ for el in entry.findall(ptype): pat = el.text try: if ptype == "NamePattern": PatternMap(pat, None, groups) else: PatternMap(None, pat, groups) except PatternInitializationError: err = sys.exc_info()[1] self.LintError("pattern-fails-to-initialize", "Failed to initialize %s %s for %s: %s" % (ptype, pat, entry.get('pattern'), err)) src/lib/Bcfg2/Server/Lint/InfoXML.py000066400000000000000000000056131303523157100173450ustar00rootroot00000000000000""" Ensure that all config files have a valid info.xml file. """ import os import Bcfg2.Options import Bcfg2.Server.Lint from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML class InfoXML(Bcfg2.Server.Lint.ServerPlugin): """ Ensure that all config files have a valid info.xml file. This plugin can check for: * Missing ``info.xml`` files; * Use of deprecated ``info``/``:info`` files; * Paranoid mode disabled in an ``info.xml`` file; * Required attributes missing from ``info.xml`` """ __serverplugin__ = 'Cfg' options = Bcfg2.Server.Lint.ServerPlugin.options + [ Bcfg2.Options.Common.default_paranoid, Bcfg2.Options.Option( cf=("InfoXML", "required_attrs"), type=Bcfg2.Options.Types.comma_list, default=["owner", "group", "mode"], help="Attributes to require on tags")] def Run(self): if 'Cfg' not in self.core.plugins: return for filename, entryset in self.core.plugins['Cfg'].entries.items(): infoxml_fname = os.path.join(entryset.path, "info.xml") if self.HandlesFile(infoxml_fname): found = False for entry in entryset.entries.values(): if isinstance(entry, CfgInfoXML): self.check_infoxml(infoxml_fname, entry.infoxml.xdata) found = True if not found: self.LintError("no-infoxml", "No info.xml found for %s" % filename) @classmethod def Errors(cls): return {"no-infoxml": "warning", "paranoid-false": "warning", "required-infoxml-attrs-missing": "error"} def check_infoxml(self, fname, xdata): """ Verify that info.xml contains everything it should. 
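        For reference, a minimal ``info.xml`` that satisfies the default
        required attributes (``owner``, ``group``, ``mode``) and keeps
        paranoid mode enabled (the values are only an example)::

            <FileInfo>
              <Info owner="root" group="root" mode="0644" paranoid="true"/>
            </FileInfo>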
""" for info in xdata.getroottree().findall("//Info"): required = [] required = Bcfg2.Options.setup.required_attrs missing = [attr for attr in required if info.get(attr) is None] if missing: self.LintError("required-infoxml-attrs-missing", "Required attribute(s) %s not found in %s:%s" % (",".join(missing), fname, self.RenderXML(info))) if ((Bcfg2.Options.setup.default_paranoid == "true" and info.get("paranoid") is not None and info.get("paranoid").lower() == "false") or (Bcfg2.Options.setup.default_paranoid == "false" and (info.get("paranoid") is None or info.get("paranoid").lower() != "true"))): self.LintError("paranoid-false", "Paranoid must be true in %s:%s" % (fname, self.RenderXML(info))) src/lib/Bcfg2/Server/Lint/Jinja2.py000066400000000000000000000027621303523157100172100ustar00rootroot00000000000000""" Check Jinja2 templates for syntax errors. """ import sys import Bcfg2.Server.Lint from jinja2 import Template, TemplateSyntaxError from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator class Jinja2(Bcfg2.Server.Lint.ServerPlugin): """ Check Jinja2 templates for syntax errors. """ def Run(self): if 'Cfg' in self.core.plugins: self.check_cfg() @classmethod def Errors(cls): return {"jinja2-syntax-error": "error", "unknown-jinja2-error": "error"} def check_template(self, entry): """ Generic check for all jinja2 templates """ try: Template(entry.data.decode(entry.encoding)) except TemplateSyntaxError: err = sys.exc_info()[1] self.LintError("jinja2-syntax-error", "Jinja2 syntax error in %s: %s" % (entry.name, err)) except: err = sys.exc_info()[1] self.LintError("unknown-jinja2-error", "Unknown Jinja2 error in %s: %s" % (entry.name, err)) def check_cfg(self): """ Check jinja2 templates in Cfg for syntax errors. """ for entryset in self.core.plugins['Cfg'].entries.values(): for entry in entryset.entries.values(): if (self.HandlesFile(entry.name) and isinstance(entry, CfgJinja2Generator)): self.check_template(entry) src/lib/Bcfg2/Server/Lint/MergeFiles.py000066400000000000000000000124571303523157100201170ustar00rootroot00000000000000""" find Probes or Cfg files with multiple similar files that might be merged into one """ import os import copy from difflib import SequenceMatcher import Bcfg2.Server.Lint from Bcfg2.Server.Plugins.Cfg import CfgGenerator from Bcfg2.Utils import is_string def threshold(val): """ Option type processor to accept either a percentage (e.g., "threshold=75") or a ratio (e.g., "threshold=.75") """ rv = float(val) if rv > 1: rv /= 100 return rv class MergeFiles(Bcfg2.Server.Lint.ServerPlugin): """ find Probes or Cfg files with multiple similar files that might be merged into one """ options = Bcfg2.Server.Lint.ServerPlugin.options + [ Bcfg2.Options.Option( cf=("MergeFiles", "threshold"), default="0.75", type=threshold, help="The threshold at which to suggest merging files and probes")] def Run(self): if 'Cfg' in self.core.plugins: self.check_cfg() if 'Probes' in self.core.plugins: self.check_probes() @classmethod def Errors(cls): return {"merge-cfg": "warning", "identical-cfg": "error", "merge-probes": "warning", "identical-probes": "error"} def check_cfg(self): """ check Cfg for similar files """ # ignore non-specific Cfg entries, e.g., privkey.xml ignore = [] for hdlr in Bcfg2.Options.setup.cfg_handlers: if not hdlr.__specific__: ignore.extend(hdlr.__basenames__) for filename, entryset in self.core.plugins['Cfg'].entries.items(): candidates = dict([(f, e) for f, e in entryset.entries.items() if (isinstance(e, CfgGenerator) and is_string(e.data, 
Bcfg2.Options.setup.encoding) and f not in ignore and not f.endswith(".crypt"))]) similar, identical = self.get_similar(candidates) for mset in similar: self.LintError("merge-cfg", "The following files are similar: %s. " "Consider merging them into a single Genshi " "template." % ", ".join([os.path.join(filename, p) for p in mset])) for mset in identical: self.LintError("identical-cfg", "The following files are identical: %s. " "Strongly consider merging them into a single " "Genshi template." % ", ".join([os.path.join(filename, p) for p in mset])) def check_probes(self): """ check Probes for similar files """ probes = self.core.plugins['Probes'].probes.entries similar, identical = self.get_similar(probes) for mset in similar: self.LintError("merge-probes", "The following probes are similar: %s. " "Consider merging them into a single probe." % ", ".join([p for p in mset])) for mset in identical: self.LintError("identical-probes", "The following probes are identical: %s. " "Strongly consider merging them into a single " "probe." % ", ".join([p for p in mset])) def get_similar(self, entries): """ Get a list of similar files from the entry dict. Return value is a list of lists, each of which gives the filenames of similar files """ similar = [] identical = [] elist = list(entries.items()) while elist: rv = self._find_similar(elist.pop(0), copy.copy(elist)) if rv[0]: similar.append(rv[0]) if rv[1]: identical.append(rv[1]) elist = [(fname, fdata) for fname, fdata in elist if fname not in rv[0] | rv[1]] return similar, identical def _find_similar(self, ftuple, others): """ Find files similar to the one described by ftupe in the list of other files. ftuple is a tuple of (filename, data); others is a list of such tuples. threshold is a float between 0 and 1 that describes how similar two files much be to rate as 'similar' """ fname, fdata = ftuple similar = set() identical = set() for cname, cdata in others: seqmatch = SequenceMatcher(None, fdata.data, cdata.data) # perform progressively more expensive comparisons if seqmatch.real_quick_ratio() == 1.0: identical.add(cname) elif ( seqmatch.real_quick_ratio() > Bcfg2.Options.setup.threshold and seqmatch.quick_ratio() > Bcfg2.Options.setup.threshold and seqmatch.ratio() > Bcfg2.Options.setup.threshold): similar.add(cname) if similar: similar.add(fname) if identical: identical.add(fname) return (similar, identical) src/lib/Bcfg2/Server/Lint/Metadata.py000066400000000000000000000166751303523157100176230ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`Metadata ` """ from Bcfg2.Server.Lint import ServerPlugin class Metadata(ServerPlugin): """ ``bcfg2-lint`` plugin for :ref:`Metadata `. This checks for several things: * ```` tags nested inside other ```` tags; * Deprecated options (like ``location="floating"``); * Profiles that don't exist, or that aren't profile groups; * Groups or clients that are defined multiple times; * Multiple default groups or a default group that isn't a profile group. 
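    For example (hostnames are hypothetical), this ``groups.xml`` fragment
    would trigger the nested-client check::

        <Group name="web-servers">
          <Client name="www1.example.com">
            <Client name="www2.example.com"/>
          </Client>
        </Group>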
""" __serverplugin__ = 'Metadata' def Run(self): self.nested_clients() self.deprecated_options() self.bogus_profiles() self.duplicate_groups() self.duplicate_default_groups() self.duplicate_clients() self.default_is_profile() @classmethod def Errors(cls): return {"nested-client-tags": "warning", "deprecated-clients-options": "warning", "nonexistent-profile-group": "error", "non-profile-set-as-profile": "error", "duplicate-group": "error", "duplicate-client": "error", "multiple-default-groups": "error", "default-is-not-profile": "error"} def deprecated_options(self): """ Check for the ``location='floating'`` option, which has been deprecated in favor of ``floating='true'``. """ if not hasattr(self.metadata, "clients_xml"): # using metadata database return clientdata = self.metadata.clients_xml.xdata for el in clientdata.xpath("//Client"): loc = el.get("location") if loc: if loc == "floating": floating = True else: floating = False self.LintError("deprecated-clients-options", "The location='%s' option is deprecated. " "Please use floating='%s' instead:\n%s" % (loc, floating, self.RenderXML(el))) def nested_clients(self): """ Check for a ```` tag inside a ```` tag, which is either redundant or will never match. """ groupdata = self.metadata.groups_xml.xdata for el in groupdata.xpath("//Client//Client"): self.LintError("nested-client-tags", "Client %s nested within Client tag: %s" % (el.get("name"), self.RenderXML(el))) def bogus_profiles(self): """ Check for clients that have profiles that are either not flagged as profile groups in ``groups.xml``, or don't exist. """ if not hasattr(self.metadata, "clients_xml"): # using metadata database return for client in self.metadata.clients_xml.xdata.findall('.//Client'): profile = client.get("profile") if profile not in self.metadata.groups: self.LintError("nonexistent-profile-group", "%s has nonexistent profile group %s:\n%s" % (client.get("name"), profile, self.RenderXML(client))) elif not self.metadata.groups[profile].is_profile: self.LintError("non-profile-set-as-profile", "%s is set as profile for %s, but %s is not a " "profile group:\n%s" % (profile, client.get("name"), profile, self.RenderXML(client))) def duplicate_default_groups(self): """ Check for multiple default groups. """ defaults = [] for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \ self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"): if grp.get("default", "false").lower() == "true": defaults.append(self.RenderXML(grp)) if len(defaults) > 1: self.LintError("multiple-default-groups", "Multiple default groups defined:\n%s" % "\n".join(defaults)) def duplicate_clients(self): """ Check for clients that are defined more than once. """ if not hasattr(self.metadata, "clients_xml"): # using metadata database return self.duplicate_entries( self.metadata.clients_xml.xdata.xpath("//Client"), "client") def duplicate_groups(self): """ Check for groups that are defined more than once. There are two ways this can happen: 1. The group is listed twice with contradictory options. 2. The group is listed with no options *first*, and then with options later. In this context, 'first' refers to the order in which groups are parsed; see the loop condition below and _handle_groups_xml_event above for details. 
""" groups = dict() duplicates = dict() for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \ self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"): grpname = grp.get("name") if grpname in duplicates: duplicates[grpname].append(grp) elif len(grp.attrib) > 1: # group has options if grpname in groups: duplicates[grpname] = [grp, groups[grpname]] else: groups[grpname] = grp else: # group has no options groups[grpname] = grp for grpname, grps in duplicates.items(): self.LintError("duplicate-group", "Group %s is defined multiple times:\n%s" % (grpname, "\n".join(self.RenderXML(g) for g in grps))) def duplicate_entries(self, allentries, etype): """ Generic duplicate entry finder. :param allentries: A list of all entries to check for duplicates. :type allentries: list of lxml.etree._Element :param etype: The entry type. This will be used to determine the error name (``duplicate-``) and for display to the end user. :type etype: string """ entries = dict() for el in allentries: if el.get("name") in entries: entries[el.get("name")].append(self.RenderXML(el)) else: entries[el.get("name")] = [self.RenderXML(el)] for ename, els in entries.items(): if len(els) > 1: self.LintError("duplicate-%s" % etype, "%s %s is defined multiple times:\n%s" % (etype.title(), ename, "\n".join(els))) def default_is_profile(self): """ Ensure that the default group is a profile group. """ if (self.metadata.default and not self.metadata.groups[self.metadata.default].is_profile): xdata = \ self.metadata.groups_xml.xdata.xpath("//Group[@name='%s']" % self.metadata.default)[0] self.LintError("default-is-not-profile", "Default group is not a profile group:\n%s" % self.RenderXML(xdata)) src/lib/Bcfg2/Server/Lint/Pkgmgr.py000066400000000000000000000035711303523157100173210ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`Pkgmgr ` """ import os import glob import lxml.etree import Bcfg2.Options from Bcfg2.Server.Lint import ServerlessPlugin class Pkgmgr(ServerlessPlugin): """ Find duplicate :ref:`Pkgmgr ` entries with the same priority. """ __serverplugin__ = 'Pkgmgr' def Run(self): pset = set() for pfile in glob.glob(os.path.join(Bcfg2.Options.setup.repository, 'Pkgmgr', '*.xml')): if self.HandlesFile(pfile): xdata = lxml.etree.parse(pfile).getroot() # get priority, type, group priority = xdata.get('priority') ptype = xdata.get('type') for pkg in xdata.xpath("//Package"): if pkg.getparent().tag == 'Group': grp = pkg.getparent().get('name') if (type(grp) is not str and grp.getparent().tag == 'Group'): pgrp = grp.getparent().get('name') else: pgrp = 'none' else: grp = 'none' pgrp = 'none' ptuple = (pkg.get('name'), priority, ptype, grp, pgrp) # check if package is already listed with same # priority, type, grp if ptuple in pset: self.LintError( "duplicate-package", "Duplicate Package %s, priority:%s, type:%s" % (pkg.get('name'), priority, ptype)) else: pset.add(ptuple) @classmethod def Errors(cls): return {"duplicate-packages": "error"} src/lib/Bcfg2/Server/Lint/RequiredAttrs.py000066400000000000000000000332031303523157100206630ustar00rootroot00000000000000""" Verify attributes for configuration entries that cannot be verified with an XML schema alone. """ import os import re import Bcfg2.Server.Lint import Bcfg2.Client.Tools.VCS from Bcfg2.Server.Plugins.Packages import Apt, Yum from Bcfg2.Client.Tools.POSIX.base import device_map # format verifying functions. TODO: These should be moved into XML # schemas where possible. 
def is_filename(val): """ Return True if val is a string describing a valid full path """ return val.startswith("/") and len(val) > 1 def is_selinux_type(val): """ Return True if val is a string describing a valid (although not necessarily existent) SELinux type """ return re.match(r'^[a-z_]+_t', val) def is_selinux_user(val): """ Return True if val is a string describing a valid (although not necessarily existent) SELinux user """ return re.match(r'^[a-z_]+_u', val) def is_octal_mode(val): """ Return True if val is a string describing a valid octal permissions mode """ return re.match(r'[0-7]{3,4}', val) def is_username(val): """ Return True if val is a string giving either a positive integer uid, or a valid Unix username """ return re.match(r'^([A-z][-_A-z0-9]{0,30}|\d+)$', val) def is_device_mode(val): """ Return True if val is a string describing a positive integer """ return re.match(r'^\d+$', val) def is_vcs_type(val): """ Return True if val is a supported vcs type handled by the current client tool """ return (val != 'Path' and hasattr(Bcfg2.Client.Tools.VCS.VCS, 'Install%s' % val)) class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): """ Verify attributes for configuration entries that cannot be verified with an XML schema alone. """ def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) self.required_attrs = { 'Path': { '__any__': {'name': is_filename}, 'augeas': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode}, 'device': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode, 'dev_type': lambda v: v in device_map}, 'directory': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode}, 'file': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode, '__text__': None}, 'hardlink': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode, 'to': is_filename}, 'symlink': {}, 'ignore': {}, 'nonexistent': {}, 'permissions': {'owner': is_username, 'group': is_username, 'mode': is_octal_mode}, 'vcs': {'vcstype': is_vcs_type, 'revision': None, 'sourceurl': None}, }, 'Service': { '__any__': {'name': None}, 'smf': {'name': None, 'FMRI': None} }, 'Action': { None: { 'name': None, 'timing': lambda v: v in ['pre', 'post', 'both'], 'when': lambda v: v in ['modified', 'always'], 'status': lambda v: v in ['ignore', 'check'], 'command': None, }, }, 'ACL': { 'default': { 'scope': lambda v: v in ['user', 'group'], 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v), }, 'access': { 'scope': lambda v: v in ['user', 'group'], 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v), }, 'mask': { 'perms': lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v), }, }, 'Package': { '__any__': {'name': None}, }, 'SEBoolean': { None: { 'name': None, 'value': lambda v: v in ['on', 'off'], }, }, 'SEModule': { None: {'name': None, '__text__': None}, }, 'SEPort': { None: { 'name': lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', v), 'selinuxtype': is_selinux_type, }, }, 'SEFcontext': { None: {'name': None, 'selinuxtype': is_selinux_type}, }, 'SENode': { None: { 'name': lambda v: "/" in v, 'selinuxtype': is_selinux_type, 'proto': lambda v: v in ['ipv6', 'ipv4'] }, }, 'SELogin': { None: {'name': is_username, 'selinuxuser': is_selinux_user}, }, 'SEUser': { None: { 'name': is_selinux_user, 'roles': lambda v: all(is_selinux_user(u) for u in " ".split(v)), 'prefix': None, }, }, 'SEInterface': { None: {'name': None, 'selinuxtype': is_selinux_type}, }, 'SEPermissive': { None: {'name': is_selinux_type}, }, 'POSIXGroup': { 
None: {'name': is_username}, }, 'POSIXUser': { None: {'name': is_username}, }, } def Run(self): self.check_packages() if "Defaults" in self.core.plugins: self.logger.info("Defaults plugin enabled; skipping required " "attribute checks") else: self.check_rules() self.check_bundles() @classmethod def Errors(cls): return {"missing-elements": "error", "unknown-entry-type": "error", "unknown-entry-tag": "error", "required-attrs-missing": "error", "required-attr-format": "error", "extra-attrs": "warning"} def check_default_acl(self, path): """ Check that a default ACL contains either no entries or minimum required entries """ defaults = 0 if path.xpath("ACL[@type='default' and @scope='user' and @user='']"): defaults += 1 if path.xpath("ACL[@type='default' and @scope='group' and @group='']"): defaults += 1 if path.xpath("ACL[@type='default' and @scope='other']"): defaults += 1 if defaults > 0 and defaults < 3: self.LintError( "missing-elements", "A Path must have either no default ACLs or at" " least default:user::, default:group:: and" " default:other::") def check_packages(self): """ Check Packages sources for Source entries with missing attributes. """ if 'Packages' not in self.core.plugins: return for source in self.core.plugins['Packages'].sources: if isinstance(source, Yum.YumSource): if (not source.pulp_id and not source.url and not source.rawurl): self.LintError( "required-attrs-missing", "A %s source must have either a url, rawurl, or " "pulp_id attribute: %s" % (source.ptype, self.RenderXML(source.xsource))) elif not source.url and not source.rawurl: self.LintError( "required-attrs-missing", "A %s source must have either a url or rawurl attribute: " "%s" % (source.ptype, self.RenderXML(source.xsource))) if (not isinstance(source, Apt.AptSource) and source.recommended): self.LintError( "extra-attrs", "The recommended attribute is not supported on %s sources:" " %s" % (source.ptype, self.RenderXML(source.xsource))) def check_rules(self): """ check Rules for Path entries with missing attrs """ if 'Rules' not in self.core.plugins: return for rules in self.core.plugins['Rules'].entries.values(): xdata = rules.pnode.data for path in xdata.xpath("//Path"): self.check_entry(path, os.path.join(Bcfg2.Options.setup.repository, rules.name)) def check_bundles(self): """ Check bundles for BoundPath and BoundPackage entries with missing attrs. """ if 'Bundler' not in self.core.plugins: return for bundle in self.core.plugins['Bundler'].entries.values(): if self.HandlesFile(bundle.name) and bundle.template is None: for path in bundle.xdata.xpath( "//*[substring(name(), 1, 5) = 'Bound']"): self.check_entry(path, bundle.name) # ensure that abstract Path tags have either name # or glob specified for path in bundle.xdata.xpath("//Path"): if ('name' not in path.attrib and 'glob' not in path.attrib): self.LintError( "required-attrs-missing", "Path tags require either a 'name' or 'glob' " "attribute: \n%s" % self.RenderXML(path)) # ensure that abstract Package tags have either name # or group specified for package in bundle.xdata.xpath("//Package"): if ('name' not in package.attrib and 'group' not in package.attrib): self.LintError( "required-attrs-missing", "Package tags require either a 'name' or 'group' " "attribute: \n%s" % self.RenderXML(package)) def check_entry(self, entry, filename): """ Generic entry check. :param entry: The XML entry to check for missing attributes. 
:type entry: lxml.etree._Element :param filename: The filename the entry came from :type filename: string """ if self.HandlesFile(filename): name = entry.get('name') tag = entry.tag if tag.startswith("Bound"): tag = tag[5:] if tag not in self.required_attrs: self.LintError("unknown-entry-tag", "Unknown entry tag '%s': %s" % (tag, self.RenderXML(entry))) return etype = entry.get('type') if etype in self.required_attrs[tag]: required_attrs = self.required_attrs[tag][etype] elif '__any__' in self.required_attrs[tag]: required_attrs = self.required_attrs[tag]['__any__'] else: self.LintError("unknown-entry-type", "Unknown %s type %s: %s" % (tag, etype, self.RenderXML(entry))) return attrs = set(entry.attrib.keys()) if 'dev_type' in required_attrs: dev_type = entry.get('dev_type') if dev_type in ['block', 'char']: # check if major/minor are specified required_attrs['major'] = is_device_mode required_attrs['minor'] = is_device_mode if tag == 'Path': self.check_default_acl(entry) if tag == 'ACL' and 'scope' in required_attrs: required_attrs[entry.get('scope')] = is_username if '__text__' in required_attrs: fmt = required_attrs['__text__'] del required_attrs['__text__'] if (not entry.text and not entry.get('empty', 'false').lower() == 'true'): self.LintError("required-attrs-missing", "Text missing for %s %s in %s: %s" % (tag, name, filename, self.RenderXML(entry))) if fmt is not None and not fmt(entry.text): self.LintError( "required-attr-format", "Text content of %s %s in %s is malformed\n%s" % (tag, name, filename, self.RenderXML(entry))) if not attrs.issuperset(required_attrs.keys()): self.LintError( "required-attrs-missing", "The following required attribute(s) are missing for %s " "%s in %s: %s\n%s" % (tag, name, filename, ", ".join([attr for attr in set(required_attrs.keys()).difference(attrs)]), self.RenderXML(entry))) for attr, fmt in required_attrs.items(): if fmt and attr in attrs and not fmt(entry.attrib[attr]): self.LintError( "required-attr-format", "The %s attribute of %s %s in %s is malformed\n%s" % (attr, tag, name, filename, self.RenderXML(entry))) src/lib/Bcfg2/Server/Lint/TemplateAbuse.py000066400000000000000000000071411303523157100206220ustar00rootroot00000000000000""" Check for templated scripts or executables. """ import os import stat import Bcfg2.Server.Lint from Bcfg2.Compat import any # pylint: disable=W0622 from Bcfg2.Server.Plugin import default_path_metadata from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator import \ CfgEncryptedGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator import \ CfgEncryptedCheetahGenerator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedJinja2Generator import \ CfgEncryptedJinja2Generator class TemplateAbuse(Bcfg2.Server.Lint.ServerPlugin): """ Check for templated scripts or executables. 
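# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): the general pattern that
# RequiredAttrs.check_entry applies -- compare an element's attributes
# against a table mapping required attribute names to optional format
# validators.  The element, table, and print-based reporting below are
# hypothetical examples; only lxml and the standard library are assumed.
# ---------------------------------------------------------------------------
import re
import lxml.etree

REQUIRED = {
    'name': lambda v: v.startswith('/'),     # must be an absolute path
    'mode': lambda v: re.match(r'^[0-7]{3,4}$', v),
    'owner': None,                            # required, any value accepted
}

entry = lxml.etree.fromstring('<Path name="/etc/motd" mode="0644"/>')
present = set(entry.attrib)

missing = set(REQUIRED) - present
if missing:   # 'owner' is deliberately absent in this example
    print("missing attributes: %s" % ", ".join(sorted(missing)))

for attr, validator in REQUIRED.items():
    if validator and attr in present and not validator(entry.get(attr)):
        print("malformed attribute %s=%r" % (attr, entry.get(attr)))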
""" templates = [CfgGenshiGenerator, CfgCheetahGenerator, CfgJinja2Generator, CfgEncryptedGenshiGenerator, CfgEncryptedCheetahGenerator, CfgEncryptedJinja2Generator] extensions = [".pl", ".py", ".sh", ".rb"] def Run(self): if 'Cfg' in self.core.plugins: for entryset in self.core.plugins['Cfg'].entries.values(): for entry in entryset.entries.values(): if (self.HandlesFile(entry.name) and any(isinstance(entry, t) for t in self.templates)): self.check_template(entryset, entry) @classmethod def Errors(cls): return {"templated-script": "warning", "templated-executable": "warning"} def check_template(self, entryset, entry): """ Check a template to see if it's a script or an executable. """ # first, check for a known script extension ext = os.path.splitext(entryset.path)[1] if ext in self.extensions: self.LintError("templated-script", "Templated script found: %s\n" "File has a known script extension: %s\n" "Template a config file for the script instead" % (entry.name, ext)) return # next, check for a shebang line firstline = open(entry.name).readline() if firstline.startswith("#!"): self.LintError("templated-script", "Templated script found: %s\n" "File starts with a shebang: %s\n" "Template a config file for the script instead" % (entry.name, firstline)) return # finally, check for executable permissions in info.xml for entry in entryset.entries.values(): if isinstance(entry, CfgInfoXML): for pinfo in entry.infoxml.xdata.xpath("//FileInfo/Info"): try: mode = int( pinfo.get("mode", default_path_metadata()['mode']), 8) except ValueError: # LintError will be produced by RequiredAttrs plugin self.logger.warning("Non-octal mode: %s" % mode) continue if mode & stat.S_IXUSR != 0: self.LintError( "templated-executable", "Templated executable found: %s\n" "Template a config file for the executable instead" % entry.name) return src/lib/Bcfg2/Server/Lint/TemplateHelper.py000066400000000000000000000101631303523157100210000ustar00rootroot00000000000000""" ``bcfg2-lint`` plugin for :ref:`TemplateHelper ` """ import sys import imp from Bcfg2.Server.Lint import ServerPlugin from Bcfg2.Server.Plugins.TemplateHelper import HelperModule, MODULE_RE from Bcfg2.Utils import safe_module_name class TemplateHelper(ServerPlugin): """ ``bcfg2-lint`` plugin to ensure that all :ref:`TemplateHelper ` modules are valid. This can check for: * A TemplateHelper module that cannot be imported due to syntax or other compile-time errors; * A TemplateHelper module that does not have an ``__export__`` attribute, or whose ``__export__`` is not a list; * Bogus symbols listed in ``__export__``, including symbols that don't exist, that are reserved, or that start with underscores. """ __serverplugin__ = 'TemplateHelper' def __init__(self, *args, **kwargs): ServerPlugin.__init__(self, *args, **kwargs) # we instantiate a dummy helper to discover which keywords and # defaults are reserved dummy = HelperModule("foo.py", None) self.reserved_keywords = dir(dummy) self.reserved_defaults = dummy.reserved_defaults def Run(self): for helper in self.core.plugins['TemplateHelper'].entries.values(): if self.HandlesFile(helper.name): self.check_helper(helper.name) def check_helper(self, helper): """ Check a single helper module. 
:param helper: The filename of the helper module :type helper: string """ module_name = MODULE_RE.search(helper).group(1) try: module = imp.load_source( safe_module_name('TemplateHelper', module_name), helper) except: # pylint: disable=W0702 err = sys.exc_info()[1] self.LintError("templatehelper-import-error", "Failed to import %s: %s" % (helper, err)) return if not hasattr(module, "__export__"): self.LintError("templatehelper-no-export", "%s has no __export__ list" % helper) return elif not isinstance(module.__export__, list): self.LintError("templatehelper-nonlist-export", "__export__ is not a list in %s" % helper) return for sym in module.__export__: if not hasattr(module, sym): self.LintError("templatehelper-nonexistent-export", "%s: exported symbol %s does not exist" % (helper, sym)) elif sym in self.reserved_keywords: self.LintError("templatehelper-reserved-export", "%s: exported symbol %s is reserved" % (helper, sym)) elif sym.startswith("_"): self.LintError("templatehelper-underscore-export", "%s: exported symbol %s starts with underscore" % (helper, sym)) if sym in getattr(module, "__default__", []): self.LintError("templatehelper-export-and-default", "%s: %s is listed in both __default__ and " "__export__" % (helper, sym)) for sym in getattr(module, "__default__", []): if sym in self.reserved_defaults: self.LintError("templatehelper-reserved-default", "%s: default symbol %s is reserved" % (helper, sym)) @classmethod def Errors(cls): return {"templatehelper-import-error": "error", "templatehelper-no-export": "error", "templatehelper-nonlist-export": "error", "templatehelper-nonexistent-export": "error", "templatehelper-reserved-export": "error", "templatehelper-reserved-default": "error", "templatehelper-underscore-export": "warning", "templatehelper-export-and-default": "warning"} src/lib/Bcfg2/Server/Lint/Validate.py000066400000000000000000000241201303523157100176140ustar00rootroot00000000000000"""Validate XML files. Ensure that all XML files in the Bcfg2 repository validate according to their respective schemas. """ import glob import os import sys import lxml.etree import Bcfg2.Options import Bcfg2.Server.Lint from Bcfg2.Utils import Executor class Validate(Bcfg2.Server.Lint.ServerlessPlugin): """ Ensure that all XML files in the Bcfg2 repository validate according to their respective schemas. """ options = Bcfg2.Server.Lint.ServerlessPlugin.options + [ Bcfg2.Options.PathOption( "--schema", cf=("Validate", "schema"), default="/usr/share/bcfg2/schemas", help="The full path to the XML schema files")] def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) #: A dict of : that maps files in the #: Bcfg2 specification to their schemas. The globs are #: extended :mod:`fnmatch` globs that also support ``**``, #: which matches any number of any characters, including #: forward slashes. The schema files are relative to the #: schema directory, which can be controlled by the #: ``bcfg2-lint --schema`` option. 
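# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): loading a helper module from an
# arbitrary path and inspecting its __export__ list, roughly as
# TemplateHelper.check_helper does.  It mirrors the imp.load_source idiom
# used above; "myhelper" and the returned messages are examples.
# ---------------------------------------------------------------------------
import imp

def check_exports(path, module_name="myhelper"):
    """Return a list of problems found with the module's __export__."""
    module = imp.load_source(module_name, path)
    exports = getattr(module, "__export__", None)
    if not isinstance(exports, list):
        return ["__export__ missing or not a list"]
    return ["exported symbol %s does not exist" % sym
            for sym in exports if not hasattr(module, sym)]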
self.filesets = \ {"Metadata/groups.xml": "metadata.xsd", "Metadata/clients.xml": "clients.xsd", "Cfg/**/info.xml": "info.xsd", "Cfg/**/privkey.xml": "privkey.xsd", "Cfg/**/pubkey.xml": "pubkey.xsd", "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd", "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd", "Cfg/**/sslcert.xml": "sslca-cert.xsd", "Cfg/**/sslkey.xml": "sslca-key.xsd", "SSHbase/**/info.xml": "info.xsd", "TGenshi/**/info.xml": "info.xsd", "TCheetah/**/info.xml": "info.xsd", "Bundler/*.xml": "bundle.xsd", "Bundler/*.genshi": "bundle.xsd", "Pkgmgr/*.xml": "pkglist.xsd", "Rules/*.xml": "rules.xsd", "Defaults/*.xml": "defaults.xsd", "etc/report-configuration.xml": "report-configuration.xsd", "Deps/*.xml": "deps.xsd", "Decisions/*.xml": "decisions.xsd", "Packages/sources.xml": "packages.xsd", "GroupPatterns/config.xml": "grouppatterns.xsd", "AWSTags/config.xml": "awstags.xsd", "NagiosGen/config.xml": "nagiosgen.xsd", "FileProbes/config.xml": "fileprobes.xsd", "GroupLogic/groups.xml": "grouplogic.xsd" } self.filelists = {} self.get_filelists() self.cmd = Executor() def Run(self): for path, schemaname in self.filesets.items(): try: filelist = self.filelists[path] except KeyError: filelist = [] if filelist: # avoid loading schemas for empty file lists schemafile = os.path.join(Bcfg2.Options.setup.schema, schemaname) schema = self._load_schema(schemafile) if schema: for filename in filelist: self.validate(filename, schemafile, schema=schema) self.check_properties() @classmethod def Errors(cls): return {"schema-failed-to-parse": "warning", "properties-schema-not-found": "warning", "xml-failed-to-parse": "error", "xml-failed-to-read": "error", "xml-failed-to-verify": "error", "xinclude-does-not-exist": "error", "input-output-error": "error"} def check_properties(self): """ Check Properties files against their schemas. """ for filename in self.filelists['props']: schemafile = "%s.xsd" % os.path.splitext(filename)[0] if os.path.exists(schemafile): self.validate(filename, schemafile) else: self.LintError("properties-schema-not-found", "No schema found for %s" % filename) # ensure that it at least parses self.parse(filename) def parse(self, filename): """ Parse an XML file, raising the appropriate LintErrors if it can't be parsed or read. Return the lxml.etree._ElementTree parsed from the file. 
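# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): the core of Validate.validate()
# -- compile an XSD once with lxml and reuse the schema object for every
# file that maps to it.  The file paths are placeholders; lxml is assumed
# to be installed.
# ---------------------------------------------------------------------------
import lxml.etree

schema = lxml.etree.XMLSchema(
    lxml.etree.parse("/usr/share/bcfg2/schemas/bundle.xsd"))
doc = lxml.etree.parse("Bundler/base.xml")
doc.xinclude()                  # resolve XIncludes before validating
if not schema.validate(doc):
    print(schema.error_log)     # lxml collects the validation errors here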
:param filename: The full path to the file to parse :type filename: string :returns: lxml.etree._ElementTree - the parsed data""" try: xdata = lxml.etree.parse(filename) if self.files is None: self._expand_wildcard_xincludes(xdata) xdata.xinclude() return xdata except (lxml.etree.XIncludeError, SyntaxError): cmd = ["xmllint", "--noout"] if self.files is None: cmd.append("--xinclude") cmd.append(filename) result = self.cmd.run(cmd) self.LintError("xml-failed-to-parse", "%s fails to parse:\n%s" % (filename, result.stdout + result.stderr)) return False except IOError: self.LintError("xml-failed-to-read", "Failed to open file %s" % filename) return False def _expand_wildcard_xincludes(self, xdata): """ a lightweight version of :func:`Bcfg2.Server.Plugin.helpers.XMLFileBacked._follow_xincludes` """ xinclude = '%sinclude' % Bcfg2.Server.XI_NAMESPACE for el in xdata.findall('//' + xinclude): name = el.get("href") if name.startswith("/"): fpath = name else: fpath = os.path.join(os.path.dirname(xdata.docinfo.URL), name) # expand globs in xinclude, a bcfg2-specific extension extras = glob.glob(fpath) if not extras: msg = "%s: %s does not exist, skipping: %s" % \ (xdata.docinfo.URL, name, self.RenderXML(el)) if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE): self.logger.debug(msg) else: self.LintError("xinclude-does-not-exist", msg) parent = el.getparent() parent.remove(el) for extra in extras: if extra != xdata.docinfo.URL: lxml.etree.SubElement(parent, xinclude, href=extra) def validate(self, filename, schemafile, schema=None): """ Validate a file against the given schema. :param filename: The full path to the file to validate :type filename: string :param schemafile: The full path to the schema file to validate against :type schemafile: string :param schema: The loaded schema to validate against. This can be used to avoid parsing a single schema file for every file that needs to be validate against it. :type schema: lxml.etree.Schema :returns: bool - True if the file validates, false otherwise """ if schema is None: # if no schema object was provided, instantiate one schema = self._load_schema(schemafile) if not schema: return False datafile = self.parse(filename) if not datafile: return False if not schema.validate(datafile): cmd = ["xmllint"] if self.files is None: cmd.append("--xinclude") cmd.extend(["--noout", "--schema", schemafile, filename]) result = self.cmd.run(cmd) if not result.success: self.LintError("xml-failed-to-verify", "%s fails to verify:\n%s" % (filename, result.stdout + result.stderr)) return False return True def get_filelists(self): """ Get lists of different kinds of files to validate. 
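# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): when lxml reports a failure,
# Validate shells out to xmllint to get human-readable error output.  This
# sketch uses subprocess in place of Bcfg2.Utils.Executor; the arguments
# mirror the command built above and the file names are examples.
# ---------------------------------------------------------------------------
import subprocess

def xmllint_errors(filename, schemafile=None, xinclude=True):
    """Return xmllint's combined stdout/stderr for a problem file."""
    cmd = ["xmllint", "--noout"]
    if xinclude:
        cmd.append("--xinclude")
    if schemafile:
        cmd.extend(["--schema", schemafile])
    cmd.append(filename)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return proc.communicate()[0]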
This doesn't return anything, but it sets :attr:`Bcfg2.Server.Lint.Validate.Validate.filelists` to a dict whose keys are path globs given in :attr:`Bcfg2.Server.Lint.Validate.Validate.filesets` and whose values are lists of the full paths to all files in the Bcfg2 repository (or given with ``bcfg2-lint --stdin``) that match the glob.""" for path in self.filesets.keys(): if '/**/' in path: if self.files is not None: self.filelists[path] = self.list_matching_files(path) else: # self.files is None fpath, fname = path.split('/**/') self.filelists[path] = [] for root, _, files in os.walk( os.path.join(Bcfg2.Options.setup.repository, fpath)): self.filelists[path].extend([os.path.join(root, f) for f in files if f == fname]) else: self.filelists[path] = self.list_matching_files(path) self.filelists['props'] = self.list_matching_files("Properties/*.xml") def _load_schema(self, filename): """ Load an XML schema document, returning the Schema object and raising appropriate lint errors on failure. :param filename: The full path to the schema file to load. :type filename: string :returns: lxml.etree.Schema - The loaded schema data """ try: return lxml.etree.XMLSchema(lxml.etree.parse(filename)) except IOError: err = sys.exc_info()[1] self.LintError("input-output-error", str(err)) except lxml.etree.XMLSchemaParseError: err = sys.exc_info()[1] self.LintError("schema-failed-to-parse", "Failed to process schema %s: %s" % (filename, err)) return None src/lib/Bcfg2/Server/Lint/ValidateJSON.py000066400000000000000000000044641303523157100203170ustar00rootroot00000000000000"""Validate JSON files. Ensure that all JSON files in the Bcfg2 repository are valid. Currently, the only plugins that uses JSON are Ohai and Properties. """ import os import sys import Bcfg2.Server.Lint try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 except (ImportError, AttributeError): import simplejson as json class ValidateJSON(Bcfg2.Server.Lint.ServerlessPlugin): """Ensure that all JSON files in the Bcfg2 repository are valid. Currently, the only plugins that uses JSON are Ohai and Properties. """ def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) #: A list of file globs that give the path to JSON files. The #: globs are extended :mod:`fnmatch` globs that also support #: ``**``, which matches any number of any characters, #: including forward slashes. self.globs = ["Properties/*.json", "Ohai/*.json"] self.files = self.get_files() def Run(self): for path in self.files: self.logger.debug("Validating JSON in %s" % path) try: json.load(open(path)) except ValueError: self.LintError("json-failed-to-parse", "%s does not contain valid JSON: %s" % (path, sys.exc_info()[1])) @classmethod def Errors(cls): return {"json-failed-to-parse": "error"} def get_files(self): """Return a list of all JSON files to validate, based on :attr:`Bcfg2.Server.Lint.ValidateJSON.ValidateJSON.globs`. 
""" rv = [] for path in self.globs: if '/**/' in path: if self.files is not None: rv.extend(self.list_matching_files(path)) else: # self.files is None fpath, fname = path.split('/**/') for root, _, files in os.walk( os.path.join(Bcfg2.Options.setup.repository, fpath)): rv.extend([os.path.join(root, f) for f in files if f == fname]) else: rv.extend(self.list_matching_files(path)) return rv src/lib/Bcfg2/Server/Lint/__init__.py000066400000000000000000000424631303523157100176340ustar00rootroot00000000000000""" Base classes for Lint plugins and error handling """ import copy import fcntl import fnmatch import glob import logging import os import struct import sys import termios import textwrap import time import lxml.etree import Bcfg2.Options import Bcfg2.Server.Core import Bcfg2.Server.Plugins from Bcfg2.Compat import walk_packages from Bcfg2.Options import _debug def _ioctl_GWINSZ(fd): # pylint: disable=C0103 """ get a tuple of (height, width) giving the size of the window from the given file descriptor """ try: return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) except (IOError, struct.error): return None def get_termsize(): """ get a tuple of (width, height) giving the size of the terminal """ if not sys.stdout.isatty(): return None dims = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2) if not dims: try: fd = os.open(os.ctermid(), os.O_RDONLY) dims = _ioctl_GWINSZ(fd) os.close(fd) except IOError: pass if not dims: try: dims = (os.environ['LINES'], os.environ['COLUMNS']) except KeyError: return None return int(dims[1]), int(dims[0]) class Plugin(object): """ Base class for all bcfg2-lint plugins """ #: Name of the matching server plugin or None if there is no #: matching one. If this is None the lint plugin will only loaded #: by default if the matching server plugin is enabled, too. __serverplugin__ = None options = [Bcfg2.Options.Common.repository] def __init__(self, errorhandler=None, files=None): """ :param errorhandler: A :class:`Bcfg2.Server.Lint.ErrorHandler` that will be used to handle lint errors. If one is not provided, a new one will be instantiated. :type errorhandler: Bcfg2.Server.Lint.ErrorHandler :param files: A list of files to run bcfg2-lint against. (See the bcfg2-lint ``--stdin`` option.) :type files: list of strings """ #: The list of files that bcfg2-lint should be run against self.files = files self.logger = logging.getLogger('bcfg2-lint') if errorhandler is None: #: The error handler self.errorhandler = ErrorHandler() else: self.errorhandler = errorhandler self.errorhandler.RegisterErrors(self.Errors()) def Run(self): """ Run the plugin. Must be overloaded by child classes. """ raise NotImplementedError @classmethod def Errors(cls): """ Returns a dict of errors the plugin supplies, in a format suitable for passing to :func:`Bcfg2.Server.Lint.ErrorHandler.RegisterErrors`. Must be overloaded by child classes. :returns: dict """ raise NotImplementedError def HandlesFile(self, fname): """ Returns True if the given file should be handled by the plugin according to :attr:`Bcfg2.Server.Lint.Plugin.files`, False otherwise. """ return (self.files is None or fname in self.files or os.path.join(Bcfg2.Options.setup.repository, fname) in self.files or os.path.abspath(fname) in self.files or os.path.abspath(os.path.join(Bcfg2.Options.setup.repository, fname)) in self.files) def LintError(self, err, msg): """ Raise an error from the lint process. :param err: The name of the error being raised. 
This name must be a key in the dict returned by :func:`Bcfg2.Server.Lint.Plugin.Errors`. :type err: string :param msg: The freeform message to display to the end user. :type msg: string """ self.errorhandler.dispatch(err, msg) def RenderXML(self, element, keep_text=False): """ Render an XML element for error output. This prefixes the line number and removes children for nicer display. :param element: The element to render :type element: lxml.etree._Element :param keep_text: Do not discard text content from the element for display :type keep_text: boolean """ xml = None if len(element) or element.text: el = copy.copy(element) if el.text and not keep_text: el.text = '...' for child in el.iterchildren(): el.remove(child) xml = lxml.etree.tostring( el, xml_declaration=False).decode("UTF-8").strip() else: xml = lxml.etree.tostring( element, xml_declaration=False).decode("UTF-8").strip() return " line %s: %s" % (element.sourceline, xml) def list_matching_files(self, path): """list all files matching the path in self.files or the bcfg2 repo.""" if self.files is not None: return fnmatch.filter(self.files, os.path.join('*', path)) else: return glob.glob(os.path.join(Bcfg2.Options.setup.repository, path)) class ErrorHandler(object): """ A class to handle errors for bcfg2-lint plugins """ def __init__(self, errors=None): """ :param errors: An initial dict of errors to register :type errors: dict """ #: The number of errors passed to this error handler self.errors = 0 #: The number of warnings passed to this error handler self.warnings = 0 self.logger = logging.getLogger('bcfg2-lint') termsize = get_termsize() if termsize is not None and termsize[0] > 0: twrap = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ", width=termsize[0]) #: A function to wrap text to the width of the terminal self._wrapper = twrap.wrap else: self._wrapper = lambda s: [s] #: A dict of registered errors self.errortypes = dict() if errors is not None: self.RegisterErrors(dict(errors.items())) def RegisterErrors(self, errors): """ Register a dict of errors that a plugin may raise. The keys of the dict are short strings that describe each error; the values are the default error handling for that error ("error", "warning", or "silent"). :param errors: The error dict :type errors: dict """ for err, action in errors.items(): if err not in self.errortypes: if "warn" in action: self.errortypes[err] = self.warn elif "err" in action: self.errortypes[err] = self.error else: self.errortypes[err] = self.debug def dispatch(self, err, msg): """ Dispatch an error to the correct handler. :param err: The name of the error being raised. This name must be a key in :attr:`Bcfg2.Server.Lint.ErrorHandler.errortypes`, the dict of registered errors. :type err: string :param msg: The freeform message to display to the end user. :type msg: string """ if err in self.errortypes: self.errortypes[err](msg) self.logger.debug(" (%s)" % err) else: # assume that it's an error, but complain self.error(msg) self.logger.warning("Unknown error %s" % err) def error(self, msg): """ Log an error condition. :param msg: The freeform message to display to the end user. :type msg: string """ self.errors += 1 self._log(msg, self.logger.error, prefix="ERROR: ") def warn(self, msg): """ Log a warning condition. :param msg: The freeform message to display to the end user. :type msg: string """ self.warnings += 1 self._log(msg, self.logger.warning, prefix="WARNING: ") def debug(self, msg): """ Log a silent/debug condition. 
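# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): ErrorHandler._log wraps each
# line of a multi-line message separately, because a single
# textwrap.wrap() call would fold embedded newlines into one paragraph.
# The message text and width below are examples.
# ---------------------------------------------------------------------------
import textwrap

wrapper = textwrap.TextWrapper(initial_indent="  ",
                               subsequent_indent="  ", width=60)
msg = "first line of a lint error message\nsecond, independent line"
for rawline in msg.splitlines():
    for line in wrapper.wrap(rawline):
        print(line)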
:param msg: The freeform message to display to the end user. :type msg: string """ self._log(msg, self.logger.debug) def _log(self, msg, logfunc, prefix=""): """ Generic log function that logs a message with the given function after wrapping it for the terminal width. """ # a message may itself consist of multiple lines. wrap() will # elide them all into a single paragraph, which we don't want. # so we split the message into its paragraphs and wrap each # paragraph individually. this means, unfortunately, that we # lose textwrap's built-in initial indent functionality, # because we want to only treat the very first line of the # first paragraph specially. so we do some silliness. rawlines = msg.splitlines() firstline = True for rawline in rawlines: lines = self._wrapper(rawline) for line in lines: if firstline: logfunc(prefix + line.lstrip()) firstline = False else: logfunc(line) class ServerlessPlugin(Plugin): # pylint: disable=W0223 """ Base class for bcfg2-lint plugins that are run before the server starts up (i.e., plugins that check things that may prevent the server from starting up). """ pass class ServerPlugin(Plugin): # pylint: disable=W0223 """ Base class for bcfg2-lint plugins that check things that require the running Bcfg2 server. """ def __init__(self, core, errorhandler=None, files=None): """ :param core: The Bcfg2 server core :type core: Bcfg2.Server.Core.BaseCore :param errorhandler: A :class:`Bcfg2.Server.Lint.ErrorHandler` that will be used to handle lint errors. If one is not provided, a new one will be instantiated. :type errorhandler: Bcfg2.Server.Lint.ErrorHandler :param files: A list of files to run bcfg2-lint against. (See the bcfg2-lint ``--stdin`` option.) :type files: list of strings """ Plugin.__init__(self, errorhandler=errorhandler, files=files) #: The server core self.core = core self.logger = self.core.logger #: The metadata plugin self.metadata = self.core.metadata class LintPluginAction(Bcfg2.Options.ComponentAction): """ Option parser action to load lint plugins """ bases = ['Bcfg2.Server.Lint'] class LintPluginOption(Bcfg2.Options.Option): """ Option class for the lint_plugins """ def early_parsing_hook(self, namespace): """ We want a usefull default for the enabled lint plugins. Therfore we use all importable plugins, that either pertain with enabled server plugins or that has no matching plugin. """ plugins = [p.__name__ for p in namespace.plugins] for loader, name, _is_pkg in walk_packages(path=__path__): try: module = loader.find_module(name).load_module(name) plugin = getattr(module, name) if plugin.__serverplugin__ is None or \ plugin.__serverplugin__ in plugins: _debug("Automatically adding lint plugin %s" % plugin.__name__) self.default.append(plugin.__name__) except ImportError: pass class _EarlyOptions(object): """ We need the server.plugins options in an early parsing hook for determining the default value for the lint_plugins. So we create a component that is parsed before the other options. 
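# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): discovering importable plugin
# modules with the standard-library pkgutil, which is what
# LintPluginOption.early_parsing_hook does via Bcfg2.Compat.walk_packages.
# It deliberately mirrors the (older) find_module/load_module idiom used
# above; the package path argument is an example.
# ---------------------------------------------------------------------------
import pkgutil

def discover(path):
    """Return the names of modules under `path` that import cleanly."""
    names = []
    for loader, name, _is_pkg in pkgutil.walk_packages(path=path):
        try:
            loader.find_module(name).load_module(name)
            names.append(name)
        except ImportError:
            pass
    return names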
""" parse_first = True options = [Bcfg2.Options.Common.plugins] class CLI(object): """ The bcfg2-lint CLI """ options = Bcfg2.Server.Core.Core.options + [ Bcfg2.Options.PathOption( '--lint-config', default='/etc/bcfg2-lint.conf', action=Bcfg2.Options.ConfigFileAction, help='Specify bcfg2-lint configuration file'), LintPluginOption( "--lint-plugins", cf=('lint', 'plugins'), default=[], type=Bcfg2.Options.Types.comma_list, action=LintPluginAction, help='bcfg2-lint plugin list'), Bcfg2.Options.BooleanOption( '--list-errors', help='Show error handling'), Bcfg2.Options.BooleanOption( '--stdin', help='Operate on a list of files supplied on stdin'), Bcfg2.Options.Option( cf=("errors", '*'), dest="lint_errors", help="How to handle bcfg2-lint errors")] def __init__(self): parser = Bcfg2.Options.get_parser( description="Manage a running Bcfg2 server", components=[self, _EarlyOptions]) parser.parse() self.logger = logging.getLogger(parser.prog) self.logger.debug("Running lint with plugins: %s" % [p.__name__ for p in Bcfg2.Options.setup.lint_plugins]) if Bcfg2.Options.setup.stdin: self.files = [s.strip() for s in sys.stdin.readlines()] else: self.files = None self.errorhandler = self.get_errorhandler() self.serverlessplugins = [] self.serverplugins = [] for plugin in Bcfg2.Options.setup.lint_plugins: if issubclass(plugin, ServerPlugin): self.serverplugins.append(plugin) else: self.serverlessplugins.append(plugin) def run(self): """ Run bcfg2-lint """ if Bcfg2.Options.setup.list_errors: for plugin in self.serverplugins + self.serverlessplugins: self.errorhandler.RegisterErrors(getattr(plugin, 'Errors')()) print("%-35s %-35s" % ("Error name", "Handler")) for err, handler in self.errorhandler.errortypes.items(): print("%-35s %-35s" % (err, handler.__name__)) return 0 if not self.serverplugins and not self.serverlessplugins: self.logger.error("No lint plugins loaded!") return 1 self.run_serverless_plugins() if self.serverplugins: if self.errorhandler.errors: # it would be swell if we could try to start the server # even if there were errors with the serverless plugins, # but since XML parsing errors occur in the FAM thread # (not in the core server thread), there's no way we can # start the server and try to catch exceptions -- # bcfg2-lint isn't in the same stack as the exceptions. 
# so we're forced to assume that a serverless plugin error # will prevent the server from starting print("Serverless plugins encountered errors, skipping server " "plugins") else: self.run_server_plugins() if (self.errorhandler.errors or self.errorhandler.warnings or Bcfg2.Options.setup.verbose): print("%d errors" % self.errorhandler.errors) print("%d warnings" % self.errorhandler.warnings) if self.errorhandler.errors: return 2 elif self.errorhandler.warnings: return 3 else: return 0 def get_errorhandler(self): """ get a Bcfg2.Server.Lint.ErrorHandler object """ return Bcfg2.Server.Lint.ErrorHandler( errors=Bcfg2.Options.setup.lint_errors) def run_serverless_plugins(self): """ Run serverless plugins """ self.logger.debug("Running serverless plugins: %s" % [p.__name__ for p in self.serverlessplugins]) for plugin in self.serverlessplugins: self.logger.debug(" Running %s" % plugin.__name__) plugin(files=self.files, errorhandler=self.errorhandler).Run() def run_server_plugins(self): """ run plugins that require a running server to run """ core = Bcfg2.Server.Core.Core() try: core.load_plugins() core.block_for_fam_events(handle_events=True) self.logger.debug("Running server plugins: %s" % [p.__name__ for p in self.serverplugins]) for plugin in self.serverplugins: self.logger.debug(" Running %s" % plugin.__name__) plugin(core, files=self.files, errorhandler=self.errorhandler).Run() finally: core.shutdown() def _run_plugin(self, plugin, args=None): """ Run a single bcfg2-lint plugin """ if args is None: args = [] start = time.time() # python 2.5 doesn't support mixing *magic and keyword arguments kwargs = dict(files=self.files, errorhandler=self.errorhandler) rv = plugin(*args, **kwargs).Run() self.logger.debug(" Ran %s in %0.2f seconds" % (plugin.__name__, time.time() - start)) return rv src/lib/Bcfg2/Server/MultiprocessingCore.py000066400000000000000000000465101303523157100211640ustar00rootroot00000000000000""" The multiprocessing server core is a reimplementation of the :mod:`Bcfg2.Server.BuiltinCore` that uses the Python :mod:`multiprocessing` library to offload work to multiple child processes. As such, it requires Python 2.6+. The parent communicates with the children over :class:`multiprocessing.Queue` objects via a :class:`Bcfg2.Server.MultiprocessingCore.RPCQueue` object. A method being called via the RPCQueue must be exposed by the child by decorating it with :func:`Bcfg2.Server.Core.exposed`. """ import time import threading import lxml.etree import multiprocessing import Bcfg2.Options import Bcfg2.Server.Cache import Bcfg2.Server.Plugin from itertools import cycle from Bcfg2.Compat import Queue, Empty, wraps from Bcfg2.Server.Core import Core, exposed from Bcfg2.Server.BuiltinCore import BuiltinCore from multiprocessing.connection import Listener, Client class RPCQueue(Bcfg2.Server.Plugin.Debuggable): """ An implementation of a :class:`multiprocessing.Queue` designed for several additional use patterns: * Random-access reads, based on a key that identifies the data; * Publish-subscribe, where a datum is sent to all hosts. The subscribers can deal with this as a normal Queue with no special handling. """ poll_wait = 3.0 def __init__(self): Bcfg2.Server.Plugin.Debuggable.__init__(self) self._terminate = threading.Event() self._queues = dict() self._listeners = [] def add_subscriber(self, name): """ Add a subscriber to the queue. This returns the :class:`multiprocessing.Queue` object that the subscriber should read from. 
""" self._queues[name] = multiprocessing.Queue() return self._queues[name] def publish(self, method, args=None, kwargs=None): """ Publish an RPC call to the queue for consumption by all subscribers. """ for queue in self._queues.values(): queue.put((None, (method, args or [], kwargs or dict()))) def rpc(self, dest, method, args=None, kwargs=None): """ Make an RPC call to the named subscriber, expecting a response. This opens a :class:`multiprocessing.connection.Listener` and passes the Listener address to the child as part of the RPC call, so that the child can connect to the Listener to submit its results. """ listener = Listener() self.logger.debug("Created new RPC listener at %s" % listener.address) self._listeners.append(listener) try: self._queues[dest].put((listener.address, (method, args or [], kwargs or dict()))) conn = listener.accept() try: while not self._terminate.is_set(): if conn.poll(self.poll_wait): return conn.recv() finally: conn.close() finally: listener.close() self._listeners.remove(listener) def close(self): """ Close queues and connections. """ self._terminate.set() self.logger.debug("Closing RPC queues") for name, queue in self._queues.items(): self.logger.debug("Closing RPC queue to %s" % name) queue.close() # close any listeners that are waiting for connections self.logger.debug("Closing RPC connections") for listener in self._listeners: self.logger.debug("Closing RPC connection at %s" % listener.address) listener.close() class DualEvent(object): """ DualEvent is a clone of :class:`threading.Event` that internally implements both :class:`threading.Event` and :class:`multiprocessing.Event`. """ def __init__(self, threading_event=None, multiprocessing_event=None): self._threading_event = threading_event or threading.Event() self._multiproc_event = multiprocessing_event or \ multiprocessing.Event() if threading_event or multiprocessing_event: # initialize internal flag to false, regardless of the # state of either object passed in self.clear() def is_set(self): """ Return true if and only if the internal flag is true. """ return self._threading_event.is_set() isSet = is_set def set(self): """ Set the internal flag to true. """ self._threading_event.set() self._multiproc_event.set() def clear(self): """ Reset the internal flag to false. """ self._threading_event.clear() self._multiproc_event.clear() def wait(self, timeout=None): """ Block until the internal flag is true, or until the optional timeout occurs. """ return self._threading_event.wait(timeout=timeout) class ChildCore(Core): """ A child process for :class:`Bcfg2.MultiprocessingCore.Core`. This core builds configurations from a given :class:`multiprocessing.Pipe`. Note that this is a full-fledged server core; the only input it gets from the parent process is the hostnames of clients to render. All other state comes from the FAM. However, this core only is used to render configs; it doesn't handle anything else (authentication, probes, etc.) because those are all much faster. There's no reason that it couldn't handle those, though, if the pipe communication "protocol" were made more robust. """ #: How long to wait while polling for new RPC commands. This #: doesn't affect the speed with which a command is processed, but #: setting it too high will result in longer shutdown times, since #: we only check for the termination event from the main process #: every ``poll_wait`` seconds. 
poll_wait = 3.0 def __init__(self, name, rpc_q, terminate): """ :param name: The name of this child :type name: string :param read_q: The queue the child will read from for RPC communications from the parent process. :type read_q: multiprocessing.Queue :param write_q: The queue the child will write the results of RPC calls to. :type write_q: multiprocessing.Queue :param terminate: An event that flags ChildCore objects to shut themselves down. :type terminate: multiprocessing.Event """ Core.__init__(self) #: The name of this child self.name = name #: The :class:`multiprocessing.Event` that will be monitored #: to determine when this child should shut down. self.terminate = terminate #: The queue used for RPC communication self.rpc_q = rpc_q # override this setting so that the child doesn't try to write # the pidfile Bcfg2.Options.setup.daemon = False # ensure that the child doesn't start a perflog thread self.perflog_thread = None self._rmi = dict() def _run(self): return True def _dispatch(self, address, data): """ Method dispatcher used for commands received from the RPC queue. """ if address is not None: # if the key is None, then no response is expected. we # make the return connection before dispatching the actual # RPC call so that the parent is blocking for a connection # as briefly as possible self.logger.debug("Connecting to parent via %s" % address) client = Client(address) method, args, kwargs = data func = None rv = None if "." in method: if method in self._rmi: func = self._rmi[method] else: self.logger.error("%s: Method %s does not exist" % (self.name, method)) elif not hasattr(self, method): self.logger.error("%s: Method %s does not exist" % (self.name, method)) else: # method is not a plugin RMI, and exists func = getattr(self, method) if not func.exposed: self.logger.error("%s: Method %s is not exposed" % (self.name, method)) func = None if func is not None: self.logger.debug("%s: Calling RPC method %s" % (self.name, method)) rv = func(*args, **kwargs) if address is not None: # if the key is None, then no response is expected self.logger.debug("Returning data to parent via %s" % address) client.send(rv) def _block(self): self._rmi = self._get_rmi() while not self.terminate.is_set(): try: address, data = self.rpc_q.get(timeout=self.poll_wait) threadname = "-".join(str(i) for i in data) rpc_thread = threading.Thread(name=threadname, target=self._dispatch, args=[address, data]) rpc_thread.start() except Empty: pass except KeyboardInterrupt: break self.shutdown() def shutdown(self): Core.shutdown(self) self.logger.info("%s: Closing RPC command queue" % self.name) self.rpc_q.close() while len(threading.enumerate()) > 1: threads = [t for t in threading.enumerate() if t != threading.current_thread()] self.logger.info("%s: Waiting for %d thread(s): %s" % (self.name, len(threads), [t.name for t in threads])) time.sleep(1) self.logger.info("%s: All threads stopped" % self.name) def _get_rmi(self): rmi = dict() for pname, pinst in self._get_rmi_objects().items(): for crmi in pinst.__child_rmi__: if isinstance(crmi, tuple): mname = crmi[1] else: mname = crmi rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname) return rmi @exposed def expire_cache(self, *tags, **kwargs): """ Expire cached data """ Bcfg2.Server.Cache.expire(*tags, exact=kwargs.pop("exact", False)) @exposed def GetConfig(self, client): """ Render the configuration for a client """ self.metadata.update_client_list() self.logger.debug("%s: Building configuration for %s" % (self.name, client)) return 
lxml.etree.tostring(self.BuildConfiguration(client)) class MultiprocessingCore(BuiltinCore): """ A multiprocessing core that delegates building the actual client configurations to :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects. The parent process doesn't build any children itself; all calls to :func:`GetConfig` are delegated to children. All other calls are handled by the parent process. """ options = BuiltinCore.options + [ Bcfg2.Options.Option( '--children', dest="core_children", cf=('server', 'children'), type=int, default=multiprocessing.cpu_count(), help='Spawn this number of children for the multiprocessing core')] #: How long to wait for a child process to shut down cleanly #: before it is terminated. shutdown_timeout = 10.0 def __init__(self): BuiltinCore.__init__(self) #: A dict of child name -> one end of the #: :class:`multiprocessing.Pipe` object used to communicate #: with that child. (The child is given the other end of the #: Pipe.) self.pipes = dict() #: A queue that keeps track of which children are available to #: render a configuration. A child is popped from the queue #: when it starts to render a config, then it's pushed back on #: when it's done. This lets us use a blocking call to #: :func:`Queue.Queue.get` when waiting for an available #: child. self.available_children = \ Queue(maxsize=Bcfg2.Options.setup.core_children) #: The flag that indicates when to stop child threads and #: processes self.terminate = DualEvent(threading_event=self.terminate) #: A :class:`Bcfg2.Server.MultiprocessingCore.RPCQueue` object #: used to send or publish commands to children. self.rpc_q = RPCQueue() #: A list of children that will be cycled through self._all_children = [] #: An iterator that each child will be taken from in sequence, #: to provide a round-robin distribution of render requests self.children = None def __str__(self): if hasattr(Bcfg2.Options.setup, "server"): return "%s(%s; %s children)" % (self.__class__.__name__, Bcfg2.Options.setup.server, len(self._all_children)) else: return "%s(%s children)" % (self.__class__.__name__, len(self._all_children)) def _run(self): for cnum in range(Bcfg2.Options.setup.core_children): name = "Child-%s" % cnum self.logger.debug("Starting child %s" % name) child_q = self.rpc_q.add_subscriber(name) childcore = ChildCore(name, child_q, self.terminate) child = multiprocessing.Process(target=childcore.run, name=name) child.start() self.logger.debug("Child %s started with PID %s" % (name, child.pid)) self._all_children.append(name) self.logger.debug("Started %s children: %s" % (len(self._all_children), self._all_children)) self.children = cycle(self._all_children) Bcfg2.Server.Cache.add_expire_hook(self.cache_dispatch) return BuiltinCore._run(self) def shutdown(self): BuiltinCore.shutdown(self) self.logger.info("Closing RPC command queues") self.rpc_q.close() def term_children(): """ Terminate all remaining multiprocessing children. 
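# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): MultiprocessingCore hands
# GetConfig calls to its children round-robin by pulling names from an
# itertools.cycle.  (On Python 3 the spelling is next(children) rather
# than the children.next() used above.)  The names are examples.
# ---------------------------------------------------------------------------
from itertools import cycle

children = cycle(["Child-0", "Child-1", "Child-2"])
for client in ["web1", "web2", "db1", "db2"]:
    print("%s -> %s" % (client, next(children)))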
""" for child in multiprocessing.active_children(): self.logger.error("Waited %s seconds to shut down %s, " "terminating" % (self.shutdown_timeout, child.name)) child.terminate() timer = threading.Timer(self.shutdown_timeout, term_children) timer.start() while len(multiprocessing.active_children()): self.logger.info("Waiting for %s child(ren): %s" % (len(multiprocessing.active_children()), [c.name for c in multiprocessing.active_children()])) time.sleep(1) timer.cancel() self.logger.info("All children shut down") while len(threading.enumerate()) > 1: threads = [t for t in threading.enumerate() if t != threading.current_thread()] self.logger.info("Waiting for %s thread(s): %s" % (len(threads), [t.name for t in threads])) time.sleep(1) self.logger.info("Shutdown complete") def _get_rmi(self): child_rmi = dict() for pname, pinst in self._get_rmi_objects().items(): for crmi in pinst.__child_rmi__: if isinstance(crmi, tuple): parentname, childname = crmi else: parentname = childname = crmi child_rmi["%s.%s" % (pname, parentname)] = \ "%s.%s" % (pname, childname) rmi = BuiltinCore._get_rmi(self) for method in rmi.keys(): if method in child_rmi: rmi[method] = self._child_rmi_wrapper(method, rmi[method], child_rmi[method]) return rmi def _child_rmi_wrapper(self, method, parent_rmi, child_rmi): """ Returns a callable that dispatches a call to the given child RMI to child processes, and calls the parent RMI locally (i.e., in the parent process). """ @wraps(parent_rmi) def inner(*args, **kwargs): """ Function that dispatches an RMI call to child processes and to the (original) parent function. """ self.logger.debug("Dispatching RMI call to %s to children: %s" % (method, child_rmi)) self.rpc_q.publish(child_rmi, args=args, kwargs=kwargs) return parent_rmi(*args, **kwargs) return inner @exposed def set_debug(self, address, debug): self.rpc_q.set_debug(debug) self.rpc_q.publish("set_debug", args=[address, debug]) return BuiltinCore.set_debug(self, address, debug) def cache_dispatch(self, tags, exact, _): """ Publish cache expiration events to child nodes. """ self.rpc_q.publish("expire_cache", args=tags, kwargs=dict(exact=exact)) @exposed def GetConfig(self, address): client = self.resolve_client(address)[0] childname = self.children.next() self.logger.debug("Building configuration for %s on %s" % (client, childname)) return self.rpc_q.rpc(childname, "GetConfig", args=[client]) @exposed def get_statistics(self, address): stats = dict() def _aggregate_statistics(newstats, prefix=None): """ Aggregate a set of statistics from a child or parent server core. This adds the statistics to the overall statistics dict (optionally prepending a prefix, such as "Child-1", to uniquely identify this set of statistics), and aggregates it with the set of running totals that are kept from all cores. 
""" for statname, vals in newstats.items(): if statname.startswith("ChildCore:"): statname = statname[5:] if prefix: prettyname = "%s:%s" % (prefix, statname) else: prettyname = statname stats[prettyname] = vals totalname = "Total:%s" % statname if totalname not in stats: stats[totalname] = vals else: newmin = min(stats[totalname][0], vals[0]) newmax = max(stats[totalname][1], vals[1]) newcount = stats[totalname][3] + vals[3] newmean = ((stats[totalname][2] * stats[totalname][3]) + (vals[2] * vals[3])) / newcount stats[totalname] = (newmin, newmax, newmean, newcount) stats = dict() for childname in self._all_children: _aggregate_statistics( self.rpc_q.rpc(childname, "get_statistics", args=[address]), prefix=childname) _aggregate_statistics(BuiltinCore.get_statistics(self, address)) return stats src/lib/Bcfg2/Server/Plugin/000077500000000000000000000000001303523157100160425ustar00rootroot00000000000000src/lib/Bcfg2/Server/Plugin/__init__.py000066400000000000000000000035511303523157100201570ustar00rootroot00000000000000""" ``Bcfg2.Server.Plugin`` contains server plugin base classes, interfaces, exceptions, and helper objects. This module is split into a number of submodules to make it more manageable, but it imports all symbols from the submodules, so with the exception of some documentation it's not necessary to use the submodules. E.g., you can (and should) do:: from Bcfg2.Server.Plugin import Plugin ...rather than:: from Bcfg2.Server.Plugin.base import Plugin """ import Bcfg2.Options # pylint: disable=W0401 from Bcfg2.Server.Plugin.base import * from Bcfg2.Server.Plugin.interfaces import * from Bcfg2.Server.Plugin.helpers import * from Bcfg2.Server.Plugin.exceptions import * class _OptionContainer(object): """ Container for plugin options that are loaded at import time """ options = [ Bcfg2.Options.Common.default_paranoid, Bcfg2.Options.Option( cf=('mdata', 'owner'), dest="default_owner", default='root', help='Default Path owner'), Bcfg2.Options.Option( cf=('mdata', 'group'), dest="default_group", default='root', help='Default Path group'), Bcfg2.Options.Option( cf=('mdata', 'important'), dest="default_important", default='false', choices=['true', 'false'], help='Default Path priority (importance)'), Bcfg2.Options.Option( cf=('mdata', 'mode'), dest="default_mode", default='644', help='Default mode for Path'), Bcfg2.Options.Option( cf=('mdata', 'secontext'), dest="default_secontext", default='__default__', help='Default SELinux context'), Bcfg2.Options.Option( cf=('mdata', 'sensitive'), dest="default_sensitive", default='false', help='Default Path sensitivity setting')] Bcfg2.Options.get_parser().add_component(_OptionContainer) src/lib/Bcfg2/Server/Plugin/base.py000066400000000000000000000074201303523157100173310ustar00rootroot00000000000000"""This module provides the base class for Bcfg2 server plugins.""" import os import Bcfg2.Options from Bcfg2.Logger import Debuggable from Bcfg2.Utils import ClassName class Plugin(Debuggable): """ The base class for all Bcfg2 Server plugins. """ #: The name of the plugin. name = ClassName() #: The email address of the plugin author. __author__ = 'bcfg-dev@mcs.anl.gov' #: Plugin is experimental. Use of this plugin will produce a log #: message alerting the administrator that an experimental plugin #: is in use. experimental = False #: Plugin is deprecated and will be removed in a future release. #: Use of this plugin will produce a log message alerting the #: administrator that an experimental plugin is in use. 
deprecated = False #: Plugin conflicts with the list of other plugin names conflicts = [] #: Plugins of the same type are processed in order of ascending #: sort_order value. Plugins with the same sort_order are sorted #: alphabetically by their name. sort_order = 500 #: Whether or not to automatically create a data directory for #: this plugin create = True #: List of names of methods to be exposed as XML-RPC functions __rmi__ = Debuggable.__rmi__ #: How exposed XML-RPC functions should be dispatched to child #: processes, if :mod:`Bcfg2.Server.MultiprocessingCore` is in #: use. Items ``__child_rmi__`` can either be strings (in which #: case the same function is called on child processes as on the #: parent) or 2-tuples, in which case the first element is the #: name of the RPC function called on the parent process, and the #: second element is the name of the function to call on child #: processes. Functions that are not listed in the list will not #: be dispatched to child processes, i.e., they will only be #: called on the parent. A function must be listed in ``__rmi__`` #: in order to be exposed; functions listed in ``_child_rmi__`` #: but not ``__rmi__`` will be ignored. __child_rmi__ = Debuggable.__child_rmi__ def __init__(self, core): """ :param core: The Bcfg2.Server.Core initializing the plugin :type core: Bcfg2.Server.Core :raises: :exc:`OSError` if adding a file monitor failed; :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` on other errors .. autoattribute:: Bcfg2.Server.Plugin.base.Debuggable.__rmi__ """ Debuggable.__init__(self, name=self.name) self.Entries = {} self.core = core self.data = os.path.join(Bcfg2.Options.setup.repository, self.name) if self.create and not os.path.exists(self.data): self.logger.warning("%s: %s does not exist, creating" % (self.name, self.data)) os.makedirs(self.data) self.running = True @classmethod def init_repo(cls, repo): """ Perform any tasks necessary to create an initial Bcfg2 repository. 
:param repo: The path to the Bcfg2 repository on the filesystem :type repo: string :returns: None """ os.makedirs(os.path.join(repo, cls.name)) def shutdown(self): """ Perform shutdown tasks for the plugin :returns: None """ self.debug_log("Shutting down %s plugin" % self.name) self.running = False def set_debug(self, debug): self.debug_log("%s: debug = %s" % (self.name, self.debug_flag), flag=True) for entry in self.Entries.values(): if isinstance(entry, Debuggable): entry.set_debug(debug) return Debuggable.set_debug(self, debug) def __str__(self): return "%s Plugin" % self.__class__.__name__ src/lib/Bcfg2/Server/Plugin/exceptions.py000066400000000000000000000020341303523157100205740ustar00rootroot00000000000000""" Exceptions for Bcfg2 Server Plugins.""" class PluginInitError(Exception): """Error raised in cases of :class:`Bcfg2.Server.Plugin.base.Plugin` initialization errors.""" pass class PluginExecutionError(Exception): """Error raised in case of :class:`Bcfg2.Server.Plugin.base.Plugin` execution errors.""" pass class MetadataConsistencyError(Exception): """This error gets raised when metadata is internally inconsistent.""" pass class MetadataRuntimeError(Exception): """This error is raised when the metadata engine is called prior to reading enough data, or for other :class:`Bcfg2.Server.Plugin.interfaces.Metadata` errors.""" pass class ValidationError(Exception): """ Exception raised by :class:`Bcfg2.Server.Plugin.interfaces.StructureValidator` and :class:`Bcfg2.Server.Plugin.interfaces.GoalValidator` objects """ class SpecificityError(Exception): """ Thrown by :class:`Bcfg2.Server.Plugin.helpers.Specificity` in case of filename parse failure.""" pass src/lib/Bcfg2/Server/Plugin/helpers.py000066400000000000000000002111321303523157100200560ustar00rootroot00000000000000""" Helper classes for Bcfg2 server plugins """ import os import re import sys import copy import glob import logging import genshi import operator import lxml.etree import Bcfg2.Server import Bcfg2.Options import Bcfg2.Server.FileMonitor from Bcfg2.Logger import Debuggable from Bcfg2.Compat import CmpMixin, wraps from Bcfg2.Server.Plugin.base import Plugin from Bcfg2.Server.Plugin.interfaces import Generator, TemplateDataProvider from Bcfg2.Server.Plugin.exceptions import SpecificityError, \ PluginExecutionError, PluginInitError try: import Bcfg2.Server.Encryption HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False try: import django # pylint: disable=W0611 HAS_DJANGO = True except ImportError: HAS_DJANGO = False LOGGER = logging.getLogger(__name__) def removecomment(stream): """ A Genshi filter that removes comments from the stream. This function is a generator. :param stream: The Genshi stream to remove comments from :type stream: genshi.core.Stream :returns: tuple of ``(kind, data, pos)``, as when iterating through a Genshi stream """ for kind, data, pos in stream: if kind is genshi.core.COMMENT: continue yield kind, data, pos def bind_info(entry, metadata, infoxml=None, default=None): """ Bind the file metadata in the given :class:`Bcfg2.Server.Plugin.helpers.InfoXML` object to the given entry. 
:param entry: The abstract entry to bind the info to :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param infoxml: The info.xml file to pull file metadata from :type infoxml: Bcfg2.Server.Plugin.helpers.InfoXML :param default: Default metadata to supply when the info.xml file does not include a particular attribute :type default: dict :returns: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ if default is None: default = default_path_metadata() for attr, val in list(default.items()): entry.set(attr, val) if infoxml: mdata = dict() infoxml.pnode.Match(metadata, mdata, entry=entry) if 'Info' not in mdata: msg = "Failed to set metadata for file %s" % entry.get('name') LOGGER.error(msg) raise PluginExecutionError(msg) for attr, val in list(mdata['Info'][None].items()): entry.set(attr, val) def default_path_metadata(): """ Get the default Path entry metadata from the config. :returns: dict of metadata attributes and their default values """ return dict([(k, getattr(Bcfg2.Options.setup, "default_%s" % k)) for k in ['owner', 'group', 'mode', 'secontext', 'important', 'paranoid', 'sensitive']]) class DefaultTemplateDataProvider(TemplateDataProvider): """ A base :class:`Bcfg2.Server.Plugin.interfaces.TemplateDataProvider` that provides default data for text and XML templates. Note that, since Cheetah and Genshi text templates treat the ``path`` variable differently, this is overridden, by :class:`Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.DefaultCheetahDataProvider` and :class:`Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator.DefaultGenshiDataProvider`, respectively. """ def get_template_data(self, entry, metadata, template): return dict(name=entry.get('realname', entry.get('name')), metadata=metadata, source_path=template, repo=Bcfg2.Options.setup.repository) def get_xml_template_data(self, _, metadata): return dict(metadata=metadata, repo=Bcfg2.Options.setup.repository) _sentinel = object() # pylint: disable=C0103 def _get_template_data(func_name, args, default=_sentinel): """ Generic template data getter for both text and XML templates. :param func_name: The name of the function to call on :class:`Bcfg2.Server.Plugin.interfaces.TemplateDataProvider` objects to get data for this template type. Should be one of either ``get_template_data`` for text templates, or ``get_xml_template_data`` for XML templates. :type func_name: string :param args: The arguments to pass to the data retrieval function :type args: list :param default: An object that provides a set of base values. If this is not provided, an instance of :class:`Bcfg2.Server.Plugin.helpers.DefaultTemplateDataProvider` is used. This can be set to None to avoid setting any base values at all. 
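# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Bcfg2): the first step of bind_info --
# stamping a set of default Path metadata onto an abstract entry.  The
# defaults dict stands in for default_path_metadata(); the values here are
# just examples.
# ---------------------------------------------------------------------------
import lxml.etree

defaults = {'owner': 'root', 'group': 'root', 'mode': '644'}
entry = lxml.etree.Element('Path', name='/etc/motd')
for attr, val in defaults.items():
    entry.set(attr, val)
print(lxml.etree.tostring(entry))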
:type default: Bcfg2.Server.Plugin.interfaces.TemplateDataProvider """ if default is _sentinel: default = DefaultTemplateDataProvider() providers = Bcfg2.Server.core.plugins_by_type(TemplateDataProvider) if default is not None: providers.insert(0, default) rv = dict() source = dict() for prov in providers: pdata = getattr(prov, func_name)(*args) for key, val in pdata.items(): if key not in rv: rv[key] = val source[key] = prov else: LOGGER.warning("Duplicate template variable %s provided by " "both %s and %s" % (key, prov, source[key])) return rv def get_template_data(entry, metadata, template, default=_sentinel): """ Get all template variables for a text (i.e., Cfg) template """ return _get_template_data("get_template_data", [entry, metadata, template], default=default) def get_xml_template_data(structfile, metadata, default=_sentinel): """ Get all template variables for an XML template """ return _get_template_data("get_xml_template_data", [structfile, metadata], default=default) class DatabaseBacked(Plugin): """ Provides capabilities for a plugin to read and write to a database. The plugin must add an option to flag database use with something like: options = Bcfg2.Server.Plugin.Plugins.options + [ Bcfg2.Options.BooleanOption( cf=('metadata', 'use_database'), dest="metadata_db", help="Use database capabilities of the Metadata plugin") This must be done manually due to various limitations in Python. .. private-include: _use_db .. private-include: _must_lock """ def __init__(self, core): Plugin.__init__(self, core) use_db = getattr(Bcfg2.Options.setup, "%s_db" % self.name.lower(), False) if use_db and not HAS_DJANGO: raise PluginInitError("%s is configured to use the database but " "Django libraries are not found" % self.name) elif use_db and not self.core.database_available: raise PluginInitError("%s is configured to use the database but " "the database is unavailable due to prior " "errors" % self.name) @property def _use_db(self): """ Whether or not this plugin is configured to use the database. """ use_db = getattr(Bcfg2.Options.setup, "%s_db" % self.name.lower(), False) if use_db and HAS_DJANGO and self.core.database_available: return True else: return False @property def _must_lock(self): """ Whether or not the backend database must acquire a thread lock before writing, because it does not allow multiple threads to write.""" return self._use_db and Bcfg2.Options.setup.db_engine == 'sqlite3' @staticmethod def get_db_lock(func): """ Decorator to be used by a method of a :class:`DatabaseBacked` plugin that will update database data. """ @wraps(func) def _acquire_and_run(self, *args, **kwargs): """ The decorated function """ if self._must_lock: # pylint: disable=W0212 try: self.core.db_write_lock.acquire() rv = func(self, *args, **kwargs) finally: self.core.db_write_lock.release() else: rv = func(self, *args, **kwargs) return rv return _acquire_and_run class PluginDatabaseModel(object): """ A database model mixin that all database models used by :class:`Bcfg2.Server.Plugin.helpers.DatabaseBacked` plugins must inherit from. This is just a mixin; models must also inherit from django.db.models.Model to be valid Django models.""" class Meta(object): # pylint: disable=W0232 """ Model metadata options """ app_label = "Server" class FileBacked(Debuggable): """ This object caches file data in memory. FileBacked objects are principally meant to be used as a part of :class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked`. 
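A minimal subclassing sketch (the ``HostList`` class and its parsing
logic are invented for illustration; the pattern is simply to override
:func:`Index` to reparse :attr:`data` whenever the file changes)::

    class HostList(FileBacked):
        # self.data is refreshed by HandleEvent() before Index() runs
        def Index(self):
            self.hosts = [line.strip()
                          for line in self.data.splitlines()
                          if line.strip() and not line.startswith('#')]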
""" def __init__(self, name): """ :param name: The full path to the file to cache and monitor :type name: string """ Debuggable.__init__(self) #: A string containing the raw data in this file self.data = '' #: The full path to the file self.name = name #: The FAM object used to receive notifications of changes self.fam = Bcfg2.Server.FileMonitor.get_fam() def HandleEvent(self, event=None): """HandleEvent is called whenever the FAM registers an event. :param event: The event object :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ if event and event.code2str() not in ['exists', 'changed', 'created']: return try: self.data = open(self.name).read() except IOError: err = sys.exc_info()[1] self.logger.error("Failed to read file %s: %s" % (self.name, err)) self.Index() def Index(self): """ Index() is called by :func:`HandleEvent` every time the data changes, and parses the data into usable data as required.""" pass def __repr__(self): return "%s: %s" % (self.__class__.__name__, self.name) class DirectoryBacked(Debuggable): """ DirectoryBacked objects represent a directory that contains files, represented by objects of the type listed in :attr:`__child__`, and other directories recursively. It monitors for new files and directories to be added, and creates new objects as required to track those.""" #: The type of child objects to create for files contained within #: the directory that is tracked. Default is #: :class:`Bcfg2.Server.Plugin.helpers.FileBacked` __child__ = FileBacked #: Only track and include files whose names (not paths) match this #: compiled regex. patterns = re.compile('.*') #: Preemptively ignore files whose names (not paths) match this #: compiled regex. ``ignore`` can be set to ``None`` to ignore no #: files. If a file is encountered that does not match #: :attr:`patterns` or ``ignore``, then a warning will be produced. ignore = None def __init__(self, data): """ :param data: The path to the data directory that will be monitored :type data: string .. ----- .. autoattribute:: __child__ """ Debuggable.__init__(self) self.data = os.path.normpath(data) self.fam = Bcfg2.Server.FileMonitor.get_fam() #: self.entries contains information about the files monitored #: by this object. The keys of the dict are the relative #: paths to the files. The values are the objects (of type #: :attr:`__child__`) that handle their contents. self.entries = {} #: self.handles contains information about the directories #: monitored by this object. The keys of the dict are the #: values returned by the initial fam.AddMonitor() call (which #: appear to be integers). The values are the relative paths of #: the directories. self.handles = {} # Monitor everything in the plugin's directory if not os.path.exists(self.data): self.logger.warning("%s does not exist, creating" % self.data) os.makedirs(self.data) self.add_directory_monitor('') def set_debug(self, debug): for entry in self.entries.values(): if isinstance(entry, Debuggable): entry.set_debug(debug) return Debuggable.set_debug(self, debug) def __getitem__(self, key): return self.entries[key] def __len__(self): return len(self.entries) def __delitem__(self, key): del self.entries[key] def __setitem__(self, key, val): self.entries[key] = val def __iter__(self): return iter(list(self.entries.items())) def add_directory_monitor(self, relative): """ Add a new directory to the FAM for monitoring. :param relative: Path name to monitor. This must be relative to the plugin's directory. 
An empty string value ("") will cause the plugin directory itself to be monitored. :type relative: string :returns: None """ dirpathname = os.path.join(self.data, relative) if relative not in self.handles.values(): if not os.path.isdir(dirpathname): self.logger.error("%s is not a directory" % dirpathname) return reqid = self.fam.AddMonitor(dirpathname, self) self.handles[reqid] = relative def add_entry(self, relative, event): """ Add a new file to our tracked entries, and to our FAM for monitoring. :param relative: Path name to monitor. This must be relative to the plugin's directory. :type relative: string: :param event: FAM event that caused this entry to be added. :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ self.entries[relative] = self.__child__(os.path.join(self.data, relative)) self.entries[relative].HandleEvent(event) def HandleEvent(self, event): # pylint: disable=R0912 """ Handle FAM events. This method is invoked by the FAM when it detects a change to a filesystem object we have requsted to be monitored. This method manages the lifecycle of events related to the monitored objects, adding them to our list of entries and creating objects of type :attr:`__child__` that actually do the domain-specific processing. When appropriate, it propogates events those objects by invoking their HandleEvent method in turn. :param event: FAM event that caused this entry to be added. :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ action = event.code2str() # Exclude events for actions we don't care about if action == 'endExist': return if event.requestID not in self.handles: self.logger.warn("Got %s event with unknown handle (%s) for %s" % (action, event.requestID, event.filename)) return # Clean up path names event.filename = os.path.normpath(event.filename) if event.filename.startswith(self.data): # the first event we get is on the data directory itself event.filename = event.filename[len(self.data) + 1:] if self.ignore and self.ignore.search(event.filename): self.logger.debug("Ignoring event %s" % event.filename) return # Calculate the absolute and relative paths this event refers to abspath = os.path.join(self.data, self.handles[event.requestID], event.filename) relpath = os.path.join(self.handles[event.requestID], event.filename).lstrip('/') if action == 'deleted': for key in list(self.entries.keys()): if key.startswith(relpath): del self.entries[key] # We remove values from self.entries, but not # self.handles, because the FileMonitor doesn't stop # watching a directory just because it gets deleted. If it # is recreated, we will start getting notifications for it # again without having to add a new monitor. elif os.path.isdir(abspath): # Deal with events for directories if action in ['exists', 'created']: self.add_directory_monitor(relpath) elif action == 'changed': if relpath in self.entries: # Ownerships, permissions or timestamps changed on # the directory. None of these should affect the # contents of the files, though it could change # our ability to access them. # # It seems like the right thing to do is to cancel # monitoring the directory and then begin # monitoring it again. But the current FileMonitor # class doesn't support canceling, so at least let # the user know that a restart might be a good # idea. self.logger.warn("Directory properties for %s changed, " "please consider restarting the server" % abspath) else: # Got a "changed" event for a directory that we # didn't know about. 
Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. self.logger.warn("Got %s event for unexpected dir %s" % (action, abspath)) self.add_directory_monitor(relpath) else: self.logger.warn("Got unknown dir event %s %s %s" % (event.requestID, event.code2str(), abspath)) elif self.patterns.search(event.filename): if action in ['exists', 'created']: self.add_entry(relpath, event) elif action == 'changed': if relpath in self.entries: self.entries[relpath].HandleEvent(event) else: # Got a "changed" event for a file that we didn't # know about. Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. self.logger.warn("Got %s event for unexpected file %s" % (action, abspath)) self.add_entry(relpath, event) else: self.logger.warn("Got unknown file event %s %s %s" % (event.requestID, event.code2str(), abspath)) else: self.logger.warn("Could not process filename %s; ignoring" % event.filename) class XMLFileBacked(FileBacked): """ This object parses and caches XML file data in memory. It can be used as a standalone object or as a part of :class:`Bcfg2.Server.Plugin.helpers.XMLDirectoryBacked` """ #: If ``__identifier__`` is set, then a top-level tag with the #: specified name will be required on the file being cached. Its #: value will be available as :attr:`label`. To disable this #: behavior, set ``__identifier__`` to ``None``. __identifier__ = 'name' #: If ``create`` is set, then it overrides the ``create`` argument #: to the constructor. create = None def __init__(self, filename, should_monitor=False, create=None): """ :param filename: The full path to the file to cache and monitor :type filename: string :param should_monitor: Whether or not to monitor this file for changes. It may be useful to disable monitoring when, for instance, the file is monitored by another object (e.g., an :class:`Bcfg2.Server.Plugin.helpers.XMLDirectoryBacked` object). :type should_monitor: bool :param create: Create the file if it doesn't exist. ``create`` can be either an :class:`lxml.etree._Element` object, which will be used as initial content, or a string, which will be used as the name of the (empty) tag that will be the initial content of the file. :type create: lxml.etree._Element or string .. ----- .. autoattribute:: __identifier__ """ FileBacked.__init__(self, filename) #: The raw XML data contained in the file as an #: :class:`lxml.etree.ElementTree` object, with XIncludes #: processed. self.xdata = None #: The label of this file. This is determined from the #: top-level tag in the file, which must have an attribute #: specified by :attr:`__identifier__`. self.label = "" #: All entries in this file. By default, all immediate #: children of the top-level XML tag. self.entries = [] #: "Extra" files included in this file by XInclude. self.extras = [] #: Extra FAM monitors set by this object for files included by #: XInclude. self.extra_monitors = [] if ((create is not None or self.create not in [None, False]) and not os.path.exists(self.name)): toptag = create or self.create self.logger.warning("%s does not exist, creating" % self.name) if hasattr(toptag, "getroottree"): el = toptag else: el = lxml.etree.Element(toptag) el.getroottree().write(self.name, xml_declaration=False, pretty_print=True) #: Whether or not to monitor this file for changes. 
self.should_monitor = should_monitor if should_monitor: self.fam.AddMonitor(filename, self) def _follow_xincludes(self, fname=None, xdata=None): """ follow xincludes, adding included files to self.extras """ xinclude = '%sinclude' % Bcfg2.Server.XI_NAMESPACE if xdata is None: if fname is None: xdata = self.xdata.getroottree() else: xdata = lxml.etree.parse(fname) for el in xdata.findall('//' + xinclude): name = el.get("href") if name.startswith("/"): fpath = name else: rel = fname or self.name fpath = os.path.join(os.path.dirname(rel), name) # expand globs in xinclude, a bcfg2-specific extension extras = glob.glob(fpath) if not extras: msg = "%s: %s does not exist, skipping" % (self.name, name) if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE): self.logger.debug(msg) else: self.logger.error(msg) # add a FAM monitor for this path. this isn't perfect # -- if there's an xinclude of "*.xml", we'll watch # the literal filename "*.xml". but for non-globbing # filenames, it works fine. if fpath not in self.extra_monitors: self.add_monitor(fpath) parent = el.getparent() parent.remove(el) for extra in extras: if extra != self.name: lxml.etree.SubElement(parent, xinclude, href=extra) if extra not in self.extras: self.extras.append(extra) self._follow_xincludes(fname=extra) if extra not in self.extra_monitors: self.add_monitor(extra) def Index(self): self.xdata = lxml.etree.XML(self.data, base_url=self.name, parser=Bcfg2.Server.XMLParser) self.extras = [] self._follow_xincludes() if self.extras: try: self.xdata.getroottree().xinclude() except lxml.etree.XIncludeError: err = sys.exc_info()[1] self.logger.error("XInclude failed on %s: %s" % (self.name, err)) self.entries = self.xdata.getchildren() if self.__identifier__ is not None: self.label = self.xdata.attrib[self.__identifier__] Index.__doc__ = FileBacked.Index.__doc__ def add_monitor(self, fpath): """ Add a FAM monitor to a file that has been XIncluded. :param fpath: The full path to the file to monitor :type fpath: string :returns: None """ self.extra_monitors.append(fpath) self.fam.AddMonitor(fpath, self) def __iter__(self): return iter(self.entries) def __str__(self): return "%s at %s" % (self.__class__.__name__, self.name) class StructFile(XMLFileBacked): """ StructFiles are XML files that contain a set of structure file formatting logic for handling ```` and ```` tags. .. ----- .. autoattribute:: __identifier__ .. automethod:: _include_element """ #: If ``__identifier__`` is not None, then it must be the name of #: an XML attribute that will be required on the top-level tag of #: the file being cached __identifier__ = None #: Whether or not to enable encryption encryption = True #: Callbacks used to determine if children of items with the given #: tags should be included in the return value of #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` and #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. Each #: callback is passed the same arguments as #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element`. #: It should return True if children of the element should be #: included in the match, False otherwise. 
The callback does #: *not* need to consider negation; that will be handled in #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element` _include_tests = \ dict(Group=lambda el, md, *args: el.get('name') in md.groups, Client=lambda el, md, *args: el.get('name') == md.hostname) def __init__(self, filename, should_monitor=False, create=None): XMLFileBacked.__init__(self, filename, should_monitor=should_monitor, create=create) self.template = None def Index(self): XMLFileBacked.Index(self) if (self.name.endswith('.genshi') or ('py' in self.xdata.nsmap and self.xdata.nsmap['py'] == 'http://genshi.edgewall.org/')): try: loader = genshi.template.TemplateLoader() self.template = \ loader.load(self.name, cls=genshi.template.MarkupTemplate, encoding=Bcfg2.Options.setup.encoding) except LookupError: err = sys.exc_info()[1] self.logger.error('Genshi lookup error in %s: %s' % (self.name, err)) except genshi.template.TemplateError: err = sys.exc_info()[1] self.logger.error('Genshi template error in %s: %s' % (self.name, err)) except genshi.input.ParseError: err = sys.exc_info()[1] self.logger.error('Genshi parse error in %s: %s' % (self.name, err)) if HAS_CRYPTO and self.encryption: for el in self.xdata.xpath("//*[@encrypted]"): try: el.text = self._decrypt(el).encode('ascii', 'xmlcharrefreplace') except UnicodeDecodeError: self.logger.info("%s: Decrypted %s to gibberish, skipping" % (self.name, el.tag)) except Bcfg2.Server.Encryption.EVPError: lax_decrypt = self.xdata.get( "lax_decryption", str(Bcfg2.Options.setup.lax_decryption)).lower() == \ "true" msg = "Failed to decrypt %s element in %s" % (el.tag, self.name) if lax_decrypt: self.logger.debug(msg) else: raise PluginExecutionError(msg) Index.__doc__ = XMLFileBacked.Index.__doc__ def _decrypt(self, element): """ Decrypt a single encrypted properties file element """ if not element.text or not element.text.strip(): return passes = Bcfg2.Options.setup.passphrases try: passphrase = passes[element.get("encrypted")] return Bcfg2.Server.Encryption.ssl_decrypt(element.text, passphrase) except KeyError: raise Bcfg2.Server.Encryption.EVPError("No passphrase named '%s'" % element.get("encrypted")) raise Bcfg2.Server.Encryption.EVPError("Failed to decrypt") def _include_element(self, item, metadata, *args): """ Determine if an XML element matches the other arguments. The first argument is always the XML element to match, and the second will always be a single :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` object representing the metadata to match against. Subsequent arguments are as given to :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` or :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. In the base StructFile implementation, there are no additional arguments; in classes that inherit from StructFile, see the :func:`Match` and :func:`XMLMatch` method signatures.""" if isinstance(item, lxml.etree._Comment): # pylint: disable=W0212 return False if item.tag in self._include_tests: negate = item.get('negate', 'false').lower() == 'true' return negate != self._include_tests[item.tag](item, metadata, *args) else: return True def _render(self, metadata): """ Render the template for the given client metadata :param metadata: Client metadata to match against. 
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: lxml.etree._Element object representing the rendered XML data """ stream = self.template.generate( **get_xml_template_data(self, metadata)).filter(removecomment) return lxml.etree.XML(stream.render('xml', strip_whitespace=False).encode(), parser=Bcfg2.Server.XMLParser) def _match(self, item, metadata, *args): """ recursive helper for :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` """ if self._include_element(item, metadata, *args): if item.tag in self._include_tests.keys(): rv = [] if self._include_element(item, metadata, *args): for child in item.iterchildren(): rv.extend(self._match(child, metadata, *args)) return rv else: rv = copy.deepcopy(item) for child in rv.iterchildren(): rv.remove(child) for child in item.iterchildren(): rv.extend(self._match(child, metadata, *args)) return [rv] else: return [] def _do_match(self, metadata, *args): """ Helper for :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that lets a subclass of StructFile easily redefine the public Match() interface to accept a different number of arguments. This provides a sane prototype for the Match() function while keeping the internals consistent. """ rv = [] if self.template is None: entries = self.entries else: entries = self._render(metadata).getchildren() for child in entries: rv.extend(self._match(child, metadata, *args)) return rv def Match(self, metadata): """ Return matching fragments of the data in this file. A tag is considered to match if all ```` and ```` tags that are its ancestors match the metadata given. Since tags are included unmodified, it's possible for a tag to itself match while containing non-matching children. Consequently, only the tags contained in the list returned by Match() (and *not* their descendents) should be considered to match the metadata. Match() returns matching fragments in document order. :param metadata: Client metadata to match against. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: list of lxml.etree._Element objects """ return self._do_match(metadata) def _xml_match(self, item, metadata, *args): """ recursive helper for :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` """ if self._include_element(item, metadata, *args): if item.tag in self._include_tests.keys(): for child in item.iterchildren(): item.remove(child) item.getparent().append(child) self._xml_match(child, metadata, *args) if item.text: if item.getparent().text is None: item.getparent().text = item.text else: item.getparent().text += item.text item.getparent().remove(item) else: for child in item.iterchildren(): self._xml_match(child, metadata, *args) else: item.getparent().remove(item) def _do_xmlmatch(self, metadata, *args): """ Helper for :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that lets a subclass of StructFile easily redefine the public Match() interface to accept a different number of arguments. This provides a sane prototype for the Match() function while keeping the internals consistent. """ if self.template is None: rv = copy.deepcopy(self.xdata) else: rv = self._render(metadata) for child in rv.iterchildren(): self._xml_match(child, metadata, *args) return rv def XMLMatch(self, metadata): """ Return a rebuilt XML document that only contains the matching portions of the original file. A tag is considered to match if all ```` and ```` tags that are its ancestors match the metadata given. Unlike :func:`Match`, the document returned by XMLMatch will only contain matching data. 
All ```` and ```` tags will have been stripped out. The new document produced by XMLMatch() is not necessarily in the same order as the original document. :param metadata: Client metadata to match against. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: lxml.etree._Element """ return self._do_xmlmatch(metadata) class InfoXML(StructFile): """ InfoXML files contain Group, Client, and Path tags to set the metadata (permissions, owner, etc.) of files. """ encryption = False _include_tests = copy.copy(StructFile._include_tests) _include_tests['Path'] = lambda el, md, entry, *args: \ entry.get('realname', entry.get('name')) == el.get("name") def Match(self, metadata, entry): # pylint: disable=W0221 """ Implementation of :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that considers Path tags to allow ``info.xml`` files to set different file metadata for different file paths. """ return self._do_match(metadata, entry) def XMLMatch(self, metadata, entry): # pylint: disable=W0221 """ Implementation of :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that considers Path tags to allow ``info.xml`` files to set different file metadata for different file paths. """ return self._do_xmlmatch(metadata, entry) def BindEntry(self, entry, metadata): """ Bind the matching file metadata for this client and entry to the entry. :param entry: The abstract entry to bind the info to. This will be modified in place :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ fileinfo = self.Match(metadata, entry) if len(fileinfo) == 0: raise PluginExecutionError("No metadata found in %s for %s" % (self.name, entry.get('name'))) elif len(fileinfo) > 1: self.logger.warning("Multiple file metadata found in %s for %s" % (self.name, entry.get('name'))) for attr, val in fileinfo[0].attrib.items(): entry.set(attr, val) class XMLDirectoryBacked(DirectoryBacked): """ :class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked` for XML files. """ #: Only track and include files whose names (not paths) match this #: compiled regex. patterns = re.compile(r'^.*\.xml$') #: The type of child objects to create for files contained within #: the directory that is tracked. Default is #: :class:`Bcfg2.Server.Plugin.helpers.XMLFileBacked` __child__ = XMLFileBacked class PriorityStructFile(StructFile): """ A StructFile where each file has a priority, given as a top-level XML attribute. """ def __init__(self, filename, should_monitor=False): StructFile.__init__(self, filename, should_monitor=should_monitor) self.priority = -1 __init__.__doc__ = StructFile.__init__.__doc__ def Index(self): StructFile.Index(self) try: self.priority = int(self.xdata.get('priority')) except (ValueError, TypeError): raise PluginExecutionError("Got bogus priority %s for file %s" % (self.xdata.get('priority'), self.name)) Index.__doc__ = StructFile.Index.__doc__ class PrioDir(Plugin, Generator, XMLDirectoryBacked): """ PrioDir handles a directory of XML files where each file has a set priority. .. ----- .. autoattribute:: __child__ """ #: The type of child objects to create for files contained within #: the directory that is tracked. 
Default is #: :class:`Bcfg2.Server.Plugin.helpers.PriorityStructFile` __child__ = PriorityStructFile def __init__(self, core): Plugin.__init__(self, core) Generator.__init__(self) XMLDirectoryBacked.__init__(self, self.data) __init__.__doc__ = Plugin.__init__.__doc__ def HandleEvent(self, event): XMLDirectoryBacked.HandleEvent(self, event) self.Entries = {} for src in self.entries.values(): for child in src.xdata.iterchildren(): if child.tag in ['Group', 'Client']: continue if child.tag not in self.Entries: self.Entries[child.tag] = dict() self.Entries[child.tag][child.get("name")] = self.BindEntry HandleEvent.__doc__ = XMLDirectoryBacked.HandleEvent.__doc__ def _matches(self, entry, metadata, candidate): # pylint: disable=W0613 """ Whether or not a given candidate matches the abstract entry given. By default this does strict matching (i.e., the entry name matches the candidate name), but this can be overridden to provide regex matching, etc. :param entry: The entry to find a match for :type entry: lxml.etree._Element :param metadata: The metadata to get attributes for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :candidate: A candidate concrete entry to match with :type candidate: lxml.etree._Element :returns: bool """ return (entry.tag == candidate.tag and entry.get('name') == candidate.get('name')) def BindEntry(self, entry, metadata): """ Bind the attributes that apply to an entry to it. The entry is modified in-place. :param entry: The entry to add attributes to. :type entry: lxml.etree._Element :param metadata: The metadata to get attributes for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ matching = [] for src in self.entries.values(): for candidate in src.XMLMatch(metadata).xpath("//%s" % entry.tag): if self._matches(entry, metadata, candidate): matching.append((src, candidate)) if len(matching) == 0: raise PluginExecutionError("No matching source for entry when " "retrieving attributes for %s:%s" % (entry.tag, entry.get('name'))) elif len(matching) == 1: data = matching[0][1] else: prio = [int(m[0].priority) for m in matching] priority = max(prio) if prio.count(priority) > 1: msg = "Found conflicting sources with same priority (%s) " \ "for %s:%s for %s" % (priority, entry.tag, entry.get("name"), metadata.hostname) self.logger.error(msg) self.logger.error([m[0].name for m in matching]) raise PluginExecutionError(msg) for src, candidate in matching: if int(src.priority) == priority: data = candidate break self._apply(entry, data) def _apply(self, entry, data): """ Apply all available values from data onto entry. This sets the available attributes (for all attribues unset in the entry), adds all children and copies the text from data to entry. :param entry: The entry to apply the changes :type entry: lxml.etree._Element :param data: The entry to get the data from :type data: lxml.etree._Element """ if data.text is not None and data.text.strip() != '': entry.text = data.text for item in data.getchildren(): entry.append(copy.copy(item)) for key, val in list(data.attrib.items()): if key not in entry.attrib: entry.attrib[key] = val class Specificity(CmpMixin): """ Represent the specificity of an object; i.e., what client(s) it applies to. It can be group- or client-specific, or apply to all clients. Specificity objects are sortable; objects that are less specific are considered less than objects that are more specific. Objects that apply to all clients are the least specific; objects that apply to a single client are the most specific. 
Objects that apply to groups are sorted by priority. """ def __init__(self, all=False, group=False, # pylint: disable=W0622 hostname=False, prio=0, delta=False): """ :param all: The object applies to all clients. :type all: bool :param group: The object applies only to the given group. :type group: string or False :param hostname: The object applies only to the named client. :type hostname: string or False :param prio: The object has the given priority relative to other objects that also apply to the same group. ```` must be specified with ````. :type prio: int :param delta: The object is a delta (i.e., a .cat or .diff file, not a full file). Deltas are deprecated. :type delta: bool Exactly one of {all|group|hostname} should be given. """ CmpMixin.__init__(self) self.hostname = hostname self.all = all self.group = group self.prio = prio self.delta = delta def matches(self, metadata): """ Return True if the object described by this Specificity object applies to the given client. That is, if this Specificity applies to all clients, or to a group the client is a member of, or to the client individually. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: bool """ return (self.all or self.hostname == metadata.hostname or self.group in metadata.groups) def __cmp__(self, other): # pylint: disable=R0911 """Sort most to least specific.""" if self.all: if other.all: return 0 else: return 1 elif other.all: return -1 elif self.group: if other.hostname: return 1 if other.group and other.prio > self.prio: return 1 if other.group and other.prio == self.prio: return 0 elif other.group: return -1 elif self.hostname and other.hostname: return 0 return -1 def __str__(self): rv = [self.__class__.__name__, ': '] if self.all: rv.append("all") elif self.group: rv.append("Group %s, priority %s" % (self.group, self.prio)) elif self.hostname: rv.append("Host %s" % self.hostname) if self.delta: rv.append(", delta=%s" % self.delta) return "".join(rv) class SpecificData(Debuggable): """ A file that is specific to certain clients, groups, or all clients. """ def __init__(self, name, specific): # pylint: disable=W0613 """ :param name: The full path to the file :type name: string :param specific: A :class:`Bcfg2.Server.Plugin.helpers.Specificity` object describing what clients this file applies to. :type specific: Bcfg2.Server.Plugin.helpers.Specificity """ Debuggable.__init__(self) self.name = name self.specific = specific self.data = None def handle_event(self, event): """ Handle a FAM event. Note that the SpecificData object itself has no FAM, so this must be produced by a parent object (e.g., :class:`Bcfg2.Server.Plugin.helpers.EntrySet`). :param event: The event that applies to this file :type event: Bcfg2.Server.FileMonitor.Event :returns: None :raises: :exc:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ if event.code2str() == 'deleted': return try: self.data = open(self.name).read() except UnicodeDecodeError: self.data = open(self.name, mode='rb').read() except IOError: self.logger.error("Failed to read file %s" % self.name) class EntrySet(Debuggable): """ EntrySets deal with a collection of host- and group-specific files (e.g., :class:`Bcfg2.Server.Plugin.helpers.SpecificData` objects) in a single directory. EntrySets are usually used as part of :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` objects.""" #: Preemptively ignore files whose names (not paths) match this #: compiled regex. ``ignore`` cannot be set to ``None``. 
If a #: file is encountered that does not match the ``basename`` #: argument passed to the constructor or ``ignore``, then a #: warning will be produced. ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.genshi_include)$') # The ``basename`` argument passed to the constructor will be #: processed as a string that contains a regular expression (i.e., #: *not* a compiled regex object) if ``basename_is_regex`` is True, #: and all files that match the regex will be cincluded in the #: EntrySet. If ``basename_is_regex`` is False, then it will be #: considered a plain string and filenames must match exactly. basename_is_regex = False def __init__(self, basename, path, entry_type): """ :param basename: The filename or regular expression that files in this EntrySet must match. See :attr:`basename_is_regex` for more details. :type basename: string :param path: The full path to the directory containing files for this EntrySet :type path: string :param entry_type: A callable that returns an object that represents files in this EntrySet. This will usually be a class object, but it can be an object factory or similar callable. See below for the expected signature. :type entry_type: callable The ``entry_type`` callable must have the following signature:: entry_type(filepath, specificity) Where the parameters are: :param filepath: Full path to file :type filepath: string :param specific: A :class:`Bcfg2.Server.Plugin.helpers.Specificity` object describing what clients this file applies to. :type specific: Bcfg2.Server.Plugin.helpers.Specificity Additionally, the object returned by ``entry_type`` must have a ``specific`` attribute that is sortable (e.g., a :class:`Bcfg2.Server.Plugin.helpers.Specificity` object). See :class:`Bcfg2.Server.Plugin.helpers.SpecificData` for an example of a class that can be used as an ``entry_type``. """ Debuggable.__init__(self, name=basename) self.path = path self.entry_type = entry_type self.entries = {} self.metadata = default_path_metadata() self.infoxml = None if self.basename_is_regex: base_pat = basename else: base_pat = re.escape(basename) pattern = r'(.*/)?' + base_pat + \ r'(\.((H_(?P\S+))|(G(?P\d+)_(?P\S+))))?$' #: ``specific`` is a regular expression that is used to #: determine the specificity of a file in this entry set. It #: must have three named groups: ``hostname``, ``prio`` (the #: priority of a group-specific file), and ``group``. The base #: regex is constructed from the ``basename`` argument. It can #: be overridden on a per-entry basis in :func:`entry_init`. self.specific = re.compile(pattern) def set_debug(self, debug): rv = Debuggable.set_debug(self, debug) for entry in self.entries.values(): entry.set_debug(debug) return rv def get_matching(self, metadata): """ Get a list of all entries that apply to the given client. This gets all matching entries; for example, there could be an entry that applies to all clients, multiple group-specific entries, and a client-specific entry, all of which would be returned by get_matching(). You can use :func:`best_matching` to get the single best matching entry. :param metadata: The client metadata to get matching entries for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: list -- all matching ``entry_type`` objects (see the constructor docs for more details) """ return [item for item in list(self.entries.values()) if item.specific.matches(metadata)] def best_matching(self, metadata, matching=None): """ Return the single most specific matching entry from the set of matching entries. 
You can use :func:`get_matching` to get all matching entries. :param metadata: The client metadata to get matching entries for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param matching: The set of matching entries to pick from. If this is not provided, :func:`get_matching` will be called. :type matching: list of ``entry_type`` objects (see the constructor docs for more details) :returns: a single object from the list of matching ``entry_type`` objects :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` if no matching entries are found """ if matching is None: matching = self.get_matching(metadata) if matching: matching.sort(key=operator.attrgetter("specific")) return matching[0] else: raise PluginExecutionError("No matching entries available for %s " "for %s" % (self.path, metadata.hostname)) def handle_event(self, event): """ Dispatch a FAM event to the appropriate function or child ``entry_type`` object. This will probably be handled by a call to :func:`update_metadata`, :func:`reset_metadata`, :func:`entry_init`, or to the ``entry_type`` ``handle_event()`` function. :param event: An event that applies to a file handled by this EntrySet :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ action = event.code2str() if event.filename == 'info.xml': if action in ['exists', 'created', 'changed']: self.update_metadata(event) elif action == 'deleted': self.reset_metadata(event) return if action in ['exists', 'created']: self.entry_init(event) else: if event.filename not in self.entries: self.logger.warning("Got %s event for unknown file %s" % (action, event.filename)) if action == 'changed': # received a bogus changed event; warn, but treat # it like a created event self.entry_init(event) return if action == 'changed': self.entries[event.filename].handle_event(event) elif action == 'deleted': del self.entries[event.filename] def entry_init(self, event, entry_type=None, specific=None): """ Handle the creation of a file on the filesystem and the creation of an object in this EntrySet to track it. :param event: An event that applies to a file handled by this EntrySet :type event: Bcfg2.Server.FileMonitor.Event :param entry_type: Override the default ``entry_type`` for this EntrySet object and create a different object for this entry. See the constructor docs for more information on ``entry_type``. :type entry_type: callable :param specific: Override the default :attr:`specific` regular expression used by this object with a custom regular expression that will be used to determine the specificity of this entry. :type specific: compiled regular expression object :returns: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.SpecificityError` """ if entry_type is None: entry_type = self.entry_type if event.filename in self.entries: self.logger.warn("Got duplicate add for %s" % event.filename) else: fpath = os.path.join(self.path, event.filename) try: spec = self.specificity_from_filename(event.filename, specific=specific) except SpecificityError: if not self.ignore.match(event.filename): self.logger.error("Could not process filename %s; ignoring" % fpath) return self.entries[event.filename] = entry_type(fpath, spec) self.entries[event.filename].handle_event(event) def specificity_from_filename(self, fname, specific=None): """ Construct a :class:`Bcfg2.Server.Plugin.helpers.Specificity` object from a filename and regex. See :attr:`specific` for details on the regex. :param fname: The filename (not full path) of a file that is in this EntrySet's directory. 
It is not necessary to determine first if the filename matches this EntrySet's basename; that can be done by catching :class:`Bcfg2.Server.Plugin.exceptions.SpecificityError` from this function. :type fname: string :param specific: Override the default :attr:`specific` regular expression used by this object with a custom regular expression that will be used to determine the specificity of this entry. :type specific: compiled regular expression object :returns: Object representing the specificity of the file :rtype: :class:`Bcfg2.Server.Plugin.helpers.Specificity` :raises: :class:`Bcfg2.Server.Plugin.exceptions.SpecificityError` if the regex does not match the filename """ if specific is None: specific = self.specific data = specific.match(fname) if not data: raise SpecificityError(fname) kwargs = {} if data.group('hostname'): kwargs['hostname'] = data.group('hostname') elif data.group('group'): kwargs['group'] = data.group('group') kwargs['prio'] = int(data.group('prio')) else: kwargs['all'] = True if 'delta' in data.groupdict(): kwargs['delta'] = data.group('delta') return Specificity(**kwargs) def update_metadata(self, event): """ Process changes to or creation of info.xml files for the EntrySet. :param event: An event that applies to an info handled by this EntrySet :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ fpath = os.path.join(self.path, event.filename) if event.filename == 'info.xml': if not self.infoxml: self.infoxml = InfoXML(fpath) self.infoxml.HandleEvent(event) def reset_metadata(self, event): """ Reset metadata to defaults if info.xml is removed. :param event: An event that applies to an info handled by this EntrySet :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ if event.filename == 'info.xml': self.infoxml = None def bind_info_to_entry(self, entry, metadata): """ Bind the metadata for the given client in the base info.xml for this EntrySet to the entry. :param entry: The abstract entry to bind the info to. This will be modified in place :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ for attr, val in list(self.metadata.items()): entry.set(attr, val) if self.infoxml is not None: self.infoxml.BindEntry(entry, metadata) def bind_entry(self, entry, metadata): """ Return the single best fully-bound entry from the set of available entries for the specified client. :param entry: The abstract entry to bind the info to :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: lxml.etree._Element - the fully-bound entry """ self.bind_info_to_entry(entry, metadata) return self.best_matching(metadata).bind_entry(entry, metadata) class GroupSpool(Plugin, Generator): """ A GroupSpool is a collection of :class:`Bcfg2.Server.Plugin.helpers.EntrySet` objects -- i.e., a directory tree, each directory in which may contain files that are specific to groups/clients/etc. """ #: ``filename_pattern`` is used as the ``basename`` argument to the #: :attr:`es_cls` callable. It may or may not be a regex, #: depending on the :attr:`EntrySet.basename_is_regex` setting. filename_pattern = "" #: ``es_child_cls`` is a callable that will be used as the #: ``entry_type`` argument to the :attr:`es_cls` callable. It must #: return objects that will represent individual files in the #: GroupSpool. For instance, #: :class:`Bcfg2.Server.Plugin.helpers.SpecificData`. 
es_child_cls = object #: ``es_cls`` is a callable that must return objects that will be #: used to represent directories (i.e., sets of entries) within the #: GroupSpool. E.g., #: :class:`Bcfg2.Server.Plugin.helpers.EntrySet`. The returned #: object must implement a callable called ``bind_entry`` that has #: the same signature as :attr:`EntrySet.bind_entry`. es_cls = EntrySet #: The entry type (i.e., the XML tag) handled by this GroupSpool #: object. entry_type = 'Path' def __init__(self, core): Plugin.__init__(self, core) Generator.__init__(self) self.fam = Bcfg2.Server.FileMonitor.get_fam() #: See :class:`Bcfg2.Server.Plugins.interfaces.Generator` for #: details on the Entries attribute. self.Entries[self.entry_type] = {} #: ``entries`` is a dict whose keys are :func:`event_id` return #: values and whose values are :attr:`es_cls` objects. It ties #: the directories handled by this GroupSpools to the #: :attr:`es_cls` objects that handle each directory. self.entries = {} self.handles = {} self.AddDirectoryMonitor('') __init__.__doc__ = Plugin.__init__.__doc__ def add_entry(self, event): """ This method handles two functions: * Adding a new entry of type :attr:`es_cls` to track a new directory. * Passing off an event on a file to the correct entry object to handle it. :param event: An event that applies to a file or directory handled by this GroupSpool :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ epath = self.event_path(event) ident = self.event_id(event) if os.path.isdir(epath): self.AddDirectoryMonitor(epath[len(self.data):]) if ident not in self.entries and os.path.isfile(epath): dirpath = self.data + ident self.entries[ident] = self.es_cls(self.filename_pattern, dirpath, self.es_child_cls) self.Entries[self.entry_type][ident] = \ self.entries[ident].bind_entry if not os.path.isdir(epath): # do not pass through directory events self.entries[ident].handle_event(event) def event_path(self, event): """ Return the full path to the filename affected by an event. :class:`Bcfg2.Server.FileMonitor.Event` objects just contain the filename, not the full path, so this function reconstructs the fill path based on the path to the :attr:`es_cls` object that handles the event. :param event: An event that applies to a file or directory handled by this GroupSpool :type event: Bcfg2.Server.FileMonitor.Event :returns: string """ return os.path.join(self.data, self.handles[event.requestID].lstrip("/"), event.filename) def event_id(self, event): """ Return a string that can be used to relate the event unambiguously to a single :attr:`es_cls` object in the :attr:`entries` dict. In practice, this means: * If the event is on a directory, ``event_id`` returns the full path to the directory. * If the event is on a file, ``event_id`` returns the full path to the directory the file is in. :param event: An event that applies to a file or directory handled by this GroupSpool :type event: Bcfg2.Server.FileMonitor.Event :returns: string """ epath = self.event_path(event) if os.path.isdir(epath): return os.path.join(self.handles[event.requestID].lstrip("/"), event.filename) else: return self.handles[event.requestID].rstrip("/") def set_debug(self, debug): for entry in self.entries.values(): if hasattr(entry, "set_debug"): entry.set_debug(debug) return Plugin.set_debug(self, debug) set_debug.__doc__ = Plugin.set_debug.__doc__ def HandleEvent(self, event): """ HandleEvent is the event dispatcher for GroupSpool objects. 
It receives all events and dispatches them the appropriate handling object (e.g., one of the :attr:`es_cls` objects in :attr:`entries`), function (e.g., :func:`add_entry`), or behavior (e.g., deleting an entire entry set). :param event: An event that applies to a file or directory handled by this GroupSpool :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ action = event.code2str() if event.filename[0] == '/': return ident = self.event_id(event) if action in ['exists', 'created']: self.add_entry(event) elif action == 'changed': if ident in self.entries: self.entries[ident].handle_event(event) else: # got a changed event for a file we didn't know # about. go ahead and process this as a 'created', but # warn self.logger.warning("Got changed event for unknown file %s" % ident) self.add_entry(event) elif action == 'deleted': fbase = self.handles[event.requestID] + event.filename if fbase in self.entries: # a directory was deleted del self.entries[fbase] del self.Entries[self.entry_type][fbase] elif ident in self.entries: self.entries[ident].handle_event(event) elif ident not in self.entries: self.logger.warning("Got deleted event for unknown file %s" % ident) def AddDirectoryMonitor(self, relative): """ Add a FAM monitor to a new directory and set the appropriate event handler. :param relative: The path to the directory relative to the base data directory of the GroupSpool object. :type relative: string :returns: None """ if not relative.endswith('/'): relative += '/' name = self.data + relative if relative not in list(self.handles.values()): if not os.path.isdir(name): self.logger.error("Failed to open directory %s" % name) return reqid = self.fam.AddMonitor(name, self) self.handles[reqid] = relative src/lib/Bcfg2/Server/Plugin/interfaces.py000066400000000000000000000621131303523157100205420ustar00rootroot00000000000000""" Interface definitions for Bcfg2 server plugins """ import os import sys import copy import threading import lxml.etree import Bcfg2.Server import Bcfg2.Options from Bcfg2.Compat import Queue, Empty, Full, cPickle from Bcfg2.Server.Plugin.base import Plugin from Bcfg2.Server.Plugin.exceptions import PluginInitError, \ MetadataRuntimeError, MetadataConsistencyError # Since this file basically just contains abstract interface # descriptions, just about every function declaration has unused # arguments. Disable this pylint warning for the whole file. # pylint: disable=W0613 class Generator(object): """ Generator plugins contribute to literal client configurations. That is, they generate entry contents. An entry is generated in one of two ways: #. The Bcfg2 core looks in the ``Entries`` dict attribute of the plugin object. ``Entries`` is expected to be a dict whose keys are entry tags (e.g., ``"Path"``, ``"Service"``, etc.) and whose values are dicts; those dicts should map the ``name`` attribute of an entry to a callable that will be called to generate the content. The callable will receive two arguments: the abstract entry (as an lxml.etree._Element object), and the client metadata object the entry is being generated for. #. If the entry is not listed in ``Entries``, the Bcfg2 core calls :func:`HandlesEntry`; if that returns True, then it calls :func:`HandleEntry`. """ def HandlesEntry(self, entry, metadata): """ HandlesEntry is the slow path method for routing configuration binding requests. It is called if the ``Entries`` dict does not contain a method for binding the entry. 
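A hypothetical sketch of the slow-path pair (the tag and name checks
are invented; a real plugin would apply its own logic in both
methods)::

    def HandlesEntry(self, entry, metadata):
        return (entry.tag == 'Path' and
                entry.get('name', '').startswith('/etc/example/'))

    def HandleEntry(self, entry, metadata):
        # fully bind the entry in place and return it
        entry.set('type', 'file')
        entry.text = 'contents for %s' % metadata.hostname
        return entry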
:param entry: The entry to bind :type entry: lxml.etree._Element :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: bool - Whether or not this plugin can handle the entry :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ return False def HandleEntry(self, entry, metadata): """ HandleEntry is the slow path method for binding configuration binding requests. It is called if the ``Entries`` dict does not contain a method for binding the entry, and :func:`HandlesEntry` returns True. :param entry: The entry to bind :type entry: lxml.etree._Element :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: lxml.etree._Element - The fully bound entry :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ return entry class Structure(object): """ Structure Plugins contribute to abstract client configurations. That is, they produce lists of entries that will be generated for a client. """ def BuildStructures(self, metadata): """ Build a list of lxml.etree._Element objects that will be added to the top-level ```` tag of the client configuration. Consequently, each object in the list returned by ``BuildStructures()`` must consist of a container tag (e.g., ```` or ````) which contains the entry tags. It must not return a list of entry tags. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: list of lxml.etree._Element objects """ raise NotImplementedError class Metadata(object): """ Metadata plugins handle initial metadata construction, accumulating data from :class:`Connector` plugins, and producing :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` objects. """ def viz(self, hosts, bundles, key, only_client, colors): """ Return a string containing a graphviz document that maps out the Metadata for :ref:`bcfg2-admin viz ` :param hosts: Include hosts in the graph :type hosts: bool :param bundles: Include bundles in the graph :type bundles: bool :param key: Include a key in the graph :type key: bool :param only_client: Only include data for the specified client :type only_client: string :param colors: Use the specified graphviz colors :type colors: list of strings :return: string """ raise NotImplementedError def set_version(self, client, version): """ Set the version for the named client to the specified version string. :param client: Hostname of the client :type client: string :param profile: Client Bcfg2 version :type profile: string :return: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.MetadataRuntimeError`, :class:`Bcfg2.Server.Plugin.exceptions.MetadataConsistencyError` """ pass def set_profile(self, client, profile, address): """ Set the profile for the named client to the named profile group. :param client: Hostname of the client :type client: string :param profile: Name of the profile group :type profile: string :param address: Address pair of ``(, )`` :type address: tuple :return: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.MetadataRuntimeError`, :class:`Bcfg2.Server.Plugin.exceptions.MetadataConsistencyError` """ pass def resolve_client(self, address, cleanup_cache=False): """ Resolve the canonical name of this client. If this method is not implemented, the hostname claimed by the client is used. (This may be a security risk; it's highly recommended that you implement ``resolve_client`` if you are writing a Metadata plugin.) 
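A hypothetical reverse-DNS sketch (error handling omitted; a real
implementation would likely also consult the plugin's own client
records and aliases)::

    import socket

    def resolve_client(self, address, cleanup_cache=False):
        # address is an (<ip>, <hostname>) pair; trust DNS, not the
        # hostname claimed by the client
        return socket.gethostbyaddr(address[0])[0]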
:param address: Address pair of ``(, )`` :type address: tuple :param cleanup_cache: Whether or not to remove expire the entire client hostname resolution class :type cleanup_cache: bool :return: string - canonical client hostname :raises: :class:`Bcfg2.Server.Plugin.exceptions.MetadataRuntimeError`, :class:`Bcfg2.Server.Plugin.exceptions.MetadataConsistencyError` """ return address[1] def AuthenticateConnection(self, cert, user, password, address): """ Authenticate the given client. :param cert: an x509 certificate :type cert: dict :param user: The username of the user trying to authenticate :type user: string :param password: The password supplied by the client :type password: string :param addresspair: An address pair of ``(, )`` :type addresspair: tuple :return: bool - True if the authenticate succeeds, False otherwise """ raise NotImplementedError def get_initial_metadata(self, client_name): """ Return a :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` object that fully describes everything the Metadata plugin knows about the named client. :param client_name: The hostname of the client :type client_name: string :return: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ raise NotImplementedError def merge_additional_data(self, imd, source, data): """ Add arbitrary data from a :class:`Connector` plugin to the given metadata object. :param imd: An initial metadata object :type imd: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param source: The name of the plugin providing this data :type source: string :param data: The data to add :type data: any :return: None """ raise NotImplementedError def merge_additional_groups(self, imd, groups): """ Add groups from a :class:`Connector` plugin to the given metadata object. :param imd: An initial metadata object :type imd: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param groups: The groups to add :type groups: list of strings :return: None """ raise NotImplementedError def update_client_list(self): """ Re-read the cached list of clients """ raise NotImplementedError class Connector(object): """ Connector plugins augment client metadata instances with additional data, additional groups, or both. """ def get_additional_groups(self, metadata): """ Return a list of additional groups for the given client. Each group can be either the name of a group (a string), or a :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object that defines other data besides just the name. Note that you cannot return a :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object that clobbers a group defined by another plugin; the original group will be used instead. For instance, assume the following in ``Metadata/groups.xml``: .. code-block:: xml ... You could not subsequently return a :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object with ``public=True``; a warning would be issued, and the original (non-public) ``foo`` group would be used. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: list of strings or :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` objects. """ return list() def get_additional_data(self, metadata): """ Return arbitrary additional data for the given ClientMetadata object. By convention this is usually a dict object, but doesn't need to be. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: dict """ return dict() class Probing(object): """ Probing plugins can collect data from clients and process it. 
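A hypothetical sketch (the plugin name, probe tag name, and shell
script are invented; only the ``name``/``source``/``interpreter``
attributes described under :func:`GetProbes` come from this
interface)::

    class DiskProbe(Plugin, Probing):
        def GetProbes(self, metadata):
            probe = lxml.etree.Element('probe', name='diskusage',
                                       source=self.name,
                                       interpreter='/bin/sh')
            probe.text = 'df -P /'
            return [probe]

        def ReceiveData(self, metadata, datalist):
            # each item in datalist holds one probe's output as text
            for data in datalist:
                self.debug_log("%s: %s" % (metadata.hostname, data.text))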
""" def GetProbes(self, metadata): """ Return a list of probes for the given client. Each probe should be an lxml.etree._Element object that adheres to the following specification. Each probe must the following attributes: * ``name``: The unique name of the probe. * ``source``: The origin of the probe; probably the name of the plugin that supplies the probe. * ``interpreter``: The command that will be run on the client to interpret the probe script. Compiled (i.e., non-interpreted) probes are not supported. The text of the XML tag should be the contents of the probe, i.e., the code that will be run on the client. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: list of lxml.etree._Element objects """ raise NotImplementedError def ReceiveData(self, metadata, datalist): """ Process data returned from the probes for the given client. ``datalist`` is a list of lxml.etree._Element objects, each of which is a single tag; the ``name`` attribute holds the unique name of the probe that was run, and the text contents of the tag hold the results of the probe. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param datalist: The probe data :type datalist: list of lxml.etree._Element objects :return: None """ raise NotImplementedError class Statistics(Plugin): """ Statistics plugins handle statistics for clients. In general, you should avoid using Statistics and use :class:`ThreadedStatistics` instead.""" create = False def process_statistics(self, client, xdata): """ Process the given XML statistics data for the specified client. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The statistics data :type data: lxml.etree._Element :return: None """ raise NotImplementedError class Threaded(object): """ Threaded plugins use threads in any way. The thread must be started after daemonization, so this class implements a single method, :func:`start_threads`, that can be used to start threads after daemonization of the server core. """ def start_threads(self): """ Start this plugin's threads after daemonization. :return: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` """ raise NotImplementedError class ThreadedStatistics(Statistics, Threaded, threading.Thread): """ ThreadedStatistics plugins process client statistics in a separate thread. 
""" def __init__(self, core): Statistics.__init__(self, core) Threaded.__init__(self) threading.Thread.__init__(self) # Event from the core signaling an exit self.terminate = core.terminate self.work_queue = Queue(100000) self.pending_file = os.path.join(Bcfg2.Options.setup.repository, "etc", "%s.pending" % self.name) self.daemon = False def start_threads(self): self.start() def _save(self): """Save any pending data to a file.""" pending_data = [] try: while not self.work_queue.empty(): (metadata, xdata) = self.work_queue.get_nowait() data = \ lxml.etree.tostring(xdata, xml_declaration=False).decode("UTF-8") pending_data.append((metadata.hostname, data)) except Empty: pass try: savefile = open(self.pending_file, 'w') cPickle.dump(pending_data, savefile) savefile.close() self.logger.info("Saved pending %s data" % self.name) except (IOError, TypeError): err = sys.exc_info()[1] self.logger.warning("Failed to save pending data: %s" % err) def _load(self): """Load any pending data from a file.""" if not os.path.exists(self.pending_file): return True pending_data = [] try: savefile = open(self.pending_file, 'r') pending_data = cPickle.load(savefile) savefile.close() except (IOError, cPickle.UnpicklingError): err = sys.exc_info()[1] self.logger.warning("Failed to load pending data: %s" % err) return False for (pmetadata, pdata) in pending_data: # check that shutdown wasnt called early if self.terminate.isSet(): return False try: while True: try: metadata = self.core.build_metadata(pmetadata) break except MetadataRuntimeError: pass self.terminate.wait(5) if self.terminate.isSet(): return False self.work_queue.put_nowait( (metadata, lxml.etree.XML(pdata, parser=Bcfg2.Server.XMLParser))) except Full: self.logger.warning("Queue.Full: Failed to load queue data") break except lxml.etree.LxmlError: lxml_error = sys.exc_info()[1] self.logger.error("Unable to load saved interaction: %s" % lxml_error) except MetadataConsistencyError: self.logger.error("Unable to load metadata for save " "interaction: %s" % pmetadata) try: os.unlink(self.pending_file) except OSError: self.logger.error("Failed to unlink save file: %s" % self.pending_file) self.logger.info("Loaded pending %s data" % self.name) return True def run(self): if not self._load(): return while not self.terminate.isSet() and self.work_queue is not None: try: (client, xdata) = self.work_queue.get(block=True, timeout=2) except Empty: continue except: # we want to catch all exceptions here so that a stray # error doesn't kill the entire statistics thread. For # instance, if a bad value gets pushed onto the queue # and the assignment above raises TypeError, we want # to report the error, ignore the bad value, and # continue processing statistics. self.logger.error("Unknown error processing statistics: %s" % sys.exc_info()[1]) continue self.handle_statistic(client, xdata) if self.work_queue is not None and not self.work_queue.empty(): self._save() def process_statistics(self, metadata, data): try: self.work_queue.put_nowait((metadata, copy.copy(data))) except Full: self.logger.warning("%s: Queue is full. Dropping interactions." % self.name) def handle_statistic(self, metadata, data): """ Process the given XML statistics data for the specified client object. This differs from the :func:`Statistics.process_statistics` method only in that ThreadedStatistics first adds the data to a queue, and then processes them in a separate thread. 
:param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The statistics data :type data: lxml.etree._Element :return: None """ raise NotImplementedError # pylint: disable=C0111 # Someone who understands these interfaces better needs to write docs # for PullSource and PullTarget class PullSource(object): def GetExtra(self, client): return [] def GetCurrentEntry(self, client, e_type, e_name): raise NotImplementedError class PullTarget(object): def AcceptChoices(self, entry, metadata): raise NotImplementedError def AcceptPullData(self, specific, new_entry, verbose): raise NotImplementedError # pylint: enable=C0111 class Decision(object): """ Decision plugins produce decision lists for affecting which entries are actually installed on clients. """ def GetDecisions(self, metadata, mode): """ Return a list of tuples of ``(, )`` to be used as the decision list for the given client in the specified mode. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param mode: The decision mode ("whitelist" or "blacklist") :type mode: string :return: list of tuples """ raise NotImplementedError class StructureValidator(object): """ StructureValidator plugins can modify the list of structures after it has been created but before the entries have been concretely bound. """ def validate_structures(self, metadata, structures): """ Given a list of structures (i.e., of tags that contain entry tags), modify that list or the structures in it in-place. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param config: A list of lxml.etree._Element objects describing the structures (i.e., bundles) for this client. This can be modified in place. :type config: list of lxml.etree._Element :returns: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.ValidationError` """ raise NotImplementedError class GoalValidator(object): """ GoalValidator plugins can modify the concretely-bound configuration of a client as a last stage before the configuration is sent to the client. """ def validate_goals(self, metadata, config): """ Given a monolithic XML document of the full configuration, modify the document in-place. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param config: The full configuration for the client :type config: lxml.etree._Element :returns: None :raises: :class:`Bcfg2.Server.Plugin.exceptions:ValidationError` """ raise NotImplementedError class Version(Plugin): """ Version plugins interact with various version control systems. """ create = False options = Plugin.options + [ Bcfg2.Options.PathOption(cf=('server', 'vcs_root'), default='', help='Server VCS repository root')] #: The path to the VCS metadata file or directory, relative to the #: base of the Bcfg2 repository. E.g., for Subversion this would #: be ".svn" __vcs_metadata_path__ = None __rmi__ = Plugin.__rmi__ + ['get_revision'] def __init__(self, core): Plugin.__init__(self, core) if self.__vcs_metadata_path__: self.vcs_path = os.path.join(Bcfg2.Options.setup.vcs_root, self.__vcs_metadata_path__) if not os.path.exists(self.vcs_path): raise PluginInitError("%s is not present" % self.vcs_path) else: self.vcs_path = None __init__.__doc__ = Plugin.__init__.__doc__ + """ .. autoattribute:: __vcs_metadata_path__ """ def get_revision(self): """ Return the current revision of the Bcfg2 specification. 
This will be included in the ``revision`` attribute of the top-level tag of the XML configuration sent to the client. :returns: string - the current version """ raise NotImplementedError class ClientRunHooks(object): """ ClientRunHooks can hook into various parts of a client run to perform actions at various times without needing to pretend to be a different plugin type. """ def start_client_run(self, metadata): """ Invoked at the start of a client run, after all probe data has been received and decision lists have been queried (if applicable), but before the configuration is generated. :param metadata: The client metadata object :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ pass def end_client_run(self, metadata): """ Invoked at the end of a client run, immediately after :class:`GoalValidator` plugins have been run and just before the configuration is returned to the client. :param metadata: The client metadata object :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ pass def end_statistics(self, metadata): """ Invoked after statistics are processed for a client. :param metadata: The client metadata object :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ pass class ClientACLs(object): """ ClientACLs are used to grant or deny access to different XML-RPC calls based on client IP or metadata. """ def check_acl_ip(self, address, rmi): """ Check if the given IP address is authorized to make the named XML-RPC call. :param address: The address pair of the client to check ACLs for :type address: tuple of (, ) :param rmi: The fully-qualified name of the RPC call :param rmi: string :returns: bool or None - True to allow, False to deny, None to defer to metadata ACLs """ return True def check_acl_metadata(self, metadata, rmi): """ Check if the given client is authorized to make the named XML-RPC call. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param rmi: The fully-qualified name of the RPC call :param rmi: string :returns: bool """ return True class TemplateDataProvider(object): """ TemplateDataProvider plugins provide variables to templates for use in rendering. """ def get_template_data(self, entry, metadata, template): """ Get a dict of variables that will be supplied to a Cfg template for rendering """ return dict() def get_xml_template_data(self, structfile, metadata): """ Get a dict of variables that will be supplied to an XML template (e.g., a bundle) for rendering """ return dict() src/lib/Bcfg2/Server/Plugins/000077500000000000000000000000001303523157100162255ustar00rootroot00000000000000src/lib/Bcfg2/Server/Plugins/ACL.py000066400000000000000000000134301303523157100171770ustar00rootroot00000000000000""" Support for client ACLs based on IP address and client metadata """ import os import struct import socket import Bcfg2.Server.Plugin def rmi_names_equal(first, second): """ Compare two XML-RPC method names and see if they match. Resolves some limited wildcards; see :ref:`server-plugins-misc-acl-wildcards` for details. 
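Returning to the TemplateDataProvider interface above, a hypothetical provider (the plugin name, group name, and variable are all invented) might supply one extra variable to every rendered template:

.. code-block:: python

    import Bcfg2.Server.Plugin


    class DeployInfo(Bcfg2.Server.Plugin.Plugin,
                     Bcfg2.Server.Plugin.TemplateDataProvider):
        """Hypothetical provider exposing one extra template variable."""

        def get_template_data(self, entry, metadata, template):
            # Cfg templates rendered for this client can use ``deploy_env``
            if "production" in metadata.groups:
                return dict(deploy_env="production")
            return dict(deploy_env="staging")

        def get_xml_template_data(self, structfile, metadata):
            # the same variable for templated XML files such as bundles
            return self.get_template_data(None, metadata, None)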
:param first: One of the ACLs to compare :type first: string :param second: The other ACL to compare :type second: string :returns: bool """ if first == second: # single wildcard is special, and matches everything return True if first is None or second is None: return False if '*' not in first + second: # no wildcards, and not exactly equal return False first_parts = first.split('.') second_parts = second.split('.') if len(first_parts) != len(second_parts): return False for i in range(len(first_parts)): if (first_parts[i] != second_parts[i] and first_parts[i] != '*' and second_parts[i] != '*'): return False return True def ip2int(ip): """ convert a dotted-quad IP address into an integer representation of the same """ return struct.unpack('>L', socket.inet_pton(socket.AF_INET, ip))[0] def ip_matches(ip, entry): """ Return True if the given IP matches the IP or IP and netmask in the given ACL entry; False otherwise """ if entry.get("netmask"): try: mask = int("1" * int(entry.get("netmask")) + "0" * (32 - int(entry.get("netmask"))), 2) except ValueError: mask = ip2int(entry.get("netmask")) return ip2int(ip) & mask == ip2int(entry.get("address")) & mask elif entry.get("address") is None: # no address, no netmask -- match all return True elif ip == entry.get("address"): # just a plain ip address return True return False class IPACLFile(Bcfg2.Server.Plugin.XMLFileBacked): """ representation of ACL ip.xml, for IP-based ACLs """ __identifier__ = None actions = dict(Allow=True, Deny=False, Defer=None) def check_acl(self, address, rmi): """ Check a client address against the ACL list """ if not len(self.entries): # default defer if no ACLs are defined. self.debug_log("ACL: %s requests %s: No IP ACLs, defer" % (address, rmi)) return self.actions["Defer"] for entry in self.entries: if (ip_matches(address, entry) and rmi_names_equal(entry.get("method"), rmi)): self.debug_log("ACL: %s requests %s: Found matching IP ACL, " "%s" % (address, rmi, entry.tag.lower())) return self.actions[entry.tag] if address == "127.0.0.1": self.debug_log("ACL: %s requests %s: No matching IP ACLs, " "localhost allowed" % (address, rmi)) return self.actions['Allow'] # default allow for localhost self.debug_log("ACL: %s requests %s: No matching IP ACLs, defer" % (address, rmi)) return self.actions["Defer"] # default defer for other machines class MetadataACLFile(Bcfg2.Server.Plugin.StructFile): """ representation of ACL metadata.xml, for metadata-based ACLs """ def check_acl(self, metadata, rmi): """ check client metadata against the ACL list """ if not len(self.entries): # default allow if no ACLs are defined. 
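Illustrative uses of the helpers above (all values invented): ``rmi_names_equal`` compares dotted RMI names component by component, and ``ip_matches`` honours an optional netmask on an ACL entry.

.. code-block:: python

    import lxml.etree

    from Bcfg2.Server.Plugins.ACL import rmi_names_equal, ip_matches

    assert rmi_names_equal("Git.Update", "Git.Update")
    assert rmi_names_equal("Git.*", "Git.Update")           # per-component wildcard
    assert not rmi_names_equal("Git.*", "get_statistics")   # component counts differ

    # an <Allow> entry with address and netmask matches a whole network
    entry = lxml.etree.Element("Allow", address="10.0.0.0", netmask="8")
    assert ip_matches("10.1.2.3", entry)
    assert not ip_matches("192.168.1.1", entry)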
self.debug_log("ACL: %s requests %s: No metadata ACLs, allow" % (metadata.hostname, rmi)) return True for el in self.Match(metadata): if rmi_names_equal(el.get("method"), rmi): self.debug_log("ACL: %s requests %s: Found matching metadata " "ACL, %s" % (metadata.hostname, rmi, el.tag.lower())) return el.tag == "Allow" if metadata.hostname in ['localhost', 'localhost.localdomain']: # default allow for localhost self.debug_log("ACL: %s requests %s: No matching metadata ACLs, " "localhost allowed" % (metadata.hostname, rmi)) return True self.debug_log("ACL: %s requests %s: No matching metadata ACLs, deny" % (metadata.hostname, rmi)) return False # default deny for other machines class ACL(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.ClientACLs): """ allow connections to bcfg-server based on IP address """ def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.ClientACLs.__init__(self) self.ip_acls = IPACLFile(os.path.join(self.data, 'ip.xml'), should_monitor=True) self.metadata_acls = MetadataACLFile(os.path.join(self.data, 'metadata.xml'), should_monitor=True) def check_acl_ip(self, address, rmi): self.debug_log("ACL: %s requests %s: Checking IP ACLs" % (address[0], rmi)) return self.ip_acls.check_acl(address[0], rmi) def check_acl_metadata(self, metadata, rmi): self.debug_log("ACL: %s requests %s: Checking metadata ACLs" % (metadata.hostname, rmi)) return self.metadata_acls.check_acl(metadata, rmi) def set_debug(self, debug): rv = Bcfg2.Server.Plugin.Plugin.set_debug(self, debug) self.ip_acls.set_debug(debug) self.metadata_acls.set_debug(debug) return rv set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__ src/lib/Bcfg2/Server/Plugins/AWSTags.py000066400000000000000000000160541303523157100200560ustar00rootroot00000000000000"""Query tags from AWS via boto, optionally setting group membership.""" import os import re import sys import Bcfg2.Server.Plugin from boto import connect_ec2 from Bcfg2.Server.Cache import Cache from Bcfg2.Compat import ConfigParser class NoInstanceFound(Exception): """ Raised when there's no AWS instance for a given hostname """ class AWSTagPattern(object): """ Handler for a single Tag entry """ def __init__(self, name, value, groups): self.name = re.compile(name) if value is not None: self.value = re.compile(value) else: self.value = value self.groups = groups def get_groups(self, tags): """ Get groups that apply to the given tag set """ for key, value in tags.items(): name_match = self.name.search(key) if name_match: if self.value is not None: value_match = self.value.search(value) if value_match: return self._munge_groups(value_match) else: return self._munge_groups(name_match) break return [] def _munge_groups(self, match): """ Replace backreferences (``$1``, ``$2``) in Group tags with their values in the regex. 
""" rv = [] sub = match.groups() for group in self.groups: newg = group for idx in range(len(sub)): newg = newg.replace('$%s' % (idx + 1), sub[idx]) rv.append(newg) return rv def __str__(self): if self.value: return "%s: %s=%s: %s" % (self.__class__.__name__, self.name, self.value, self.groups) else: return "%s: %s: %s" % (self.__class__.__name__, self.name, self.groups) class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked): """ representation of AWSTags config.xml """ __identifier__ = None create = 'AWSTags' def __init__(self, filename, core=None): Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, should_monitor=True) self.core = core self.tags = [] def Index(self): Bcfg2.Server.Plugin.XMLFileBacked.Index(self) if (self.core and self.core.metadata_cache_mode in ['cautious', 'aggressive']): self.core.metadata_cache.expire() self.tags = [] for entry in self.xdata.xpath('//Tag'): try: groups = [g.text for g in entry.findall('Group')] self.tags.append(AWSTagPattern(entry.get("name"), entry.get("value"), groups)) except re.error: self.logger.error("AWSTags: Failed to initialize pattern %s: " "%s" % (entry.get("name"), sys.exc_info()[1])) def get_groups(self, tags): """ return a list of groups that should be added to the given client based on patterns that match the tags """ ret = [] for pattern in self.tags: ret.extend(pattern.get_groups(tags)) return ret class AWSTags(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.ClientRunHooks, Bcfg2.Server.Plugin.Connector): """ Query tags from AWS via boto, optionally setting group membership """ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['expire_cache'] def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) Bcfg2.Server.Plugin.Connector.__init__(self) try: key_id = self.core.setup.cfp.get("awstags", "access_key_id") secret_key = self.core.setup.cfp.get("awstags", "secret_access_key") except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): err = sys.exc_info()[1] raise Bcfg2.Server.Plugin.PluginInitError( "AWSTags is not configured in bcfg2.conf: %s" % err) self.debug_log("%s: Connecting to EC2" % self.name) self._ec2 = connect_ec2(aws_access_key_id=key_id, aws_secret_access_key=secret_key) self._tagcache = Cache() try: self._keep_cache = self.core.setup.cfp.getboolean("awstags", "cache") except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): self._keep_cache = True self.config = PatternFile(os.path.join(self.data, 'config.xml'), core=core) def _load_instance(self, hostname): """ Load an instance from EC2 whose private DNS name matches the given hostname """ self.debug_log("AWSTags: Loading instance with private-dns-name=%s" % hostname) filters = {'private-dns-name': hostname} reservations = self._ec2.get_all_instances(filters=filters) if reservations: res = reservations[0] if res.instances: return res.instances[0] raise NoInstanceFound( "AWSTags: No instance found with private-dns-name=%s" % hostname) def _get_tags_from_ec2(self, hostname): """ Get tags for the given host from EC2. This does not use the local caching layer. """ self.debug_log("AWSTags: Getting tags for %s from AWS" % hostname) try: return self._load_instance(hostname).tags except NoInstanceFound: self.debug_log(sys.exc_info()[1]) return dict() def get_tags(self, metadata): """ Get tags for the given host. This caches the tags locally if 'cache' in the ``[awstags]`` section of ``bcfg2.conf`` is true. 
""" if not self._keep_cache: return self._get_tags_from_ec2(metadata) if metadata.hostname not in self._tagcache: self._tagcache[metadata.hostname] = \ self._get_tags_from_ec2(metadata.hostname) return self._tagcache[metadata.hostname] def expire_cache(self, key=None): """ Expire the cache for one host, or for all hosts. This is exposed as an XML-RPC RMI. """ self._tagcache.expire(key=key) def start_client_run(self, metadata): self.expire_cache(key=metadata.hostname) if self.core.metadata_cache_mode == 'aggressive': self.logger.warning("AWSTags is incompatible with aggressive " "client metadata caching, try 'cautious' " "or 'initial'") self.core.metadata_cache.expire(metadata.hostname) def get_additional_data(self, metadata): return self.get_tags(metadata) def get_additional_groups(self, metadata): return self.config.get_groups(self.get_tags(metadata)) src/lib/Bcfg2/Server/Plugins/Bundler.py000066400000000000000000000143701303523157100201770ustar00rootroot00000000000000"""This provides bundle clauses with translation functionality.""" import os import re import sys import copy import fnmatch import lxml.etree from Bcfg2.Server.Plugin import StructFile, Plugin, Structure, \ StructureValidator, XMLDirectoryBacked, Generator from Bcfg2.version import Bcfg2VersionInfo from genshi.template import TemplateError class BundleFile(StructFile): """ Representation of a bundle XML file """ bundle_name_re = re.compile(r'^(?P.*)\.(xml|genshi)$') def __init__(self, filename, should_monitor=False): StructFile.__init__(self, filename, should_monitor=should_monitor) if self.name.endswith(".genshi"): self.logger.warning("Bundler: %s: Bundle filenames ending with " ".genshi are deprecated; add the Genshi XML " "namespace to a .xml bundle instead" % self.name) def Index(self): StructFile.Index(self) if self.xdata.get("name"): self.logger.warning("Bundler: %s: Explicitly specifying bundle " "names is deprecated" % self.name) @property def bundle_name(self): """ The name of the bundle, as determined from the filename """ return self.bundle_name_re.match( os.path.basename(self.name)).group("name") class Bundler(Plugin, Structure, StructureValidator, XMLDirectoryBacked): """ The bundler creates dependent clauses based on the bundle/translation scheme from Bcfg1. 
""" __author__ = 'bcfg-dev@mcs.anl.gov' __child__ = BundleFile patterns = re.compile(r'^.*\.(?:xml|genshi)$') def __init__(self, core): Plugin.__init__(self, core) Structure.__init__(self) StructureValidator.__init__(self) XMLDirectoryBacked.__init__(self, self.data) #: Bundles by bundle name, rather than filename self.bundles = dict() def HandleEvent(self, event): XMLDirectoryBacked.HandleEvent(self, event) self.bundles = dict([(b.bundle_name, b) for b in self.entries.values()]) def validate_structures(self, metadata, structures): """ Translate entries into entries """ for struct in structures: for pathglob in struct.xpath("//Path[@glob]"): for plugin in self.core.plugins_by_type(Generator): for match in fnmatch.filter(plugin.Entries['Path'].keys(), pathglob.get("glob")): lxml.etree.SubElement(pathglob.getparent(), "Path", name=match) pathglob.getparent().remove(pathglob) def BuildStructures(self, metadata): bundleset = [] bundles = copy.copy(metadata.bundles) bundles_added = set(bundles) while bundles: bundlename = bundles.pop() try: bundle = self.bundles[bundlename] except KeyError: self.logger.error("Bundler: Bundle %s does not exist" % bundlename) continue try: data = bundle.XMLMatch(metadata) except TemplateError: err = sys.exc_info()[1] self.logger.error("Bundler: Failed to render templated bundle " "%s: %s" % (bundlename, err)) continue except: self.logger.error("Bundler: Unexpected bundler error for %s" % bundlename, exc_info=1) continue if data.get("independent", "false").lower() == "true": data.tag = "Independent" del data.attrib['independent'] data.set("name", bundlename) for child in data.findall("Bundle"): if child.getchildren(): # XInclude'd bundle -- "flatten" it so there # aren't extra Bundle tags, since other bits in # Bcfg2 only handle the direct children of the # top-level Bundle tag if data.get("name"): self.logger.warning("Bundler: In file XIncluded from " "%s: Explicitly specifying " "bundle names is deprecated" % self.name) for el in child.getchildren(): data.append(el) data.remove(child) else: # no children -- wat self.logger.warning("Bundler: Useless empty Bundle tag " "in %s" % self.name) data.remove(child) for child in data.findall('RequiredBundle'): if child.get("name"): # dependent bundle -- add it to the list of # bundles for this client if child.get("name") not in bundles_added: bundles.add(child.get("name")) bundles_added.add(child.get("name")) if child.get('inherit_modification', 'false') == 'true': if metadata.version_info >= \ Bcfg2VersionInfo('1.4.0pre2'): lxml.etree.SubElement(data, 'Bundle', name=child.get('name')) else: self.logger.warning( 'Bundler: inherit_modification="true" is ' 'only supported for clients starting ' '1.4.0pre2') data.remove(child) else: # no name -- wat self.logger.warning('Bundler: Missing required name in ' 'RequiredBundle tag in %s' % self.name) data.remove(child) bundleset.append(data) return bundleset src/lib/Bcfg2/Server/Plugins/Bzr.py000066400000000000000000000023571303523157100173430ustar00rootroot00000000000000""" The Bzr plugin provides a revision interface for Bcfg2 repos using bazaar. """ import Bcfg2.Server.Plugin from bzrlib.workingtree import WorkingTree from bzrlib import errors class Bzr(Bcfg2.Server.Plugin.Version): """ The Bzr plugin provides a revision interface for Bcfg2 repos using bazaar. 
""" __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.logger.debug("Initialized Bazaar plugin with directory %s at " "revision = %s" % (Bcfg2.Options.setup.vcs_root, self.get_revision())) def get_revision(self): """Read Bazaar revision information for the Bcfg2 repository.""" try: working_tree = WorkingTree.open(Bcfg2.Options.setup.vcs_root) revision = str(working_tree.branch.revno()) if (working_tree.has_changes(working_tree.basis_tree()) or working_tree.unknowns()): revision += "+" except errors.NotBranchError: msg = "Failed to read Bazaar branch" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) return revision src/lib/Bcfg2/Server/Plugins/Cfg/000077500000000000000000000000001303523157100167245ustar00rootroot00000000000000src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py000066400000000000000000000076671303523157100245570ustar00rootroot00000000000000""" The CfgAuthorizedKeysGenerator generates ``authorized_keys`` files based on an XML specification of which SSH keypairs should granted access. """ import lxml.etree import Bcfg2.Options from Bcfg2.Server.Plugin import StructFile, PluginExecutionError from Bcfg2.Server.Plugins.Cfg import CfgGenerator, get_cfg from Bcfg2.Server.Plugins.Metadata import ClientMetadata class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile): """ The CfgAuthorizedKeysGenerator generates authorized_keys files based on an XML specification of which SSH keypairs should granted access. """ #: Different configurations for different clients/groups can be #: handled with Client and Group tags within authorizedkeys.xml __specific__ = False #: Handle authorized keys XML files __basenames__ = ['authorizedkeys.xml', 'authorized_keys.xml'] def __init__(self, fname): CfgGenerator.__init__(self, fname, None) StructFile.__init__(self, fname) self.cache = dict() self.core = get_cfg().core __init__.__doc__ = CfgGenerator.__init__.__doc__ def handle_event(self, event): CfgGenerator.handle_event(self, event) StructFile.HandleEvent(self, event) self.cache = dict() handle_event.__doc__ = CfgGenerator.handle_event.__doc__ def get_data(self, entry, metadata): spec = self.XMLMatch(metadata) rv = [] for allow in spec.findall("Allow"): options = [] for opt in allow.findall("Option"): if opt.get("value"): options.append("%s=%s" % (opt.get("name"), opt.get("value"))) else: options.append(opt.get("name")) pubkey_name = allow.get("from") if pubkey_name: host = allow.get("host") group = allow.get("group") category = allow.get("category", Bcfg2.Options.setup.sshkeys_category) if host: key_md = self.core.build_metadata(host) elif group: key_md = ClientMetadata("dummy", group, [group], [], set(), set(), dict(), None, None, None, None) elif category and not metadata.group_in_category(category): self.logger.warning("Cfg: %s ignoring Allow from %s: " "No group in category %s" % (metadata.hostname, pubkey_name, category)) continue else: key_md = metadata key_entry = lxml.etree.Element("Path", name=pubkey_name) try: self.core.Bind(key_entry, key_md) except PluginExecutionError: self.logger.info("Cfg: %s skipping Allow from %s: " "No key found" % (metadata.hostname, pubkey_name)) continue if not key_entry.text: self.logger.warning("Cfg: %s skipping Allow from %s: " "Empty public key" % (metadata.hostname, pubkey_name)) continue pubkey = key_entry.text elif allow.text: pubkey = allow.text.strip() else: self.logger.warning("Cfg: %s ignoring empty Allow tag: %s" % (metadata.hostname, 
lxml.etree.tostring(allow))) continue rv.append(" ".join([",".join(options), pubkey]).strip()) return "\n".join(rv) get_data.__doc__ = CfgGenerator.get_data.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py000066400000000000000000000041561303523157100231340ustar00rootroot00000000000000""" The CfgCheetahGenerator allows you to use the `Cheetah `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ import Bcfg2.Options from Bcfg2.Server.Plugin import PluginExecutionError, \ DefaultTemplateDataProvider, get_template_data from Bcfg2.Server.Plugins.Cfg import CfgGenerator try: from Cheetah.Template import Template HAS_CHEETAH = True except ImportError: HAS_CHEETAH = False class DefaultCheetahDataProvider(DefaultTemplateDataProvider): """ Template data provider for Cheetah templates. Cheetah and Genshi currently differ over the value of the ``path`` variable, which is why this is necessary. """ def get_template_data(self, entry, metadata, template): rv = DefaultTemplateDataProvider.get_template_data(self, entry, metadata, template) rv['path'] = rv['name'] return rv class CfgCheetahGenerator(CfgGenerator): """ The CfgCheetahGenerator allows you to use the `Cheetah `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ #: Handle .cheetah files __extensions__ = ['cheetah'] #: Low priority to avoid matching host- or group-specific #: .crypt.cheetah files __priority__ = 50 #: :class:`Cheetah.Template.Template` compiler settings settings = dict(useStackFrames=False) def __init__(self, fname, spec): CfgGenerator.__init__(self, fname, spec) if not HAS_CHEETAH: raise PluginExecutionError("Cheetah is not available") __init__.__doc__ = CfgGenerator.__init__.__doc__ def get_data(self, entry, metadata): template = Template(self.data.decode(Bcfg2.Options.setup.encoding), compilerSettings=self.settings) for key, val in get_template_data( entry, metadata, self.name, default=DefaultCheetahDataProvider()).items(): setattr(template, key, val) return template.respond() get_data.__doc__ = CfgGenerator.get_data.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py000066400000000000000000000017141303523157100250070ustar00rootroot00000000000000""" Handle encrypted Cheetah templates (.crypt.cheetah or .cheetah.crypt files)""" from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator \ import CfgEncryptedGenerator class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator): """ CfgEncryptedCheetahGenerator lets you encrypt your Cheetah :ref:`server-plugins-generators-cfg` files on the server """ #: handle .crypt.cheetah or .cheetah.crypt files __extensions__ = ['cheetah.crypt', 'crypt.cheetah'] #: Override low priority from parent class __priority__ = 0 def handle_event(self, event): CfgEncryptedGenerator.handle_event(self, event) handle_event.__doc__ = CfgEncryptedGenerator.handle_event.__doc__ def get_data(self, entry, metadata): return CfgCheetahGenerator.get_data(self, entry, metadata) get_data.__doc__ = CfgCheetahGenerator.get_data.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py000066400000000000000000000031401303523157100235200ustar00rootroot00000000000000""" CfgEncryptedGenerator lets you encrypt your plaintext :ref:`server-plugins-generators-cfg` files on the server. 
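The Cfg handlers above share a small interface: ``__extensions__``/``__basenames__`` select files, ``handle_event()`` reads them, and ``get_data()`` returns the final file content. A hypothetical minimal generator (the ``.example`` extension and ``%HOSTNAME%`` placeholder are invented for illustration) could look like:

.. code-block:: python

    import Bcfg2.Options
    from Bcfg2.Server.Plugins.Cfg import CfgGenerator


    class CfgExampleGenerator(CfgGenerator):
        """Hypothetical generator for .example files with a simple placeholder."""

        #: Handle .example files
        __extensions__ = ["example"]

        #: Low priority, like the other template handlers, so that host-
        #: or group-specific files still take precedence
        __priority__ = 50

        def get_data(self, entry, metadata):
            # self.data holds the raw file contents read in handle_event()
            content = self.data.decode(Bcfg2.Options.setup.encoding)
            return content.replace("%HOSTNAME%", metadata.hostname)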
""" import Bcfg2.Options from Bcfg2.Server.Plugin import PluginExecutionError from Bcfg2.Server.Plugins.Cfg import CfgGenerator try: from Bcfg2.Server.Encryption import bruteforce_decrypt, EVPError HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False class CfgEncryptedGenerator(CfgGenerator): """ CfgEncryptedGenerator lets you encrypt your plaintext :ref:`server-plugins-generators-cfg` files on the server. """ #: Handle .crypt files __extensions__ = ["crypt"] #: Low priority to avoid matching host- or group-specific #: .genshi.crypt and .cheetah.crypt files __priority__ = 50 def __init__(self, fname, spec): CfgGenerator.__init__(self, fname, spec) if not HAS_CRYPTO: raise PluginExecutionError("M2Crypto is not available") def handle_event(self, event): CfgGenerator.handle_event(self, event) if self.data is None: return # todo: let the user specify a passphrase by name try: self.data = bruteforce_decrypt(self.data) except EVPError: msg = "Cfg: Failed to decrypt %s" % self.name if Bcfg2.Options.setup.lax_decryption: self.logger.debug(msg) else: raise PluginExecutionError(msg) def get_data(self, entry, metadata): if self.data is None: raise PluginExecutionError("Failed to decrypt %s" % self.name) return CfgGenerator.get_data(self, entry, metadata) src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py000066400000000000000000000031211303523157100246550ustar00rootroot00000000000000""" Handle encrypted Genshi templates (.crypt.genshi or .genshi.crypt files) """ from genshi.template import TemplateLoader from Bcfg2.Compat import StringIO from Bcfg2.Server.Plugin import PluginExecutionError from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator try: from Bcfg2.Server.Encryption import bruteforce_decrypt HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False class EncryptedTemplateLoader(TemplateLoader): """ Subclass :class:`genshi.template.TemplateLoader` to decrypt the data on the fly as it's read in using :func:`Bcfg2.Server.Encryption.bruteforce_decrypt` """ def _instantiate(self, cls, fileobj, filepath, filename, encoding=None): plaintext = StringIO(bruteforce_decrypt(fileobj.read())) return TemplateLoader._instantiate(self, cls, plaintext, filepath, filename, encoding=encoding) class CfgEncryptedGenshiGenerator(CfgGenshiGenerator): """ CfgEncryptedGenshiGenerator lets you encrypt your Genshi :ref:`server-plugins-generators-cfg` files on the server """ #: handle .crypt.genshi or .genshi.crypt files __extensions__ = ['genshi.crypt', 'crypt.genshi'] #: Override low priority from parent class __priority__ = 0 #: Use a TemplateLoader class that decrypts the data on the fly #: when it's read in __loader_cls__ = EncryptedTemplateLoader def __init__(self, fname, spec): CfgGenshiGenerator.__init__(self, fname, spec) if not HAS_CRYPTO: raise PluginExecutionError("M2Crypto is not available") src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedJinja2Generator.py000066400000000000000000000016751303523157100245710ustar00rootroot00000000000000""" Handle encrypted Jinja2 templates (.crypt.jinja2 or .jinja2.crypt files)""" from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator \ import CfgEncryptedGenerator class CfgEncryptedJinja2Generator(CfgJinja2Generator, CfgEncryptedGenerator): """ CfgEncryptedJinja2Generator lets you encrypt your Jinja2 :ref:`server-plugins-generators-cfg` files on the server """ #: handle .crypt.jinja2 or .jinja2.crypt files __extensions__ = ['jinja2.crypt', 'crypt.jinja2'] #: Override 
low priority from parent class __priority__ = 0 def handle_event(self, event): CfgEncryptedGenerator.handle_event(self, event) handle_event.__doc__ = CfgEncryptedGenerator.handle_event.__doc__ def get_data(self, entry, metadata): return CfgJinja2Generator.get_data(self, entry, metadata) get_data.__doc__ = CfgJinja2Generator.get_data.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py000066400000000000000000000030571303523157100245000ustar00rootroot00000000000000""" Invoke an external command to verify file contents """ import os import sys import shlex from Bcfg2.Utils import Executor from Bcfg2.Server.Plugin import PluginExecutionError from Bcfg2.Server.Plugins.Cfg import CfgVerifier, CfgVerificationError class CfgExternalCommandVerifier(CfgVerifier): """ Invoke an external script to verify :ref:`server-plugins-generators-cfg` file contents """ #: Handle :file:`:test` files __basenames__ = [':test'] def __init__(self, name, specific): CfgVerifier.__init__(self, name, specific) self.cmd = [] self.exc = Executor(timeout=30) __init__.__doc__ = CfgVerifier.__init__.__doc__ def verify_entry(self, entry, metadata, data): try: result = self.exc.run(self.cmd, inputdata=data) if not result.success: raise CfgVerificationError(result.error) except OSError: raise CfgVerificationError(sys.exc_info()[1]) verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__ def handle_event(self, event): CfgVerifier.handle_event(self, event) if not self.data: return self.cmd = [] if not os.access(self.name, os.X_OK): bangpath = self.data.splitlines()[0].strip() if bangpath.startswith("#!"): self.cmd.extend(shlex.split(bangpath[2:].strip())) else: raise PluginExecutionError("Cannot execute %s" % self.name) self.cmd.append(self.name) handle_event.__doc__ = CfgVerifier.handle_event.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py000066400000000000000000000202531303523157100230040ustar00rootroot00000000000000""" The CfgGenshiGenerator allows you to use the `Genshi `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ import re import sys import traceback import Bcfg2.Options from Bcfg2.Server.Plugin import PluginExecutionError, removecomment, \ DefaultTemplateDataProvider, get_template_data from Bcfg2.Server.Plugins.Cfg import CfgGenerator from genshi.template import TemplateLoader, NewTextTemplate from genshi.template.base import TemplateError from genshi.template.eval import UndefinedError, Suite def _genshi_removes_blank_lines(): """ Genshi 0.5 uses the Python :mod:`compiler` package to compile genshi snippets to AST. Genshi 0.6 uses some bespoke magic, because compiler has been deprecated. :func:`compiler.parse` produces an AST that removes all excess whitespace (e.g., blank lines), while :func:`genshi.template.astutil.parse` does not. In order to determine which actual line of code an error occurs on, we need to know which is in use and how it treats blank lines. I've beat my head against this for hours and the best/only way I can find is to compile some genshi code with an error and see which line it's on.""" code = """d = dict() d['a']""" try: Suite(code).execute(dict()) except KeyError: line = traceback.extract_tb(sys.exc_info()[2])[-1][1] if line == 2: return True else: return False #: True if Genshi removes all blank lines from a code block before #: executing it; False indicates that Genshi only removes leading #: and trailing blank lines. See #: :func:`_genshi_removes_blank_lines` for an explanation of this. 
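A ``:test`` verifier (handled by CfgExternalCommandVerifier above) is simply a script: it is run directly if executable, otherwise via the interpreter named in its bangpath; it receives the generated content on stdin, and a non-zero exit status rejects the file. A hypothetical example:

.. code-block:: python

    #!/usr/bin/env python
    """Hypothetical Cfg ':test' verifier: reject empty or tab-indented files."""
    import sys

    data = sys.stdin.read()
    if not data.strip():
        sys.exit("generated file is empty")           # non-zero exit: rejected
    if "\t" in data:
        sys.exit("tabs are not allowed in this file")
    sys.exit(0)                                       # exit 0: content accepted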
GENSHI_REMOVES_BLANK_LINES = _genshi_removes_blank_lines() class DefaultGenshiDataProvider(DefaultTemplateDataProvider): """ Template data provider for Genshi templates. Cheetah and Genshi currently differ over the value of the ``path`` variable, which is why this is necessary. """ def get_template_data(self, entry, metadata, template): rv = DefaultTemplateDataProvider.get_template_data(self, entry, metadata, template) rv['path'] = template return rv class CfgGenshiGenerator(CfgGenerator): """ The CfgGenshiGenerator allows you to use the `Genshi `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ #: Handle .genshi files __extensions__ = ['genshi'] #: ``__loader_cls__`` is the class that will be instantiated to #: load the template files. It must implement one public function, #: ``load()``, as :class:`genshi.template.TemplateLoader`. __loader_cls__ = TemplateLoader #: Ignore ``.genshi_include`` files so they can be used with the #: Genshi ``{% include ... %}`` directive without raising warnings. __ignore__ = ["genshi_include"] #: Low priority to avoid matching host- or group-specific #: .crypt.genshi files __priority__ = 50 #: Error-handling in Genshi is pretty obtuse. This regex is used #: to extract the first line of the code block that raised an #: exception in a Genshi template so we can provide a decent error #: message that actually tells the end user where an error #: occurred. pyerror_re = re.compile(r'<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>') def __init__(self, fname, spec): CfgGenerator.__init__(self, fname, spec) self.template = None self.loader = self.__loader_cls__(max_cache_size=0) __init__.__doc__ = CfgGenerator.__init__.__doc__ def get_data(self, entry, metadata): if self.template is None: raise PluginExecutionError("Failed to load template %s" % self.name) stream = self.template.generate( **get_template_data( entry, metadata, self.name, default=DefaultGenshiDataProvider())).filter(removecomment) try: try: return stream.render('text', encoding=Bcfg2.Options.setup.encoding, strip_whitespace=False) except TypeError: return stream.render('text', encoding=Bcfg2.Options.setup.encoding) except UndefinedError: # a failure in a genshi expression _other_ than %{ python ... %} err = sys.exc_info()[1] stack = traceback.extract_tb(sys.exc_info()[2]) for quad in stack: if quad[0] == self.name: raise PluginExecutionError("%s: %s at '%s'" % (err.__class__.__name__, err, quad[2])) raise except: # this needs to be a blanket except, since it can catch # any error raised by the genshi template. self._handle_genshi_exception(sys.exc_info()) get_data.__doc__ = CfgGenerator.get_data.__doc__ def _handle_genshi_exception(self, exc): """ this is horrible, and I deeply apologize to whoever gets to maintain this after I go to the Great Beer Garden in the Sky. genshi is incredibly opaque about what's being executed, so the only way I can find to determine which {% python %} block is being executed -- if there are multiples -- is to iterate through them and match the snippet of the first line that's in the traceback with the first non-empty line of the block. """ # a failure in a %{ python ... %} block -- the snippet in # the traceback is just the beginning of the block. err = exc[1] stack = traceback.extract_tb(exc[2]) # find the right frame of the stack for frame in reversed(stack): if frame[0] == self.name: lineno, func = frame[1:3] break else: # couldn't even find the stack frame, wtf. 
raise PluginExecutionError("%s: %s" % (err.__class__.__name__, err)) execs = [contents for etype, contents, _ in self.template.stream if etype == self.template.EXEC] contents = None if len(execs) == 1: contents = execs[0] elif len(execs) > 1: match = self.pyerror_re.match(func) if match: firstline = match.group(0) for pyblock in execs: if pyblock.startswith(firstline): contents = pyblock break # else, no EXEC blocks -- WTF? if contents: # we now have the bogus block, but we need to get the # offending line. To get there, we do (line number given # in the exception) - (firstlineno from the internal # genshi code object of the snippet) = (line number of the # line with an error within the block, with blank lines # removed as appropriate for # :attr:`GENSHI_REMOVES_BLANK_LINES`) code = contents.source.strip().splitlines() if GENSHI_REMOVES_BLANK_LINES: code = [l for l in code if l.strip()] try: line = code[lineno - contents.code.co_firstlineno] raise PluginExecutionError("%s: %s at '%s'" % (err.__class__.__name__, err, line)) except IndexError: raise PluginExecutionError("%s: %s" % (err.__class__.__name__, err)) raise def handle_event(self, event): CfgGenerator.handle_event(self, event) try: self.template = self.loader.load( self.name, cls=NewTextTemplate, encoding=Bcfg2.Options.setup.encoding) except TemplateError: raise PluginExecutionError("Failed to load template: %s" % sys.exc_info()[1]) handle_event.__doc__ = CfgGenerator.handle_event.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py000066400000000000000000000013731303523157100211760ustar00rootroot00000000000000""" Handle info.xml files """ from Bcfg2.Server.Plugin import InfoXML from Bcfg2.Server.Plugins.Cfg import CfgInfo class CfgInfoXML(CfgInfo): """ CfgInfoXML handles :file:`info.xml` files for :ref:`server-plugins-generators-cfg` """ #: Handle :file:`info.xml` files __basenames__ = ['info.xml'] def __init__(self, path): CfgInfo.__init__(self, path) self.infoxml = InfoXML(path) __init__.__doc__ = CfgInfo.__init__.__doc__ def bind_info_to_entry(self, entry, metadata): self.infoxml.BindEntry(entry, metadata) bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__ def handle_event(self, event): self.infoxml.HandleEvent() handle_event.__doc__ = CfgInfo.handle_event.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py000066400000000000000000000075521303523157100227130ustar00rootroot00000000000000""" The CfgJinja2Generator allows you to use the `Jinja2 `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ import os import sys import Bcfg2.Options from Bcfg2.Server.Plugin import PluginExecutionError, \ DefaultTemplateDataProvider, get_template_data from Bcfg2.Server.Plugins.Cfg import CfgGenerator try: from jinja2 import Environment, FileSystemLoader HAS_JINJA2 = True class RelEnvironment(Environment): """Override join_path() to enable relative template paths.""" def join_path(self, template, parent): return os.path.join(os.path.dirname(parent), template) except ImportError: HAS_JINJA2 = False class DefaultJinja2DataProvider(DefaultTemplateDataProvider): """ Template data provider for Jinja2 templates. Jinja2 and Genshi currently differ over the value of the ``path`` variable, which is why this is necessary. 
""" def get_template_data(self, entry, metadata, template): rv = DefaultTemplateDataProvider.get_template_data(self, entry, metadata, template) rv['path'] = rv['name'] return rv class CfgJinja2Generator(CfgGenerator): """ The CfgJinja2Generator allows you to use the `Jinja2 `_ templating system to generate :ref:`server-plugins-generators-cfg` files. """ #: Handle .jinja2 files __extensions__ = ['jinja2'] if HAS_JINJA2: #: ``__loader_cls__`` is the class that will be instantiated to #: load the template files. It must implement one public function, #: ``load()``, as :class:`genshi.template.TemplateLoader`. __loader_cls__ = FileSystemLoader #: ``__environment_cls__`` is the class that will be instantiated to #: store the jinja2 environment. It must implement one public #: function, ``get_template()``, as :class:`jinja2.Environment`. __environment_cls__ = RelEnvironment #: Ignore ``.jinja2_include`` files so they can be used with the #: Jinja2 ``{% include ... %}`` directive without raising warnings. __ignore__ = ["jinja2_include"] #: Low priority to avoid matching host- or group-specific #: .crypt.jinja2 files __priority__ = 50 def __init__(self, fname, spec): CfgGenerator.__init__(self, fname, spec) if not HAS_JINJA2: raise PluginExecutionError("Jinja2 is not available") self.template = None encoding = Bcfg2.Options.setup.encoding self.loader = self.__loader_cls__('/', encoding=encoding) try: # keep_trailing_newline is new in Jinja2 2.7, and will # fail with earlier versions self.environment = \ self.__environment_cls__(loader=self.loader, keep_trailing_newline=True) except TypeError: self.environment = \ self.__environment_cls__(loader=self.loader) __init__.__doc__ = CfgGenerator.__init__.__doc__ def get_data(self, entry, metadata): if self.template is None: raise PluginExecutionError("Failed to load template %s" % self.name) return self.template.render( get_template_data(entry, metadata, self.name, default=DefaultJinja2DataProvider())) get_data.__doc__ = CfgGenerator.get_data.__doc__ def handle_event(self, event): CfgGenerator.handle_event(self, event) try: self.template = \ self.environment.get_template(self.name) except: raise PluginExecutionError("Failed to load template: %s" % sys.exc_info()[1]) handle_event.__doc__ = CfgGenerator.handle_event.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgPlaintextGenerator.py000066400000000000000000000013501303523157100235340ustar00rootroot00000000000000""" CfgPlaintextGenerator is a :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator` that handles plain text (i.e., non-templated) :ref:`server-plugins-generators-cfg` files.""" from Bcfg2.Server.Plugins.Cfg import CfgGenerator class CfgPlaintextGenerator(CfgGenerator): """ CfgPlaintextGenerator is a :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator` that handles plain text (i.e., non-templated) :ref:`server-plugins-generators-cfg` files. The base Generator class already implements this functionality, so CfgPlaintextGenerator doesn't need to do anything itself.""" #: Very low priority to avoid matching host- or group-specific #: files with other extensions -- e.g., .genshi, .crypt, etc. __priority__ = 100 src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py000066400000000000000000000116611303523157100233260ustar00rootroot00000000000000""" The CfgPrivateKeyCreator creates SSH keys on the fly. 
""" import os import shutil import tempfile import Bcfg2.Options from Bcfg2.Utils import Executor from Bcfg2.Server.Plugins.Cfg import XMLCfgCreator, CfgCreationError from Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator import CfgPublicKeyCreator class CfgPrivateKeyCreator(XMLCfgCreator): """The CfgPrivateKeyCreator creates SSH keys on the fly. """ #: Different configurations for different clients/groups can be #: handled with Client and Group tags within privkey.xml __specific__ = False #: Handle XML specifications of private keys __basenames__ = ['privkey.xml'] cfg_section = "sshkeys" options = [ Bcfg2.Options.Option( cf=("sshkeys", "category"), dest="sshkeys_category", help="Metadata category that generated SSH keys are specific to"), Bcfg2.Options.Option( cf=("sshkeys", "passphrase"), dest="sshkeys_passphrase", help="Passphrase used to encrypt generated SSH private keys")] def __init__(self, fname): XMLCfgCreator.__init__(self, fname) pubkey_path = os.path.dirname(self.name) + ".pub" pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path)) self.pubkey_creator = CfgPublicKeyCreator(pubkey_name) self.cmd = Executor() def _gen_keypair(self, metadata, spec=None): """ Generate a keypair according to the given client medata and key specification. :param metadata: The client metadata to generate keys for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param spec: The key specification to follow when creating the keys. This should be an XML document that only contains key specification data that applies to the given client metadata, and may be obtained by doing ``self.XMLMatch(metadata)`` :type spec: lxml.etree._Element :returns: tuple - (private key data, public key data) """ if spec is None: spec = self.XMLMatch(metadata) # set key parameters ktype = "rsa" bits = None params = spec.find("Params") if params is not None: bits = params.get("bits") ktype = params.get("type", ktype) try: passphrase = spec.find("Passphrase").text except AttributeError: passphrase = '' tempdir = tempfile.mkdtemp() try: filename = os.path.join(tempdir, "privkey") # generate key pair cmd = ["ssh-keygen", "-f", filename, "-t", ktype] if bits: cmd.extend(["-b", bits]) cmd.append("-N") log_cmd = cmd[:] cmd.append(passphrase) if passphrase: log_cmd.append("******") else: log_cmd.append("''") self.debug_log("Cfg: Generating new SSH key pair: %s" % " ".join(log_cmd)) result = self.cmd.run(cmd) if not result.success: raise CfgCreationError("Cfg: Failed to generate SSH key pair " "at %s for %s: %s" % (filename, metadata.hostname, result.error)) elif result.stderr: self.logger.warning("Cfg: Generated SSH key pair at %s for %s " "with errors: %s" % (filename, metadata.hostname, result.stderr)) return (open(filename).read(), open(filename + ".pub").read()) finally: shutil.rmtree(tempdir) # pylint: disable=W0221 def create_data(self, entry, metadata): """ Create data for the given entry on the given client :param entry: The abstract entry to create data for. This will not be modified :type entry: lxml.etree._Element :param metadata: The client metadata to create data for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: string - The private key data """ spec = self.XMLMatch(metadata) specificity = self.get_specificity(metadata) privkey, pubkey = self._gen_keypair(metadata, spec) # write the public key, stripping the comment and # replacing it with a comment that specifies the filename. 
kdata = pubkey.split()[:2] kdata.append(self.pubkey_creator.get_filename(**specificity)) pubkey = " ".join(kdata) + "\n" self.pubkey_creator.write_data(pubkey, **specificity) # encrypt the private key, write to the proper place, and # return it self.write_data(privkey, **specificity) return privkey # pylint: enable=W0221 src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py000066400000000000000000000077731303523157100231430ustar00rootroot00000000000000""" The CfgPublicKeyCreator invokes :class:`Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator.CfgPrivateKeyCreator` to create SSH keys on the fly. """ import os import sys import tempfile import lxml.etree from Bcfg2.Utils import Executor from Bcfg2.Server.Plugin import StructFile, PluginExecutionError from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, get_cfg class CfgPublicKeyCreator(CfgCreator, StructFile): """ .. currentmodule:: Bcfg2.Server.Plugins.Cfg The CfgPublicKeyCreator creates SSH public keys on the fly. It is invoked by :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to handle the creation of the public key, and can also call :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to trigger the creation of a keypair when a public key is created. """ #: Different configurations for different clients/groups can be #: handled with Client and Group tags within pubkey.xml __specific__ = False #: Handle XML specifications of private keys __basenames__ = ['pubkey.xml'] #: No text content on any tags, so encryption support disabled encryption = False def __init__(self, fname): CfgCreator.__init__(self, fname) StructFile.__init__(self, fname) self.cfg = get_cfg() self.core = self.cfg.core self.cmd = Executor() def create_data(self, entry, metadata): if entry.get("name").endswith(".pub"): privkey = entry.get("name")[:-4] else: raise CfgCreationError("Cfg: Could not determine private key for " "%s: Filename does not end in .pub" % entry.get("name")) privkey_entry = lxml.etree.Element("Path", name=privkey) try: self.core.Bind(privkey_entry, metadata) except PluginExecutionError: raise CfgCreationError("Cfg: Could not bind %s (private key for " "%s): %s" % (privkey, self.name, sys.exc_info()[1])) try: eset = self.cfg.entries[privkey] creator = eset.best_matching(metadata, eset.get_handlers(metadata, CfgCreator)) except KeyError: raise CfgCreationError("Cfg: No private key defined for %s (%s)" % (self.name, privkey)) except PluginExecutionError: raise CfgCreationError("Cfg: No privkey.xml defined for %s " "(private key for %s)" % (privkey, self.name)) specificity = creator.get_specificity(metadata) fname = self.get_filename(**specificity) # if the private key didn't exist, then creating it may have # created the private key, too. check for it first. 
if os.path.exists(fname): return open(fname).read() else: # generate public key from private key fd, privfile = tempfile.mkstemp() try: os.fdopen(fd, 'w').write(privkey_entry.text) cmd = ["ssh-keygen", "-y", "-f", privfile] self.debug_log("Cfg: Extracting SSH public key from %s: %s" % (privkey, " ".join(cmd))) result = self.cmd.run(cmd) if not result.success: raise CfgCreationError("Cfg: Failed to extract public key " "from %s: %s" % (privkey, result.error)) self.write_data(result.stdout, **specificity) return result.stdout finally: os.unlink(privfile) def handle_event(self, event): CfgCreator.handle_event(self, event) StructFile.HandleEvent(self, event) handle_event.__doc__ = CfgCreator.handle_event.__doc__ src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCACertCreator.py000066400000000000000000000251631303523157100227300ustar00rootroot00000000000000""" Cfg creator that creates SSL certs """ import os import sys import tempfile import lxml.etree import Bcfg2.Options from Bcfg2.Utils import Executor from Bcfg2.Compat import ConfigParser from Bcfg2.Server.FileMonitor import get_fam from Bcfg2.Server.Plugin import PluginExecutionError from Bcfg2.Server.Plugins.Cfg import CfgCreationError, XMLCfgCreator, \ CfgCreator, CfgVerifier, CfgVerificationError, get_cfg class CfgSSLCACertCreator(XMLCfgCreator, CfgVerifier): """ This class acts as both a Cfg creator that creates SSL certs, and as a Cfg verifier that verifies SSL certs. """ #: Different configurations for different clients/groups can be #: handled with Client and Group tags within sslcert.xml __specific__ = False #: Handle XML specifications of SSL certs __basenames__ = ['sslcert.xml'] cfg_section = "sslca" options = [ Bcfg2.Options.Option( cf=("sslca", "category"), dest="sslca_category", help="Metadata category that generated SSL keys are specific to"), Bcfg2.Options.Option( cf=("sslca", "passphrase"), dest="sslca_passphrase", help="Passphrase used to encrypt generated SSL keys"), Bcfg2.Options.WildcardSectionGroup( Bcfg2.Options.PathOption( cf=("sslca_*", "config"), help="Path to the openssl config for the CA"), Bcfg2.Options.Option( cf=("sslca_*", "passphrase"), help="Passphrase for the CA private key"), Bcfg2.Options.PathOption( cf=("sslca_*", "chaincert"), help="Path to the SSL chaining certificate for verification"), Bcfg2.Options.BooleanOption( cf=("sslca_*", "root_ca"), help="Whether or not this is a root CA (as opposed to " "an intermediate cert)"), prefix="")] def __init__(self, fname): XMLCfgCreator.__init__(self, fname) CfgVerifier.__init__(self, fname, None) self.cmd = Executor() self.cfg = get_cfg() def build_req_config(self, metadata): """ Generates a temporary openssl configuration file that is used to generate the required certificate request. """ fd, fname = tempfile.mkstemp() cfp = ConfigParser.ConfigParser({}) cfp.optionxform = str defaults = dict( req=dict( default_md='sha1', distinguished_name='req_distinguished_name', req_extensions='v3_req', x509_extensions='v3_req', prompt='no'), req_distinguished_name=dict(), v3_req=dict(subjectAltName='@alt_names'), alt_names=dict()) for section in list(defaults.keys()): cfp.add_section(section) for key in defaults[section]: cfp.set(section, key, defaults[section][key]) spec = self.XMLMatch(metadata) cert = spec.find("Cert") altnamenum = 1 altnames = spec.findall('subjectAltName') altnames.extend(list(metadata.aliases)) altnames.append(metadata.hostname) for altname in altnames: cfp.set('alt_names', 'DNS.'
+ str(altnamenum), altname) altnamenum += 1 for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']: if cert.get(item): cfp.set('req_distinguished_name', item, cert.get(item)) cfp.set('req_distinguished_name', 'CN', metadata.hostname) self.debug_log("Cfg: Writing temporary CSR config to %s" % fname) try: cfp.write(os.fdopen(fd, 'w')) except IOError: raise CfgCreationError("Cfg: Failed to write temporary CSR config " "file: %s" % sys.exc_info()[1]) return fname def build_request(self, keyfile, metadata): """ Create the certificate request """ req_config = self.build_req_config(metadata) try: fd, req = tempfile.mkstemp() os.close(fd) cert = self.XMLMatch(metadata).find("Cert") days = cert.get("days", "365") cmd = ["openssl", "req", "-new", "-config", req_config, "-days", days, "-key", keyfile, "-text", "-out", req] result = self.cmd.run(cmd) if not result.success: raise CfgCreationError("Failed to generate CSR: %s" % result.error) return req finally: try: os.unlink(req_config) except OSError: self.logger.error("Cfg: Failed to unlink temporary CSR " "config: %s" % sys.exc_info()[1]) def get_ca(self, name): """ get a dict describing a CA from the config file """ rv = dict() prefix = "sslca_%s_" % name for attr in dir(Bcfg2.Options.setup): if attr.startswith(prefix): rv[attr[len(prefix):]] = getattr(Bcfg2.Options.setup, attr) return rv def create_data(self, entry, metadata): """ generate a new cert """ self.logger.info("Cfg: Generating new SSL cert for %s" % self.name) cert = self.XMLMatch(metadata).find("Cert") ca = self.get_ca(cert.get('ca', 'default')) req = self.build_request(self._get_keyfile(cert, metadata), metadata) try: days = cert.get('days', '365') cmd = ["openssl", "ca", "-config", ca['config'], "-in", req, "-days", days, "-batch"] passphrase = ca.get('passphrase') if passphrase: cmd.extend(["-passin", "pass:%s" % passphrase]) result = self.cmd.run(cmd) if not result.success: raise CfgCreationError("Failed to generate cert: %s" % result.error) except KeyError: raise CfgCreationError("Cfg: [sslca_%s] section has no 'config' " "option" % cert.get('ca', 'default')) finally: try: os.unlink(req) except OSError: self.logger.error("Cfg: Failed to unlink temporary CSR: %s " % sys.exc_info()[1]) data = result.stdout if cert.get('append_chain') and 'chaincert' in ca: data += open(ca['chaincert']).read() self.write_data(data, **self.get_specificity(metadata)) return data def verify_entry(self, entry, metadata, data): fd, fname = tempfile.mkstemp() self.debug_log("Cfg: Writing SSL cert %s to temporary file %s for " "verification" % (entry.get("name"), fname)) os.fdopen(fd, 'w').write(data) cert = self.XMLMatch(metadata).find("Cert") ca = self.get_ca(cert.get('ca', 'default')) try: if ca.get('chaincert'): self.verify_cert_against_ca(fname, entry, metadata) self.verify_cert_against_key(fname, self._get_keyfile(cert, metadata)) finally: os.unlink(fname) def _get_keyfile(self, cert, metadata): """ Given a element and client metadata, return the full path to the file on the filesystem that the key lives in.""" keypath = cert.get("key") eset = self.cfg.entries[keypath] try: return eset.best_matching(metadata).name except PluginExecutionError: # SSL key needs to be created try: creator = eset.best_matching(metadata, eset.get_handlers(metadata, CfgCreator)) except PluginExecutionError: raise CfgCreationError("Cfg: No SSL key or key creator " "defined for %s" % keypath) keyentry = lxml.etree.Element("Path", name=keypath) creator.create_data(keyentry, metadata) tries = 0 while True: if tries >= 10: raise 
CfgCreationError("Cfg: Timed out waiting for event " "on SSL key at %s" % keypath) get_fam().handle_events_in_interval(1) try: return eset.best_matching(metadata).name except PluginExecutionError: tries += 1 continue def verify_cert_against_ca(self, filename, entry, metadata): """ check that a certificate validates against the ca cert, and that it has not expired. """ cert = self.XMLMatch(metadata).find("Cert") ca = self.get_ca(cert.get("ca", "default")) chaincert = ca.get('chaincert') cmd = ["openssl", "verify"] is_root = ca.get('root_ca', "false").lower() == 'true' if is_root: cmd.append("-CAfile") else: # verifying based on an intermediate cert cmd.extend(["-purpose", "sslserver", "-untrusted"]) cmd.extend([chaincert, filename]) self.debug_log("Cfg: Verifying %s against CA" % entry.get("name")) result = self.cmd.run(cmd) if result.stdout == filename + ": OK\n": self.debug_log("Cfg: %s verified successfully against CA" % entry.get("name")) else: raise CfgVerificationError("%s failed verification against CA: %s" % (entry.get("name"), result.error)) def _get_modulus(self, fname, ftype="x509"): """ get the modulus from the given file """ cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname] self.debug_log("Cfg: Getting modulus of %s for verification: %s" % (fname, " ".join(cmd))) result = self.cmd.run(cmd) if not result.success: raise CfgVerificationError("Failed to get modulus of %s: %s" % (fname, result.error)) return result.stdout.strip() def verify_cert_against_key(self, filename, keyfile): """ check that a certificate validates against its private key. """ cert = self._get_modulus(filename) key = self._get_modulus(keyfile, ftype="rsa") if cert == key: self.debug_log("Cfg: %s verified successfully against key %s" % (filename, keyfile)) else: raise CfgVerificationError("%s failed verification against key %s" % (filename, keyfile)) src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py000066400000000000000000000024221303523157100225540ustar00rootroot00000000000000""" Cfg creator that creates SSL keys """ from Bcfg2.Utils import Executor from Bcfg2.Server.Plugins.Cfg import CfgCreationError, XMLCfgCreator class CfgSSLCAKeyCreator(XMLCfgCreator): """ Cfg creator that creates SSL keys """ #: Different configurations for different clients/groups can be #: handled with Client and Group tags within sslkey.xml __specific__ = False __basenames__ = ["sslkey.xml"] cfg_section = "sslca" def create_data(self, entry, metadata): self.logger.info("Cfg: Generating new SSL key for %s" % self.name) spec = self.XMLMatch(metadata) key = spec.find("Key") if key is None: key = {} ktype = key.get('type', 'rsa') bits = key.get('bits', '2048') if ktype == 'rsa': cmd = ["openssl", "genrsa", bits] elif ktype == 'dsa': cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits] result = Executor().run(cmd) if not result.success: raise CfgCreationError("Failed to generate key %s for %s: %s" % (self.name, metadata.hostname, result.error)) self.write_data(result.stdout, **self.get_specificity(metadata)) return result.stdout src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py000066400000000000000000001150541303523157100210430ustar00rootroot00000000000000"""This module implements a config file repository.""" import re import os import sys import errno import operator import lxml.etree import Bcfg2.Options import Bcfg2.Server.Plugin from Bcfg2.Server.Plugin import PluginExecutionError # pylint: disable=W0622 from Bcfg2.Compat import u_str, unicode, b64encode, any, walk_packages # pylint: enable=W0622 try: import
Bcfg2.Server.Encryption HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False _handlers = [m[1] # pylint: disable=C0103 for m in walk_packages(path=__path__)] _CFG = None def get_cfg(): """ Get the :class:`Bcfg2.Server.Plugins.Cfg.Cfg` plugin object created by the Bcfg2 core. This is provided so that the handler objects can access it as necessary, since the existing :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` and :class:`Bcfg2.Server.Plugin.helpers.EntrySet` classes have no facility for passing it otherwise.""" return _CFG class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData): """ .. currentmodule:: Bcfg2.Server.Plugins.Cfg CfgBaseFileMatcher is the parent class for all Cfg handler objects. """ #: The set of filenames handled by this handler. If #: ``__basenames__`` is the empty list, then the basename of each #: :class:`CfgEntrySet` is used -- i.e., the name of the directory #: that contains the file is used for the basename. __basenames__ = [] #: This handler only handles files with the listed extensions #: (which come *after* :attr:`CfgBaseFileMatcher.__specific__` #: indicators). __extensions__ = [] #: This handler ignores all files with the listed extensions. A #: file that is ignored by a handler will not be handled by any #: other handlers; that is, a file is ignored if any handler #: ignores it. Ignoring a file is not simply a means to defer #: handling of that file to another handler. __ignore__ = [] #: Whether or not the files handled by this handler are permitted #: to have specificity indicators in their filenames -- e.g., #: ``.H_client.example.com`` or ``.G10_foogroup``. __specific__ = True #: Cfg handlers are checked in ascending order of priority to see #: if they handle a given event. If this explicit priority is not #: set, then :class:`CfgPlaintextGenerator.CfgPlaintextGenerator` #: would match against nearly every other sort of generator file #: if it comes first. It's not necessary to set ``__priority__`` #: on handlers where :attr:`CfgBaseFileMatcher.__specific__` is #: False, since they don't have a potentially open-ended regex __priority__ = 0 #: Flag to indicate a deprecated handler. deprecated = False #: Flag to indicate an experimental handler. experimental = False def __init__(self, name, specific): if not self.__specific__ and not specific: specific = Bcfg2.Server.Plugin.Specificity(all=True) Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific) __init__.__doc__ = Bcfg2.Server.Plugin.SpecificData.__init__.__doc__ + \ """ .. ----- .. autoattribute:: CfgBaseFileMatcher.__basenames__ .. autoattribute:: CfgBaseFileMatcher.__extensions__ .. autoattribute:: CfgBaseFileMatcher.__ignore__ .. autoattribute:: CfgBaseFileMatcher.__specific__ .. autoattribute:: CfgBaseFileMatcher.__priority__ """ @classmethod def get_regex(cls, basenames): """ Get a compiled regular expression to match filenames (not full paths) that this handler handles. 
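For example, a specific handler for an entry set named ``sshd_config``
with no explicit basenames or extensions would match names such as
(an illustrative sketch, not an exhaustive list)::

    sshd_config
    sshd_config.H_client.example.com
    sshd_config.G10_webservers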
:param basename: The base filename to use if :attr:`CfgBaseFileMatcher.__basenames__` is not defined (i.e., the name of the directory that contains the files the regex will be applied to) :type basename: string :returns: compiled regex """ components = ['^(?P<basename>%s)' % '|'.join(re.escape(b) for b in basenames)] if cls.__specific__: components.append(r'(|\.H_(?P<hostname>\S+?)|' + r'\.G(?P<prio>\d+)_(?P<group>\S+?))') if cls.__extensions__: components.append(r'\.(?P<extension>%s)' % r'|'.join(cls.__extensions__)) components.append(r'$') return re.compile("".join(components)) @classmethod def handles(cls, event, basename=None): """ Return True if this handler handles the file described by ``event``. This is faster than just applying :func:`CfgBaseFileMatcher.get_regex` because it tries to do non-regex matching first. :param event: The FAM event to check :type event: Bcfg2.Server.FileMonitor.Event :param basename: The base filename to use if :attr:`CfgBaseFileMatcher.__basenames__` is not defined (i.e., the name of the directory that contains the files the regex will be applied to) :type basename: string :returns: bool - True if this handler handles the file listed in the event, False otherwise. """ if cls.__basenames__: basenames = cls.__basenames__ else: basenames = [os.path.basename(basename)] return bool(cls.get_regex(basenames).match(event.filename)) @classmethod def ignore(cls, event, basename=None): # pylint: disable=W0613 """ Return True if this handler ignores the file described by ``event``. See :attr:`CfgBaseFileMatcher.__ignore__` for more information on how ignoring files works. :param event: The FAM event to check :type event: Bcfg2.Server.FileMonitor.Event :param basename: The base filename to use if :attr:`CfgBaseFileMatcher.__basenames__` is not defined (i.e., the name of the directory that contains the files the regex will be applied to) :type basename: string :returns: bool - True if this handler ignores the file listed in the event, False otherwise. """ return any(event.filename.endswith("." + e) for e in cls.__ignore__) def __str__(self): return "%s(%s)" % (self.__class__.__name__, self.name) class CfgGenerator(CfgBaseFileMatcher): """ CfgGenerators generate the initial content of a file. Every valid :class:`Bcfg2.Server.Plugins.Cfg.CfgEntrySet` must have at least one file handled by a CfgGenerator. Moreover, each CfgEntrySet must have one unambiguously best handler for each client. See :class:`Bcfg2.Server.Plugin.helpers.EntrySet` for more details on how the best handler is chosen.""" def __init__(self, name, specific): # we define an __init__ that just calls the parent __init__, # so that we can set the docstring on __init__ to something # different from the parent __init__ -- namely, the parent # __init__ docstring, minus everything after ``.. -----``, # which we use to delineate the actual docs from the # .. autoattribute hacks we have to do to get private # attributes included in sphinx 1.0 """ CfgBaseFileMatcher.__init__(self, name, specific) __init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0] def get_data(self, entry, metadata): # pylint: disable=W0613 """ get_data() returns the initial data of a file. :param entry: The entry to generate data for. ``entry`` should not be modified in-place. :type entry: lxml.etree._Element :param metadata: The client metadata to generate data for.
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: string - the contents of the entry """ return self.data class CfgFilter(CfgBaseFileMatcher): """ CfgFilters modify the initial content of a file after it has been generated by a :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator`. """ def __init__(self, name, specific): # see comment on CfgGenerator.__init__ above CfgBaseFileMatcher.__init__(self, name, specific) __init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0] def modify_data(self, entry, metadata, data): """ Return new data for the entry, based on the initial data produced by the :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator`. :param entry: The entry to filter data for. ``entry`` should not be modified in-place. :type entry: lxml.etree._Element :param metadata: The client metadata to filter data for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The initial contents of the entry produced by the CfgGenerator :type data: string :returns: string - the new contents of the entry """ raise NotImplementedError class CfgInfo(CfgBaseFileMatcher): """ CfgInfo handlers provide metadata (owner, group, paranoid, etc.) for a file entry. """ #: Whether or not the files handled by this handler are permitted #: to have specificity indicators in their filenames -- e.g., #: ``.H_client.example.com`` or ``.G10_foogroup``. By default #: CfgInfo handlers do not allow specificities __specific__ = False def __init__(self, fname): """ :param name: The full path to the file :type name: string .. ----- .. autoattribute:: Bcfg2.Server.Plugins.Cfg.CfgInfo.__specific__ """ CfgBaseFileMatcher.__init__(self, fname, None) def bind_info_to_entry(self, entry, metadata): """ Assign the appropriate attributes to the entry, modifying it in place. :param entry: The abstract entry to bind the info to :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ raise NotImplementedError class CfgVerifier(CfgBaseFileMatcher): """ CfgVerifier handlers validate entry data once it has been generated, filtered, and info applied. Validation can be enabled or disabled in the configuration. Validation can apply to the contents of an entry, the attributes on it (e.g., owner, group, etc.), or both. """ def __init__(self, name, specific): # see comment on CfgGenerator.__init__ above CfgBaseFileMatcher.__init__(self, name, specific) __init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0] def verify_entry(self, entry, metadata, data): """ Perform entry contents. validation. :param entry: The entry to validate data for. ``entry`` should not be modified in-place. Info attributes have been bound to the entry, but the text data has not been set. :type entry: lxml.etree._Element :param metadata: The client metadata to validate data for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param data: The contents of the entry :type data: string :returns: None :raises: :exc:`Bcfg2.Server.Plugins.Cfg.CfgVerificationError` """ raise NotImplementedError class CfgCreator(CfgBaseFileMatcher): """ CfgCreator handlers create static entry data if no generator was found to generate any. A CfgCreator runs at most once per client, writes its data to disk as a static file, and is not called on subsequent runs by the same client. 
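A rough sketch of the lifecycle (the class name and repository path
here are illustrative only)::

    creator = SomeCfgCreator('/var/lib/bcfg2/Cfg/etc/foo/foo.xml')
    data = creator.create_data(entry, metadata)

``create_data()`` is expected to call :func:`write_data`, so the
generated content is saved as a static file and reused on later runs.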
""" #: CfgCreators generally store their configuration in a single XML #: file, and are thus not specific __specific__ = False def __init__(self, fname): """ :param name: The full path to the file :type name: string .. ----- .. autoattribute:: Bcfg2.Server.Plugins.Cfg.CfgInfo.__specific__ """ CfgBaseFileMatcher.__init__(self, fname, None) def create_data(self, entry, metadata): """ Create new data for the given entry and write it to disk using :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`. :param entry: The entry to create data for. :type entry: lxml.etree._Element :param metadata: The client metadata to create data for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: string - The contents of the entry :raises: :exc:`Bcfg2.Server.Plugins.Cfg.CfgCreationError` """ raise NotImplementedError def get_filename(self, host=None, group=None, prio=0, ext=''): """ Get the filename where the new data will be written. If ``host`` is given, it will be host-specific. It will be group-specific if ``group`` and ``prio`` are given. If neither ``host`` nor ``group`` is given, the filename will be non-specific. In general, this will be called as:: self.get_filename(**self.get_specificity(metadata)) :param host: The file applies to the given host :type host: bool :param group: The file applies to the given group :type group: string :param prio: The file has the given priority relative to other objects that also apply to the same group. ``group`` must also be specified. :type prio: int :param ext: An extension to add after the specificity (e.g., '.crypt', to signal that an encrypted file has been created) :type prio: string :returns: string - the filename """ basefilename = \ os.path.join(os.path.dirname(self.name), os.path.basename(os.path.dirname(self.name))) if group: return "%s.G%02d_%s%s" % (basefilename, prio, group, ext) elif host: return "%s.H_%s%s" % (basefilename, host, ext) else: return "%s%s" % (basefilename, ext) def write_data(self, data, host=None, group=None, prio=0, ext=''): """ Write the new data to disk. If ``host`` is given, it is written as a host-specific file, or as a group-specific file if ``group`` and ``prio`` are given. If neither ``host`` nor ``group`` is given, it will be written as a non-specific file. In general, this will be called as:: self.write_data(data, **self.get_specificity(metadata)) :param data: The data to write :type data: string :param host: The data applies to the given host :type host: bool :param group: The data applies to the given group :type group: string :param prio: The data has the given priority relative to other objects that also apply to the same group. ``group`` must also be specified. 
:type prio: int :param ext: An extension to add after the specificity (e.g., '.crypt', to signal that an encrypted file has been created) :type prio: string :returns: None :raises: :exc:`Bcfg2.Server.Plugins.Cfg.CfgCreationError` """ fileloc = self.get_filename(host=host, group=group, prio=prio, ext=ext) self.debug_log("Cfg: Writing new file %s" % fileloc) try: os.makedirs(os.path.dirname(fileloc)) except OSError: err = sys.exc_info()[1] if err.errno != errno.EEXIST: raise CfgCreationError("Could not create parent directories " "for %s: %s" % (fileloc, err)) try: open(fileloc, 'wb').write(data) except IOError: err = sys.exc_info()[1] raise CfgCreationError("Could not write %s: %s" % (fileloc, err)) class XMLCfgCreator(CfgCreator, # pylint: disable=W0223 Bcfg2.Server.Plugin.StructFile): """ A CfgCreator that uses XML to describe how data should be generated. """ #: Whether or not the created data from this class can be #: encrypted encryptable = True #: Encryption and creation settings can be stored in bcfg2.conf, #: either under the [cfg] section, or under the named section. cfg_section = None def __init__(self, name): CfgCreator.__init__(self, name) Bcfg2.Server.Plugin.StructFile.__init__(self, name) def handle_event(self, event): CfgCreator.handle_event(self, event) Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event) @property def passphrase(self): """ The passphrase used to encrypt created data """ if self.cfg_section: localopt = "%s_passphrase" % self.cfg_section passphrase = getattr(Bcfg2.Options.setup, localopt, Bcfg2.Options.setup.cfg_passphrase) else: passphrase = Bcfg2.Options.setup.cfg_passphrase if passphrase is None: return None try: return Bcfg2.Options.setup.passphrases[passphrase] except KeyError: raise CfgCreationError("%s: No such passphrase: %s" % (self.__class__.__name__, passphrase)) @property def category(self): """ The category to which created data is specific """ if self.cfg_section: localopt = "%s_category" % self.cfg_section return getattr(Bcfg2.Options.setup, localopt, Bcfg2.Options.setup.cfg_category) else: return Bcfg2.Options.setup.cfg_category def write_data(self, data, host=None, group=None, prio=0, ext=''): if HAS_CRYPTO and self.encryptable and self.passphrase: self.debug_log("Cfg: Encrypting created data") data = Bcfg2.Server.Encryption.ssl_encrypt(data, self.passphrase) ext = '.crypt' CfgCreator.write_data(self, data, host=host, group=group, prio=prio, ext=ext) def get_specificity(self, metadata): """ Get config settings for key generation specificity (per-host or per-group). 
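Depending on the configured category and the ``perhost`` setting on
the XML spec, the returned dict looks roughly like one of the
following (the hostname and group name are illustrative)::

    dict(host="client.example.com")
    dict(group="webservers", prio=50)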
:param metadata: The client metadata to create data for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: dict - A dict of specificity arguments suitable for passing to :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data` or :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename` """ category = self.xdata.get("category", self.category) if category is None: per_host_default = "true" else: per_host_default = "false" per_host = self.xdata.get("perhost", per_host_default).lower() == "true" specificity = dict(host=metadata.hostname) if category and not per_host: group = metadata.group_in_category(category) if group: specificity = dict(group=group, prio=int(self.xdata.get("priority", 50))) else: self.logger.info("Cfg: %s has no group in category %s, " "creating host-specific data" % (metadata.hostname, category)) return specificity class CfgVerificationError(Exception): """ Raised by :func:`Bcfg2.Server.Plugins.Cfg.CfgVerifier.verify_entry` when an entry fails verification """ pass class CfgCreationError(Exception): """ Raised by :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.create_data` when data creation fails """ pass class CfgDefaultInfo(CfgInfo): """ :class:`Bcfg2.Server.Plugins.Cfg.Cfg` handler that supplies a default set of file metadata """ def __init__(self): CfgInfo.__init__(self, '') __init__.__doc__ = CfgInfo.__init__.__doc__.split(".. -----")[0] def bind_info_to_entry(self, entry, _): for key, value in Bcfg2.Server.Plugin.default_path_metadata().items(): entry.attrib[key] = value bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet): """ Handle a collection of host- and group-specific Cfg files with multiple different Cfg handlers in a single directory. """ def __init__(self, basename, path, entry_type): Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, entry_type) self.specific = None __init__.__doc__ = Bcfg2.Server.Plugin.EntrySet.__doc__ def set_debug(self, debug): rv = Bcfg2.Server.Plugin.EntrySet.set_debug(self, debug) for entry in self.entries.values(): entry.set_debug(debug) return rv def handle_event(self, event): """ Dispatch a FAM event to :func:`entry_init` or the appropriate child handler object. 
:param event: An event that applies to a file handled by this CfgEntrySet :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ action = event.code2str() if event.filename not in self.entries: if action not in ['exists', 'created', 'changed']: # process a bogus changed event like a created return for hdlr in Bcfg2.Options.setup.cfg_handlers: if hdlr.handles(event, basename=self.path): if action == 'changed': # warn about a bogus 'changed' event, but # handle it like a 'created' self.logger.warning("Got %s event for unknown file %s" % (action, event.filename)) self.debug_log("%s handling %s event on %s" % (hdlr.__name__, action, event.filename)) self.entry_init(event, hdlr) return elif hdlr.ignore(event, basename=self.path): return # we only get here if event.filename in self.entries, so handle # created event like changed elif action == 'changed' or action == 'created': self.entries[event.filename].handle_event(event) return elif action == 'deleted': del self.entries[event.filename] return self.logger.error("Could not process event %s for %s; ignoring" % (action, event.filename)) def get_matching(self, metadata): return self.get_handlers(metadata, CfgGenerator) get_matching.__doc__ = Bcfg2.Server.Plugin.EntrySet.get_matching.__doc__ def entry_init(self, event, hdlr): # pylint: disable=W0221 """ Handle the creation of a file on the filesystem and the creation of a Cfg handler object in this CfgEntrySet to track it. :param event: An event that applies to a file handled by this CfgEntrySet :type event: Bcfg2.Server.FileMonitor.Event :param hdlr: The Cfg handler class to be used to create an object for the file described by ``event`` :type hdlr: class :returns: None :raises: :class:`Bcfg2.Server.Plugin.exceptions.SpecificityError` """ fpath = os.path.join(self.path, event.filename) if hdlr.__basenames__: fdesc = "/".join(hdlr.__basenames__) elif hdlr.__extensions__: fdesc = "." + "/.".join(hdlr.__extensions__) if hdlr.deprecated: self.logger.warning("Cfg: %s: Use of %s files is deprecated" % (fpath, fdesc)) elif hdlr.experimental: self.logger.warning("Cfg: %s: Use of %s files is experimental" % (fpath, fdesc)) if hdlr.__specific__: if hdlr.__basenames__: # specific entry with explicit basenames basenames = hdlr.__basenames__ else: # specific entry with no explicit basename; use the # directory name as the basename basenames = [os.path.basename(self.path)] Bcfg2.Server.Plugin.EntrySet.entry_init( self, event, entry_type=hdlr, specific=hdlr.get_regex(basenames)) else: if event.filename in self.entries: self.logger.warn("Got duplicate add for %s" % event.filename) else: self.entries[event.filename] = hdlr(fpath) self.entries[event.filename].handle_event(event) def bind_entry(self, entry, metadata): self.bind_info_to_entry(entry, metadata) data, generator = self._generate_data(entry, metadata) if generator is not None: # apply no filters if the data was created by a CfgCreator for fltr in self.get_handlers(metadata, CfgFilter): if fltr.specific <= generator.specific: # only apply filters that are as specific or more # specific than the generator used for this entry. # Note that specificity comparison is backwards in # this sense, since it's designed to sort from # most specific to least specific. 
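# Concretely (a sketch): with a host-specific generator for this
# client and a merely group-specific filter, the filter sorts as less
# specific, the comparison above is False, and the filter is skipped;
# a host-specific filter for the same client would still be applied.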
data = fltr.modify_data(entry, metadata, data) if Bcfg2.Options.setup.cfg_validation: try: self._validate_data(entry, metadata, data) except CfgVerificationError: raise PluginExecutionError("Failed to verify %s for %s: %s" % (entry.get('name'), metadata.hostname, sys.exc_info()[1])) if entry.get('encoding') == 'base64': data = b64encode(data) else: try: if not isinstance(data, unicode): if not isinstance(data, str): data = data.decode('utf-8') data = u_str(data, Bcfg2.Options.setup.encoding) except UnicodeDecodeError: msg = "Failed to decode %s: %s" % (entry.get('name'), sys.exc_info()[1]) self.logger.error(msg) self.logger.error("Please verify you are using the proper " "encoding") raise PluginExecutionError(msg) except ValueError: msg = "Error in specification for %s: %s" % (entry.get('name'), sys.exc_info()[1]) self.logger.error(msg) self.logger.error("You need to specify base64 encoding for %s" % entry.get('name')) raise PluginExecutionError(msg) except TypeError: # data is already unicode; newer versions of Cheetah # seem to return unicode pass if data: entry.text = data else: entry.set('empty', 'true') return entry bind_entry.__doc__ = Bcfg2.Server.Plugin.EntrySet.bind_entry.__doc__ def get_handlers(self, metadata, handler_type): """ Get all handlers of the given type for the given metadata. :param metadata: The metadata to get all handlers for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param handler_type: The type of Cfg handler to get :type handler_type: type :returns: list of Cfg handler classes """ rv = [] for ent in self.entries.values(): if (isinstance(ent, handler_type) and (not ent.__specific__ or ent.specific.matches(metadata))): rv.append(ent) return rv def bind_info_to_entry(self, entry, metadata): """ Bind entry metadata to the entry with the best CfgInfo handler :param entry: The abstract entry to bind the info to. This will be modified in place :type entry: lxml.etree._Element :param metadata: The client metadata to get info for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None """ info_handlers = self.get_handlers(metadata, CfgInfo) CfgDefaultInfo().bind_info_to_entry(entry, metadata) if len(info_handlers) > 1: self.logger.error("More than one info supplier found for %s: %s" % (entry.get("name"), info_handlers)) if len(info_handlers): info_handlers[0].bind_info_to_entry(entry, metadata) if entry.tag == 'Path': entry.set('type', 'file') def _create_data(self, entry, metadata): """ Create data for the given entry on the given client :param entry: The abstract entry to create data for. This will not be modified :type entry: lxml.etree._Element :param metadata: The client metadata to create data for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: string - the data for the entry """ creator = self.best_matching(metadata, self.get_handlers(metadata, CfgCreator)) try: return creator.create_data(entry, metadata) except CfgCreationError: raise PluginExecutionError("Cfg: Error creating data for %s: %s" % (entry.get("name"), sys.exc_info()[1])) def _generate_data(self, entry, metadata): """ Generate data for the given entry on the given client :param entry: The abstract entry to generate data for. 
This will not be modified :type entry: lxml.etree._Element :param metadata: The client metadata to generate data for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: tuple of (string, generator) - the data for the entry and the generator used to generate it (or None, if data was created) """ try: generator = self.best_matching(metadata, self.get_handlers(metadata, CfgGenerator)) except PluginExecutionError: # if no creators or generators exist, _create_data() # raises an appropriate exception return (self._create_data(entry, metadata), None) try: return (generator.get_data(entry, metadata), generator) except: # TODO: the exceptions raised by ``get_data`` are not # constrained in any way, so for now this needs to be a # blanket except. msg = "Cfg: Error rendering %s: %s" % (entry.get("name"), sys.exc_info()[1]) self.logger.error(msg) raise PluginExecutionError(msg) def _validate_data(self, entry, metadata, data): """ Validate data for the given entry on the given client :param entry: The abstract entry to validate data for :type entry: lxml.etree._Element :param metadata: The client metadata validate data for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: None :raises: :exc:`Bcfg2.Server.Plugins.Cfg.CfgVerificationError` """ verifiers = self.get_handlers(metadata, CfgVerifier) # we can have multiple verifiers, but we only want to use the # best matching verifier of each class verifiers_by_class = dict() for verifier in verifiers: cls = verifier.__class__.__name__ if cls not in verifiers_by_class: verifiers_by_class[cls] = [verifier] else: verifiers_by_class[cls].append(verifier) for verifiers in verifiers_by_class.values(): verifier = self.best_matching(metadata, verifiers) verifier.verify_entry(entry, metadata, data) def list_accept_choices(self, entry, metadata): '''return a list of candidate pull locations''' generators = [ent for ent in list(self.entries.values()) if (isinstance(ent, CfgGenerator) and ent.specific.matches(metadata))] if not generators: msg = "No base file found for %s" % entry.get('name') self.logger.error(msg) raise PluginExecutionError(msg) rv = [] try: best = self.best_matching(metadata, generators) rv.append(best.specific) except PluginExecutionError: pass if not rv or not rv[0].hostname: rv.append( Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)) return rv def build_filename(self, specific): """ Create a filename for pulled file data """ bfname = self.path + '/' + self.path.split('/')[-1] if specific.all: return bfname elif specific.group: return "%s.G%02d_%s" % (bfname, specific.prio, specific.group) elif specific.hostname: return "%s.H_%s" % (bfname, specific.hostname) def write_update(self, specific, new_entry, log): """ Write pulled data to the filesystem """ if 'text' in new_entry: name = self.build_filename(specific) if os.path.exists("%s.genshi" % name): msg = "Cfg: Unable to pull data for genshi types" self.logger.error(msg) raise PluginExecutionError(msg) elif os.path.exists("%s.cheetah" % name): msg = "Cfg: Unable to pull data for cheetah types" self.logger.error(msg) raise PluginExecutionError(msg) try: etext = new_entry['text'].encode(Bcfg2.Options.setup.encoding) except UnicodeDecodeError: msg = "Cfg: Cannot encode content of %s as %s" % \ (name, Bcfg2.Options.setup.encoding) self.logger.error(msg) raise PluginExecutionError(msg) open(name, 'w').write(etext) self.debug_log("Wrote file %s" % name, flag=log) badattr = [attr for attr in ['owner', 'group', 'mode'] if attr in new_entry] if badattr: 
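# When the pulled entry carries owner/group/mode, the block below
# records them in an info.xml next to the file; the result looks
# roughly like this (the attribute values are illustrative):
#
#   <FileInfo>
#     <Info owner="root" group="root" mode="0644"/>
#   </FileInfo>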
metadata_updates = {} metadata_updates.update(self.metadata) for attr in badattr: metadata_updates[attr] = new_entry.get(attr) infoxml = lxml.etree.Element('FileInfo') infotag = lxml.etree.SubElement(infoxml, 'Info') for attr in metadata_updates: infotag.attrib.__setitem__(attr, metadata_updates[attr]) ofile = open(self.path + "/info.xml", "w") ofile.write(lxml.etree.tostring(infoxml, xml_declaration=False, pretty_print=True).decode('UTF-8')) ofile.close() self.debug_log("Wrote file %s" % os.path.join(self.path, "info.xml"), flag=log) class CfgHandlerAction(Bcfg2.Options.ComponentAction): """ Option parser action to load Cfg handlers """ bases = ['Bcfg2.Server.Plugins.Cfg'] class Cfg(Bcfg2.Server.Plugin.GroupSpool, Bcfg2.Server.Plugin.PullTarget): """ The Cfg plugin provides a repository to describe configuration file contents for clients. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines. """ __author__ = 'bcfg-dev@mcs.anl.gov' es_cls = CfgEntrySet es_child_cls = Bcfg2.Server.Plugin.SpecificData options = Bcfg2.Server.Plugin.GroupSpool.options + [ Bcfg2.Options.BooleanOption( '--cfg-validation', cf=('cfg', 'validation'), default=True, dest="cfg_validation", help='Run validation on Cfg files'), Bcfg2.Options.Option( cf=('cfg', 'category'), dest="cfg_category", help='The default name of the metadata category that created data ' 'is specific to'), Bcfg2.Options.Option( cf=('cfg', 'passphrase'), dest="cfg_passphrase", help='The default passphrase name used to encrypt created data'), Bcfg2.Options.Option( cf=("cfg", "handlers"), dest="cfg_handlers", help="Cfg handlers to load", type=Bcfg2.Options.Types.comma_list, action=CfgHandlerAction, default=_handlers)] def __init__(self, core): global _CFG # pylint: disable=W0603 Bcfg2.Server.Plugin.GroupSpool.__init__(self, core) Bcfg2.Server.Plugin.PullTarget.__init__(self) Bcfg2.Options.setup.cfg_handlers.sort( key=operator.attrgetter("__priority__")) _CFG = self __init__.__doc__ = Bcfg2.Server.Plugin.GroupSpool.__init__.__doc__ def has_generator(self, entry, metadata): """ Return True if the given entry can be generated for the given metadata; False otherwise :param entry: Determine if a :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator` object exists that handles this (abstract) entry :type entry: lxml.etree._Element :param metadata: Determine if a CfgGenerator has data that applies to this client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: bool """ if entry.get('name') not in self.entries: return False return bool(self.entries[entry.get('name')].get_handlers(metadata, CfgGenerator)) def AcceptChoices(self, entry, metadata): return self.entries[entry.get('name')].list_accept_choices(entry, metadata) AcceptChoices.__doc__ = \ Bcfg2.Server.Plugin.PullTarget.AcceptChoices.__doc__ def AcceptPullData(self, specific, new_entry, log): return self.entries[new_entry.get('name')].write_update(specific, new_entry, log) AcceptPullData.__doc__ = \ Bcfg2.Server.Plugin.PullTarget.AcceptPullData.__doc__ src/lib/Bcfg2/Server/Plugins/Cvs.py000066400000000000000000000021211303523157100173260ustar00rootroot00000000000000""" The Cvs plugin provides a revision interface for Bcfg2 repos using cvs. 
""" from Bcfg2.Utils import Executor import Bcfg2.Server.Plugin class Cvs(Bcfg2.Server.Plugin.Version): """ The Cvs plugin provides a revision interface for Bcfg2 repos using cvs.""" __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = "CVSROOT" def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.cmd = Executor() self.logger.debug("Initialized cvs plugin with CVS directory %s" % self.vcs_path) def get_revision(self): """Read cvs revision information for the Bcfg2 repository.""" result = self.cmd.run(["env LC_ALL=C", "cvs", "log"], shell=True, cwd=Bcfg2.Options.setup.vcs_root) try: return result.stdout.splitlines()[0].strip() except (IndexError, AttributeError): msg = "Failed to read revision from CVS: %s" % result.error self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) src/lib/Bcfg2/Server/Plugins/DBStats.py000066400000000000000000000010341303523157100201010ustar00rootroot00000000000000""" DBstats provides a database-backed statistics handler """ import Bcfg2.Server.Plugin class DBStats(Bcfg2.Server.Plugin.Plugin): """ DBstats provides a database-backed statistics handler """ def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) self.logger.error("DBStats has been replaced with Reporting") self.logger.error("DBStats: Be sure to migrate your data " "before running the report collector") raise Bcfg2.Server.Plugin.PluginInitError src/lib/Bcfg2/Server/Plugins/Darcs.py000066400000000000000000000021411303523157100176310ustar00rootroot00000000000000""" Darcs is a version plugin for dealing with Bcfg2 repos stored in the Darcs VCS. """ from Bcfg2.Utils import Executor import Bcfg2.Server.Plugin class Darcs(Bcfg2.Server.Plugin.Version): """ Darcs is a version plugin for dealing with Bcfg2 repos stored in the Darcs VCS. """ __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = "_darcs" def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.cmd = Executor() self.logger.debug("Initialized Darcs plugin with darcs directory %s" % self.vcs_path) def get_revision(self): """Read Darcs changeset information for the Bcfg2 repository.""" result = self.cmd.run(["env LC_ALL=C", "darcs", "changes"], shell=True, cwd=Bcfg2.Options.setup.vcs_root) if result.success: return result.stdout.splitlines()[0].strip() else: msg = "Failed to read revision from darcs: %s" % result.error self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) src/lib/Bcfg2/Server/Plugins/Decisions.py000066400000000000000000000024601303523157100205210ustar00rootroot00000000000000""" The Decisions plugin provides a flexible method to whitelist or blacklist certain entries. 
""" import os import Bcfg2.Server.Plugin import Bcfg2.Server.FileMonitor class DecisionFile(Bcfg2.Server.Plugin.StructFile): """ Representation of a Decisions XML file """ def get_decisions(self, metadata): """ Get a list of whitelist or blacklist tuples """ if self.xdata is None: # no white/blacklist has been read yet, probably because # it doesn't exist return [] return [(x.get('type'), x.get('name')) for x in self.XMLMatch(metadata).xpath('.//Decision')] class Decisions(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Decision): """ Decisions plugin """ __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Decision.__init__(self) self.whitelist = DecisionFile(os.path.join(self.data, "whitelist.xml"), should_monitor=True) self.blacklist = DecisionFile(os.path.join(self.data, "blacklist.xml"), should_monitor=True) def GetDecisions(self, metadata, mode): return getattr(self, mode).get_decisions(metadata) src/lib/Bcfg2/Server/Plugins/Defaults.py000066400000000000000000000037461303523157100203600ustar00rootroot00000000000000"""This generator provides rule-based entry mappings.""" import Bcfg2.Options import Bcfg2.Server.Plugin import Bcfg2.Server.Plugins.Rules class Defaults(Bcfg2.Server.Plugins.Rules.Rules, Bcfg2.Server.Plugin.GoalValidator): """Set default attributes on bound entries""" __author__ = 'bcfg-dev@mcs.anl.gov' options = Bcfg2.Server.Plugin.PrioDir.options + [ Bcfg2.Options.BooleanOption( cf=("defaults", "replace_name"), dest="defaults_replace_name", help="Replace %{name} in attributes with name of target entry")] # Rules is a Generator that happens to implement all of the # functionality we want, so we overload it, but Defaults should # _not_ handle any entries; it does its stuff in the structure # validation phase. so we overload Handle(s)Entry and HandleEvent # to ensure that Defaults handles no entries, even though it's a # Generator. def HandlesEntry(self, entry, metadata): return False def HandleEvent(self, event): Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event) def validate_goals(self, metadata, config): """ Apply defaults """ for struct in config.getchildren(): for entry in struct.getchildren(): try: self.BindEntry(entry, metadata) except Bcfg2.Server.Plugin.PluginExecutionError: # either no matching defaults (which is okay), # or multiple matching defaults (which is not # okay, but is logged). either way, we don't # care about the error. pass @property def _regex_enabled(self): """ Defaults depends on regex matching, so force it enabled """ return True @property def _replace_name_enabled(self): """ Return True if the replace_name feature is enabled, False otherwise """ return Bcfg2.Options.setup.defaults_replace_name src/lib/Bcfg2/Server/Plugins/Deps.py000066400000000000000000000070601303523157100174750ustar00rootroot00000000000000"""This plugin provides automatic dependency handling.""" import lxml.etree import Bcfg2.Server.Plugin from Bcfg2.Server.Plugin import PluginExecutionError class Deps(Bcfg2.Server.Plugin.PrioDir, Bcfg2.Server.Plugin.StructureValidator): # Override the default sort_order (of 500) so that this plugin # gets handled after others running at the default. In particular, # we want to run after Packages, so we can see the final set of # packages that will be installed on the client. 
sort_order = 750 def __init__(self, core): Bcfg2.Server.Plugin.PrioDir.__init__(self, core) Bcfg2.Server.Plugin.StructureValidator.__init__(self) self.cache = {} def HandleEvent(self, event): self.cache = {} Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event) def validate_structures(self, metadata, structures): """Examine the passed structures and append any additional prerequisite entries as defined by the files in Deps. """ entries = [] for structure in structures: for entry in structure.getchildren(): tag = entry.tag if tag.startswith('Bound'): tag = tag[5:] if ((tag, entry.get('name')) not in entries and not isinstance(entry, lxml.etree._Comment)): entries.append((tag, entry.get('name'))) entries.sort() entries = tuple(entries) groups = list(metadata.groups) groups.sort() groups = tuple(groups) # Check to see if we have cached the prereqs already if (entries, groups) in self.cache: prereqs = self.cache[(entries, groups)] else: prereqs = self.calculate_prereqs(metadata, entries) self.cache[(entries, groups)] = prereqs newstruct = lxml.etree.Element("Independent", name=self.__class__.__name__) for tag, name in prereqs: lxml.etree.SubElement(newstruct, tag, name=name) structures.append(newstruct) def calculate_prereqs(self, metadata, entries): """Calculate the prerequisites defined in Deps for the passed set of entries. """ prereqs = [] toexamine = list(entries[:]) while toexamine: entry = toexamine.pop() # tuples of (PriorityStructFile, element) for each # matching element and the structfile that contains it matching = [] for deps in self.entries.values(): el = deps.find("/%s[name='%s']" % (entry.tag, entry.get("name"))) if el: matching.append((deps, el)) if len(matching) > 1: prio = [int(m[0].priority) for m in matching] if prio.count(max(prio)) > 1: raise PluginExecutionError( "Deps: Found conflicting dependencies with same " "priority for %s:%s for %s: %s" % (entry.tag, entry.get("name"), metadata.hostname, [m[0].name for m in matching])) index = prio.index(max(prio)) matching = [matching[index]] if not matching: continue for prq in matching[0][1].getchildren(): if prq not in prereqs and prq not in entries: toexamine.append(prq) prereqs.append(prq) return prereqs src/lib/Bcfg2/Server/Plugins/FileProbes.py000066400000000000000000000224201303523157100206310ustar00rootroot00000000000000""" This module allows you to probe a client for a file, which is then added to the specification. On subsequent runs, the file will be replaced on the client if it is missing; if it has changed on the client, it can either be updated in the specification or replaced on the client """ import os import sys import errno import lxml.etree import Bcfg2.Server import Bcfg2.Server.Plugin import Bcfg2.Server.FileMonitor from Bcfg2.Compat import b64decode #: The probe we send to clients to get the file data. Returns an XML #: document describing the file and its metadata. We avoid returning #: a non-0 error code on most errors, since that could halt client #: execution. 
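#: A successful probe run prints a single element along these lines
#: (the path, ownership, and mode shown are illustrative)::
#:
#:   <ProbedFileData name="/etc/motd" owner="root" group="root"
#:                   mode="0644">...base64-encoded contents...</ProbedFileData>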
PROBECODE = """#!/usr/bin/env python import os import sys import pwd import grp import Bcfg2.Client.XML try: from Bcfg2.Compat import b64encode, oct_mode except ImportError: from base64 import b64encode oct_mode = oct path = "%s" if not os.path.exists(path): sys.stderr.write("%%s does not exist" %% path) raise SystemExit(0) try: stat = os.stat(path) except OSError: sys.stderr.write("Could not stat %%s: %%s" %% (path, sys.exc_info()[1])) raise SystemExit(0) data = Bcfg2.Client.XML.Element("ProbedFileData", name=path, owner=pwd.getpwuid(stat[4])[0], group=grp.getgrgid(stat[5])[0], mode=oct_mode(stat[0] & 4095)) try: data.text = b64encode(open(path).read()) except IOError: sys.stderr.write("Could not read %%s: %%s" %% (path, sys.exc_info()[1])) raise SystemExit(0) print(Bcfg2.Client.XML.tostring(data, xml_declaration=False).decode('UTF-8')) """ class FileProbes(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Probing): """ This module allows you to probe a client for a file, which is then added to the specification. On subsequent runs, the file will be replaced on the client if it is missing; if it has changed on the client, it can either be updated in the specification or replaced on the client """ __author__ = 'chris.a.st.pierre@gmail.com' def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Probing.__init__(self) self.config = \ Bcfg2.Server.Plugin.StructFile(os.path.join(self.data, 'config.xml'), should_monitor=True, create=self.name) self.entries = dict() self.probes = dict() def GetProbes(self, metadata): """Return a set of probes for execution on client.""" if metadata.hostname not in self.probes: cfg = self.core.plugins['Cfg'] self.entries[metadata.hostname] = dict() self.probes[metadata.hostname] = [] for entry in self.config.Match(metadata): path = entry.get("name") # do not probe for files that are already in Cfg and # for which update is false; we can't possibly do # anything with the data we get from such a probe if (entry.get('update', 'false').lower() == "false" and not cfg.has_generator(entry, metadata)): continue self.entries[metadata.hostname][path] = entry probe = lxml.etree.Element('probe', name=path, source=self.name, interpreter="/usr/bin/env python") probe.text = PROBECODE % path self.probes[metadata.hostname].append(probe) self.debug_log("Adding file probe for %s to %s" % (path, metadata.hostname)) return self.probes[metadata.hostname] def ReceiveData(self, metadata, datalist): """Receive data from probe.""" self.debug_log("Receiving file probe data from %s" % metadata.hostname) for data in datalist: if data.text is None: self.logger.error("Got null response to %s file probe from %s" % (data.get('name'), metadata.hostname)) else: try: self.write_data( lxml.etree.XML(data.text, parser=Bcfg2.Server.XMLParser), metadata) except lxml.etree.XMLSyntaxError: # if we didn't get XML back from the probe, assume # it's an error message self.logger.error(data.text) def write_data(self, data, metadata): """Write the probed file data to the bcfg2 specification.""" filename = data.get("name") contents = b64decode(data.text) entry = self.entries[metadata.hostname][filename] cfg = self.core.plugins['Cfg'] specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname) # we can't use os.path.join() for this because specific # already has a leading /, which confuses os.path.join() fileloc = os.path.join(cfg.data, os.path.join(filename, specific).lstrip("/")) create = False try: cfg.entries[filename].bind_entry(entry, metadata) except 
(KeyError, Bcfg2.Server.Plugin.PluginExecutionError): create = True # get current entry data if entry.text and entry.get("encoding") == "base64": entrydata = b64decode(entry.text) else: entrydata = entry.text if create: self.logger.info("Writing new probed file %s" % fileloc) self.write_file(fileloc, contents) self.verify_file(filename, contents, metadata) infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml") self.write_infoxml(infoxml, data) elif entrydata == contents: self.debug_log("Existing %s contents match probed contents" % filename) return elif (entry.get('update', 'false').lower() == "true"): self.logger.info("Writing updated probed file %s" % fileloc) self.write_file(fileloc, contents) self.verify_file(filename, contents, metadata) else: self.logger.info("Skipping updated probed file %s" % fileloc) return def write_file(self, fileloc, contents): """ Write the probed file to disk """ try: os.makedirs(os.path.dirname(fileloc)) except OSError: err = sys.exc_info()[1] if err.errno == errno.EEXIST: pass else: self.logger.error("Could not create parent directories for " "%s: %s" % (fileloc, err)) return try: open(fileloc, 'wb').write(contents) except IOError: err = sys.exc_info()[1] self.logger.error("Could not write %s: %s" % (fileloc, err)) return def verify_file(self, filename, contents, metadata): """ Service the FAM events queued up by the key generation so the data structure entries will be available for binding. NOTE: We wait for up to ten seconds. There is some potential for race condition, because if the file monitor doesn't get notified about the new key files in time, those entries won't be available for binding. In practice, this seems "good enough".""" entry = self.entries[metadata.hostname][filename] cfg = self.core.plugins['Cfg'] tries = 0 updated = False while not updated: if tries >= 10: self.logger.error("%s still not registered" % filename) return Bcfg2.Server.FileMonitor.get_fam().handle_events_in_interval(1) try: cfg.entries[filename].bind_entry(entry, metadata) except Bcfg2.Server.Plugin.PluginExecutionError: tries += 1 continue # get current entry data if entry.get("encoding") == "base64": entrydata = b64decode(entry.text) else: entrydata = entry.text if entrydata == contents: updated = True tries += 1 def write_infoxml(self, infoxml, data): """ write an info.xml for the file """ if os.path.exists(infoxml): return self.logger.info("Writing %s for %s" % (infoxml, data.get("name"))) default_mdata = Bcfg2.Server.Plugin.default_path_metadata() info = lxml.etree.Element( "Info", owner=data.get("owner", default_mdata['owner']), group=data.get("group", default_mdata['group']), mode=data.get("mode", default_mdata['mode'])) root = lxml.etree.Element("FileInfo") root.append(info) try: root.getroottree().write(infoxml, xml_declaration=False, pretty_print=True) except IOError: err = sys.exc_info()[1] self.logger.error("Could not write %s: %s" % (infoxml, err)) return src/lib/Bcfg2/Server/Plugins/Fossil.py000066400000000000000000000024751303523157100200460ustar00rootroot00000000000000""" The Fossil plugin provides a revision interface for Bcfg2 repos using fossil.""" from Bcfg2.Utils import Executor import Bcfg2.Server.Plugin class Fossil(Bcfg2.Server.Plugin.Version): """ The Fossil plugin provides a revision interface for Bcfg2 repos using fossil. 
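The revision is parsed from ``fossil info`` output, which contains a
checkout line shaped roughly like this (the hash and date are
illustrative)::

    checkout: 6a1b2c3d4e5f... 2013-04-10 15:02:23 UTC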
""" __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = "_FOSSIL_" def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.cmd = Executor() self.logger.debug("Initialized Fossil plugin with fossil directory %s" % self.vcs_path) def get_revision(self): """Read fossil revision information for the Bcfg2 repository.""" result = self.cmd.run(["env LC_ALL=C", "fossil", "info"], shell=True, cwd=Bcfg2.Options.setup.vcs_root) try: revision = None for line in result.stdout.splitlines(): ldata = line.split(': ') if ldata[0].strip() == 'checkout': revision = line[1].strip().split(' ')[0] return revision except (IndexError, AttributeError): msg = "Failed to read revision from Fossil: %s" % result.error self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) src/lib/Bcfg2/Server/Plugins/Git.py000066400000000000000000000077111303523157100173300ustar00rootroot00000000000000""" The Git plugin provides a revision interface for Bcfg2 repos using git. """ import sys import Bcfg2.Options from Bcfg2.Server.Plugin import Version, PluginExecutionError try: import git HAS_GITPYTHON = True except ImportError: from Bcfg2.Utils import Executor HAS_GITPYTHON = False class Git(Version): """ The Git plugin provides a revision interface for Bcfg2 repos using git. """ __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = ".git" if HAS_GITPYTHON: __rmi__ = Version.__rmi__ + ['Update'] def __init__(self, core): Version.__init__(self, core) if HAS_GITPYTHON: self.repo = git.Repo(Bcfg2.Options.setup.vcs_root) self.cmd = None else: self.logger.debug("Git: GitPython not found, using CLI interface " "to Git") self.repo = None self.cmd = Executor() self.logger.debug("Initialized git plugin with git directory %s" % self.vcs_path) def _log_git_cmd(self, output): """ Send output from a GitPython command to the debug log """ for line in output.strip().splitlines(): self.debug_log("Git: %s" % line) def get_revision(self): """Read git revision information for the Bcfg2 repository.""" if HAS_GITPYTHON: return self.repo.head.commit.hexsha else: cmd = ["git", "--git-dir", self.vcs_path, "--work-tree", Bcfg2.Options.setup.vcs_root, "rev-parse", "HEAD"] self.debug_log("Git: Running %s" % cmd) result = self.cmd.run(cmd) if not result.success: raise PluginExecutionError(result.stderr) return result.stdout def Update(self, ref=None): """ Git.Update() => True|False Update the working copy against the upstream repository """ self.logger.info("Git: Git.Update(ref='%s')" % ref) self.debug_log("Git: Performing garbage collection on repo at %s" % Bcfg2.Options.setup.vcs_root) try: self._log_git_cmd(self.repo.git.gc('--auto')) except git.GitCommandError: self.logger.warning("Git: Failed to perform garbage collection: %s" % sys.exc_info()[1]) self.debug_log("Git: Fetching all refs for repo at %s" % Bcfg2.Options.setup.vcs_root) try: self._log_git_cmd(self.repo.git.fetch('--all')) except git.GitCommandError: self.logger.warning("Git: Failed to fetch refs: %s" % sys.exc_info()[1]) if ref: self.debug_log("Git: Checking out %s" % ref) try: self._log_git_cmd(self.repo.git.checkout('-f', ref)) except git.GitCommandError: raise PluginExecutionError("Git: Failed to checkout %s: %s" % (ref, sys.exc_info()[1])) # determine if we should try to pull to get the latest commit # on this head tracking = None if not self.repo.head.is_detached: self.debug_log("Git: Determining if %s is a tracking branch" % self.repo.head.ref.name) tracking = self.repo.head.ref.tracking_branch() if tracking is not None: 
self.debug_log("Git: %s is a tracking branch, pulling from %s" % (self.repo.head.ref.name, tracking)) try: self._log_git_cmd(self.repo.git.pull("--rebase")) except git.GitCommandError: raise PluginExecutionError("Git: Failed to pull from " "upstream: %s" % sys.exc_info()[1]) self.logger.info("Git: Repo at %s updated to %s" % (Bcfg2.Options.setup.vcs_root, self.get_revision())) return True src/lib/Bcfg2/Server/Plugins/GroupLogic.py000066400000000000000000000065771303523157100206700ustar00rootroot00000000000000""" GroupLogic is a connector plugin that lets you use an XML Genshi template to dynamically set additional groups for clients. """ import os import lxml.etree from threading import local import Bcfg2.Server.Plugin from Bcfg2.Server.Plugins.Metadata import MetadataGroup class GroupLogicConfig(Bcfg2.Server.Plugin.StructFile): """ Representation of the GroupLogic groups.xml file """ create = lxml.etree.Element("GroupLogic", nsmap=dict(py="http://genshi.edgewall.org/")) def __init__(self, filename, core): Bcfg2.Server.Plugin.StructFile.__init__(self, filename, should_monitor=True) self.core = core def Index(self): Bcfg2.Server.Plugin.StructFile.Index(self) if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire() def _match(self, item, metadata, *args): if item.tag == 'Group' and not len(item.getchildren()): return [item] return Bcfg2.Server.Plugin.StructFile._match(self, item, metadata, *args) def _xml_match(self, item, metadata, *args): if item.tag == 'Group' and not len(item.getchildren()): return [item] return Bcfg2.Server.Plugin.StructFile._xml_match(self, item, metadata, *args) class GroupLogic(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector): """ GroupLogic is a connector plugin that lets you use an XML Genshi template to dynamically set additional groups for clients. """ # perform grouplogic later than other Connector plugins, so it can # use groups set by them sort_order = 1000 def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"), core=core) self._local = local() def get_additional_groups(self, metadata): if not hasattr(self._local, "building"): # building is a thread-local set that tracks which # machines GroupLogic is getting additional groups for. # If a get_additional_groups() is called twice for a # machine before the first call has completed, the second # call returns an empty list. This is for infinite # recursion protection; without this check, it'd be # impossible to use things like metadata.query.in_group() # in GroupLogic, since that requires building all # metadata, which requires running # GroupLogic.get_additional_groups() for all hosts, which # requires building all metadata... 
self._local.building = set() if metadata.hostname in self._local.building: return [] self._local.building.add(metadata.hostname) rv = [] for el in self.config.XMLMatch(metadata).findall("Group"): if el.get("category"): rv.append(MetadataGroup(el.get("name"), category=el.get("category"))) else: rv.append(el.get("name")) self._local.building.discard(metadata.hostname) return rv src/lib/Bcfg2/Server/Plugins/GroupPatterns.py000066400000000000000000000114351303523157100214200ustar00rootroot00000000000000""" set group membership based on client hostnames """ import os import re import sys import Bcfg2.Server.Lint import Bcfg2.Server.Plugin from Bcfg2.Utils import PackedDigitRange class PatternInitializationError(Exception): """Raised when creating a PatternMap object fails.""" class PatternMap(object): """Handler for a single pattern or range.""" def __init__(self, pattern, rangestr, groups): self.pattern = pattern self.rangestr = rangestr self.groups = groups try: if pattern is not None: self._re = re.compile(pattern) self.process = self.process_re elif rangestr is not None: if '\\' in rangestr: raise PatternInitializationError( "Backslashes are not allowed in NameRanges") range_finder = r'\[\[[\d\-,]+\]\]' self.process = self.process_range self._re = re.compile(r'^' + re.sub(range_finder, r'(\d+)', rangestr)) dmatcher = re.compile(re.sub(range_finder, r'\[\[([\d\-,]+)\]\]', rangestr)) self.dranges = [PackedDigitRange(x) for x in dmatcher.match(rangestr).groups()] else: raise PatternInitializationError("No pattern or range given") except re.error: raise PatternInitializationError( "Could not compile pattern regex: %s" % sys.exc_info()[1]) def process_range(self, name): """match the given hostname against a range-based NameRange.""" match = self._re.match(name) if not match: return None digits = match.groups() for grp in range(len(digits)): if not self.dranges[grp].includes(digits[grp]): return None return self.groups def process_re(self, name): """match the given hostname against a regex-based NamePattern.""" match = self._re.search(name) if not match: return None ret = [] sub = match.groups() for group in self.groups: newg = group for idx in range(len(sub)): newg = newg.replace('$%s' % (idx + 1), sub[idx]) ret.append(newg) return ret def __str__(self): return "%s: %s %s" % (self.__class__.__name__, self.pattern, self.groups) class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked): """representation of GroupPatterns config.xml.""" __identifier__ = None create = 'GroupPatterns' def __init__(self, filename, core=None): Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, should_monitor=True) self.core = core self.patterns = [] def Index(self): Bcfg2.Server.Plugin.XMLFileBacked.Index(self) if (self.core and self.core.metadata_cache_mode in ['cautious', 'aggressive']): self.core.metadata_cache.expire() self.patterns = [] for entry in self.xdata.xpath('//GroupPattern'): try: groups = [g.text for g in entry.findall('Group')] for pat_ent in entry.findall('NamePattern'): pat = pat_ent.text self.patterns.append(PatternMap(pat, None, groups)) for range_ent in entry.findall('NameRange'): rng = range_ent.text self.patterns.append(PatternMap(None, rng, groups)) except PatternInitializationError: self.logger.error("GroupPatterns: Failed to initialize " "pattern %s: %s" % (entry.text, sys.exc_info()[1])) def process_patterns(self, hostname): """ return a list of groups that should be added to the given client based on patterns that match the hostname """ ret = [] for pattern in self.patterns: grps = 
pattern.process(hostname) if grps is not None: ret.extend(grps) return ret class GroupPatterns(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector): """set group membership based on client hostnames.""" def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) self.config = PatternFile(os.path.join(self.data, 'config.xml'), core=core) def get_additional_groups(self, metadata): return self.config.process_patterns(metadata.hostname) src/lib/Bcfg2/Server/Plugins/Guppy.py000066400000000000000000000025241303523157100177060ustar00rootroot00000000000000"""Debugging plugin to trace memory leaks within the bcfg2-server process. By default the remote debugger is started when this plugin is enabled. The debugger can be shutoff in a running process using "bcfg2-admin xcmd Guppy.Disable" and reenabled using "bcfg2-admin xcmd Guppy.Enable". To attach the console run: python -c "from guppy import hpy;hpy().monitor()" For example: # python -c "from guppy import hpy;hpy().monitor()" *** Connection 1 opened *** lc CID PID ARGV 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid'] sc 1 Remote connection 1. To return to Monitor, type or . int Remote interactive console. To return to Annex, type '-'. >>> hp.heap() ... """ import Bcfg2.Server.Plugin from guppy.heapy import Remote class Guppy(Bcfg2.Server.Plugin.Plugin): """Guppy is a debugging plugin to help trace memory leaks.""" __author__ = 'bcfg-dev@mcs.anl.gov' __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable'] __child_rmi__ = __rmi__[:] def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) self.Enable() @staticmethod def Enable(): """Enable remote debugging.""" Remote.on() @staticmethod def Disable(): """Disable remote debugging.""" Remote.off() src/lib/Bcfg2/Server/Plugins/Hg.py000066400000000000000000000020261303523157100171350ustar00rootroot00000000000000"""Revision interface for Bcfg2 repos using mercurial. """ import sys from mercurial import ui, hg import Bcfg2.Server.Plugin class Hg(Bcfg2.Server.Plugin.Version): """Revision interface for Bcfg2 repos using mercurial. 
""" __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = ".hg" def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.logger.debug("Initialized hg plugin with hg directory %s" % self.vcs_path) def get_revision(self): """Read hg revision information for the Bcfg2 repository.""" try: repo_path = Bcfg2.Options.setup.vcs_root + "/" repo = hg.repository(ui.ui(), repo_path) tip = repo.changelog.tip() return repo.changelog.rev(tip) except hg.error.RepoError: err = sys.exc_info()[1] msg = "Failed to read hg repository: %s" % err self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) src/lib/Bcfg2/Server/Plugins/Ldap.py000066400000000000000000000207631303523157100174670ustar00rootroot00000000000000""" A plugin to fetch data from a LDAP directory """ import imp import os import sys import time import traceback import Bcfg2.Options import Bcfg2.Server.Plugin from Bcfg2.Logger import Debuggable from Bcfg2.Utils import ClassName, safe_module_name try: import ldap HAS_LDAP = True except ImportError: HAS_LDAP = False class ConfigFile(Bcfg2.Server.Plugin.FileBacked): """ Config file for the Ldap plugin """ def __init__(self, name, core): Bcfg2.Server.Plugin.FileBacked.__init__(self, name) self.core = core self.queries = list() self.fam.AddMonitor(name, self) def Index(self): """ Get the queries from the config file """ try: module = imp.load_source(safe_module_name('Ldap', self.name), self.name) except: # pylint: disable=W0702 err = sys.exc_info()[1] self.logger.error("Ldap: Failed to import %s: %s" % (self.name, err)) return if not hasattr(module, "__queries__"): self.logger.error("Ldap: %s has no __queries__ list" % self.name) return self.queries = list() for query in module.__queries__: try: self.queries.append(getattr(module, query)) except AttributeError: self.logger.warning( "Ldap: %s exports %s, but has no such attribute" % (self.name, query)) if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire() class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.ClientRunHooks, Bcfg2.Server.Plugin.Connector): """ The Ldap plugin allows adding data from an LDAP server to your metadata. 
""" experimental = True options = [ Bcfg2.Options.Option( cf=('ldap', 'retries'), type=int, default=3, dest='ldap_retries', help='The number of times to retry reaching the ' 'LDAP server if a connection is broken'), Bcfg2.Options.Option( cf=('ldap', 'retry_delay'), type=float, default=5.0, dest='ldap_retry_delay', help='The time in seconds betreen retries')] def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) if not HAS_LDAP: msg = "Python ldap module is required for Ldap plugin" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginInitError(msg) self.config = ConfigFile(os.path.join(self.data, 'config.py')) def get_additional_data(self, metadata): query = None try: data = {} self.debug_log("Found queries %s" % self.config.queries) for query_class in self.config.queries: query = query_class() if query.is_applicable(metadata): self.debug_log("Processing query '%s'" % query.name) data[query.name] = query.get_result(metadata) else: self.debug_log("query '%s' not applicable to host '%s'" % (query.name, metadata.hostname)) return data except: # pylint: disable=W0702 if hasattr(query, "name"): self.logger.error( "Exception during processing of query named '%s', query " "results will be empty and may cause bind failures" % query.name) for line in traceback.format_exc().split('\n'): self.logger.error(line) return {} def start_client_run(self, metadata): if self.core.metadata_cache_mode == 'aggressive': self.logger.warning("Ldap is incompatible with aggressive " "client metadata caching, try 'cautious' " "or 'initial'") self.core.metadata_cache.expire(metadata.hostname) class LdapConnection(Debuggable): """ Connection to an LDAP server. """ def __init__(self, host="localhost", port=389, binddn=None, bindpw=None): Debuggable.__init__(self) if HAS_LDAP: msg = "Python ldap module is required for Ldap plugin" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginInitError(msg) self.host = host self.port = port self.binddn = binddn self.bindpw = bindpw self.conn = None self.__scopes__ = { 'base': ldap.SCOPE_BASE, 'one': ldap.SCOPE_ONELEVEL, 'sub': ldap.SCOPE_SUBTREE, } def __del__(self): """ Disconnection if the instance is destroyed. """ self.disconnect() def disconnect(self): """ If a connection to an LDAP server is available, disconnect it. """ if self.conn: self.conn.unbund() self.conn = None def connect(self): """ Open a connection to the configured LDAP server, and do a simple bind ff both binddn and bindpw are set. """ self.disconnect() self.conn = ldap.initialize(self.url) if self.binddn is not None and self.bindpw is not None: self.conn.simple_bind_s(self.binddn, self.bindpw) def run_query(self, query): """ Connect to the server and execute the query. If the server is down, wait the configured amount and try to reconnect. :param query: The query to execute on the LDAP server. :type query: Bcfg.Server.Plugins.Ldap.LdapQuery """ for attempt in range(Bcfg2.Options.setup.ldap_retries + 1): try: if not self.conn: self.connect() return self.conn.search_s( query.base, self.__scopes__[query.scope], query.filter.replace('\\', '\\\\'), query.attrs) except ldap.SERVER_DOWN: self.conn = None self.logger.error( "LdapConnection: Server %s down. Retry %d/%d in %.2fs." % (self.url, attempt + 1, Bcfg2.Options.setup.ldap_retries, Bcfg2.Options.setup.ldap_retry_delay)) time.sleep(Bcfg2.Options.setup.ldap_retry_delay) return None @property def url(self): """ The URL of the LDAP server. 
""" return "ldap://%s:%d" % (self.host, self.port) class LdapQuery(object): """ Query referencing an LdapConnection and providing several methods for query manipulation. """ #: Name of the Query, used to register it in additional data. name = ClassName() base = "" scope = "sub" filter = "(objectClass=*)" attrs = None connection = None result = None def __unicode__(self): return "LdapQuery: %s" % self.name def is_applicable(self, metadata): # pylint: disable=W0613 """ Check is the query should be executed for a given metadata object. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ return True def prepare_query(self, metadata, **kwargs): # pylint: disable=W0613 """ Prepares the query based on the client metadata. You can for example modify the filter based on the client hostname. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ pass def process_result(self, metadata, **kwargs): # pylint: disable=W0613 """ Post-process the query result. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ return self.result def get_result(self, metadata, **kwargs): """ Handle the perparation, execution and processing of the query. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ if self.connection is not None: self.prepare_query(metadata, **kwargs) self.result = self.connection.run_query(self) self.result = self.process_result(metadata, **kwargs) else: raise Bcfg2.Server.Plugin.PluginExecutionError( 'No connection defined for %s' % self.name) return self.result src/lib/Bcfg2/Server/Plugins/Metadata.py000066400000000000000000002026741303523157100203320ustar00rootroot00000000000000""" This file stores persistent metadata for the Bcfg2 Configuration Repository. 
""" import re import os import sys import time import copy import errno import socket import logging import lxml.etree import Bcfg2.Server import Bcfg2.Options import Bcfg2.Server.Plugin import Bcfg2.Server.FileMonitor from Bcfg2.Utils import locked from Bcfg2.Server.Cache import Cache # pylint: disable=W0622 from Bcfg2.Compat import MutableMapping, all, any, wraps # pylint: enable=W0622 from Bcfg2.version import Bcfg2VersionInfo try: from django.db import models HAS_DJANGO = True except ImportError: HAS_DJANGO = False # pylint: disable=C0103 ClientVersions = None MetadataClientModel = None # pylint: enable=C0103 def load_django_models(): """ Load models for Django after option parsing has completed """ # pylint: disable=W0602 global MetadataClientModel, ClientVersions # pylint: enable=W0602 if not HAS_DJANGO: return class MetadataClientModel(models.Model, # pylint: disable=W0621 Bcfg2.Server.Plugin.PluginDatabaseModel): """ django model for storing clients in the database """ hostname = models.CharField(max_length=255, primary_key=True) version = models.CharField(max_length=31, null=True) class ClientVersions(MutableMapping, # pylint: disable=W0621,W0612 Bcfg2.Server.Plugin.DatabaseBacked): """ dict-like object to make it easier to access client bcfg2 versions from the database """ create = False def __getitem__(self, key): try: return MetadataClientModel.objects.get( hostname=key).version except MetadataClientModel.DoesNotExist: raise KeyError(key) @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock def __setitem__(self, key, value): client, created = \ MetadataClientModel.objects.get_or_create(hostname=key) if created or client.version != value: client.version = value client.save() @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock def __delitem__(self, key): # UserDict didn't require __delitem__, but MutableMapping # does. we don't want deleting a client version record to # delete the client, so we just set the version to None, # which is kinda like deleting it, but not really. try: client = MetadataClientModel.objects.get(hostname=key) except MetadataClientModel.DoesNotExist: raise KeyError(key) client.version = None client.save() def __len__(self): return MetadataClientModel.objects.count() def __iter__(self): for client in MetadataClientModel.objects.all(): yield client.hostname def keys(self): """ Get keys for the mapping """ return list(iter(self)) def __contains__(self, key): try: MetadataClientModel.objects.get(hostname=key) return True except MetadataClientModel.DoesNotExist: return False class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked): """Handles xml config files and all XInclude statements""" def __init__(self, metadata, basefile): fpath = os.path.join(metadata.data, basefile) toptag = os.path.splitext(basefile)[0].title() Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath, should_monitor=False, create=toptag) self.metadata = metadata self.basefile = basefile self.data = None self.basedata = None self.basedir = metadata.data self.logger = metadata.logger self.pseudo_monitor = isinstance(Bcfg2.Server.FileMonitor.get_fam(), Bcfg2.Server.FileMonitor.Pseudo) def _get_xdata(self): """ getter for xdata property """ if not self.data: raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" % self.basefile) return self.data def _set_xdata(self, val): """ setter for xdata property. 
in practice this should only be used by the test suite """ self.data = val xdata = property(_get_xdata, _set_xdata) @property def base_xdata(self): """ property to get the data of the base file (without any xincludes processed) """ if not self.basedata: raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" % self.basefile) return self.basedata def load_xml(self): """Load changes from XML""" try: xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile), parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: self.logger.error('Failed to parse %s' % self.basefile) return self.extras = [] self.basedata = copy.deepcopy(xdata) self._follow_xincludes(xdata=xdata) if self.extras: try: xdata.xinclude() except lxml.etree.XIncludeError: self.logger.error("Failed to process XInclude for file %s" % self.basefile) self.data = xdata def write(self): """Write changes to xml back to disk.""" self.write_xml(os.path.join(self.basedir, self.basefile), self.basedata) def write_xml(self, fname, xmltree): """Write changes to xml back to disk.""" tmpfile = "%s.new" % fname datafile = None fd = None i = 0 # counter to avoid flooding logs with lock messages while datafile is None: try: fd = os.open(tmpfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY) datafile = os.fdopen(fd, 'w') except OSError: err = sys.exc_info()[1] if err.errno == errno.EEXIST: # note: not a real lock. this is here to avoid # the scenario where two threads write to the file # at the same-ish time, and one writes to # foo.xml.new, then the other one writes to it # (losing the first thread's changes), then the # first renames it, then the second tries to # rename it and borks. if (i % 10) == 0: self.logger.info("%s is locked, waiting" % fname) i += 1 time.sleep(0.1) else: msg = "Failed to write %s: %s" % (tmpfile, err) self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg) # prep data dataroot = xmltree.getroot() newcontents = lxml.etree.tostring(dataroot, xml_declaration=False, pretty_print=True).decode('UTF-8') while locked(fd): pass datafile.write(newcontents) datafile.close() # check if clients.xml is a symlink if os.path.islink(fname): fname = os.readlink(fname) try: os.rename(tmpfile, fname) except OSError: try: os.unlink(tmpfile) except OSError: pass msg = "Metadata: Failed to rename %s: %s" % (tmpfile, sys.exc_info()[1]) self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg) self.load_xml() def find_xml_for_xpath(self, xpath): """Find and load xml file containing the xpath query""" if self.pseudo_monitor: # Reload xml if we don't have a real monitor self.load_xml() cli = self.basedata.xpath(xpath) if len(cli) > 0: return {'filename': os.path.join(self.basedir, self.basefile), 'xmltree': self.basedata, 'xquery': cli} else: # Try to find the data in included files for included in self.extras: try: xdata = lxml.etree.parse(included, parser=Bcfg2.Server.XMLParser) cli = xdata.xpath(xpath) if len(cli) > 0: return {'filename': included, 'xmltree': xdata, 'xquery': cli} except lxml.etree.XMLSyntaxError: self.logger.error('Failed to parse %s' % included) return {} def add_monitor(self, fpath): self.extras.append(fpath) self.fam.AddMonitor(fpath, self.metadata) def HandleEvent(self, event=None): """Handle fam events""" filename = os.path.basename(event.filename) if event.filename in self.extras: if event.code2str() == 'exists': return False elif filename != self.basefile: return False if event.code2str() == 'endExist': return False self.load_xml() return True class 
ClientMetadata(object): """This object contains client metadata.""" # pylint: disable=R0913 def __init__(self, client, profile, groups, bundles, aliases, addresses, categories, uuid, password, version, query): #: The client hostname (as a string) self.hostname = client #: The client profile (as a string) self.profile = profile #: The set of all bundles this client gets self.bundles = bundles #: A list of all client aliases self.aliases = aliases #: A list of all addresses this client is known by self.addresses = addresses #: A list of groups this client is a member of self.groups = groups #: A dict of categories of this client's groups. Keys are #: category names, values are corresponding group names. self.categories = categories #: The UUID identifier for this client self.uuid = uuid #: The Bcfg2 password for this client self.password = password #: Connector plugins known to this client self.connectors = [] #: The version of the Bcfg2 client this client is running, as #: a string self.version = version try: #: The version of the Bcfg2 client this client is running, #: as a :class:`Bcfg2.version.Bcfg2VersionInfo` object. self.version_info = Bcfg2VersionInfo(version) except (ValueError, AttributeError): self.version_info = None #: A :class:`Bcfg2.Server.Plugins.Metadata.MetadataQuery` #: object for this client. self.query = query # pylint: enable=R0913 def inGroup(self, group): """Test to see if client is a member of group. :returns: bool """ return group in self.groups def group_in_category(self, category): """ Return the group in the given category that the client is a member of, or an empty string. :returns: string """ for grp in self.query.all_groups_in_category(category): if grp in self.groups: return grp return '' def __repr__(self): return "%s(%s, profile=%s, groups=%s)" % (self.__class__.__name__, self.hostname, self.profile, self.groups) class MetadataQuery(object): """ This class provides query methods for the metadata of all clients known to the Bcfg2 server, without being able to modify that data. Note that ``*by_groups()`` and ``*by_profiles()`` behave differently; for a client to be included in the return value of a ``*by_groups()`` method, it must be a member of *all* groups listed in the argument; for a client to be included in the return value of a ``*by_profiles()`` method, it must have *any* group listed as its profile group. """ def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category): self.logger = logging.getLogger(self.__class__.__name__) #: Get :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` #: object for the given hostname. #: #: :returns: Bcfg2.Server.Plugins.Metadata.ClientMetadata self.by_name = by_name #: Get a list of hostnames of clients that are in all given #: groups. #: #: :param groups: The groups to check clients for membership in #: :type groups: list #: #: :returns: list of strings self.names_by_groups = self._warn_string(by_groups) #: Get a list of hostnames of clients whose profile matches #: any given profile group. #: #: :param profiles: The profiles to check clients for #: membership in. #: :type profiles: list #: :returns: list of strings self.names_by_profiles = self._warn_string(by_profiles) #: Get all known client hostnames. #: #: :returns: list of strings self.all_clients = get_clients #: Get all known group names. #: #: :returns: list of strings self.all_groups = all_groups #: Get the names of all groups in the given category. 
#: #: :param category: The category to query for groups that #: belong to it. #: :type category: string #: :returns: list of strings self.all_groups_in_category = all_groups_in_category def _warn_string(self, func): """ decorator to warn that a MetadataQuery function that expects a list has been called with a single string argument instead. this is a common mistake in templates, and it doesn't cause errors because strings are iterables """ # pylint: disable=C0111 @wraps(func) def inner(arg): if isinstance(arg, str): self.logger.warning("%s: %s takes a list as argument, not a " "string" % (self.__class__.__name__, func.__name__)) return func(arg) # pylint: enable=C0111 return inner def by_groups(self, groups): """ Get a list of :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` objects that are in all given groups. :param groups: The groups to check clients for membership in. :type groups: list :returns: list of Bcfg2.Server.Plugins.Metadata.ClientMetadata objects """ # don't need to decorate this with _warn_string because # names_by_groups is decorated return [self.by_name(name) for name in self.names_by_groups(groups)] def by_profiles(self, profiles): """ Get a list of :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` objects that have any of the given groups as their profile. :param profiles: The profiles to check clients for membership in. :type profiles: list :returns: list of Bcfg2.Server.Plugins.Metadata.ClientMetadata objects """ # don't need to decorate this with _warn_string because # names_by_profiles is decorated return [self.by_name(name) for name in self.names_by_profiles(profiles)] def all(self): """ Get a list of all :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` objects. :returns: list of Bcfg2.Server.Plugins.Metadata.ClientMetadata """ return [self.by_name(name) for name in self.all_clients()] class MetadataGroup(tuple): # pylint: disable=E0012,R0924 """ representation of a metadata group. 
basically just a named tuple """ # pylint: disable=R0913,W0613 def __new__(cls, name, bundles=None, category=None, is_profile=False, is_public=False): if bundles is None: bundles = set() return tuple.__new__(cls, (bundles, category)) # pylint: enable=W0613 def __init__(self, name, bundles=None, category=None, is_profile=False, is_public=False): if bundles is None: bundles = set() tuple.__init__(self) self.name = name self.bundles = bundles self.category = category self.is_profile = is_profile self.is_public = is_public # record which clients we've warned about category suppression self.warned = [] # pylint: enable=R0913 def __str__(self): return repr(self) def __repr__(self): return "%s %s (bundles=%s, category=%s)" % \ (self.__class__.__name__, self.name, self.bundles, self.category) def __hash__(self): return hash(self.name) class Metadata(Bcfg2.Server.Plugin.Metadata, Bcfg2.Server.Plugin.ClientRunHooks, Bcfg2.Server.Plugin.DatabaseBacked): """This class contains data for bcfg2 server metadata.""" __author__ = 'bcfg-dev@mcs.anl.gov' sort_order = 500 __rmi__ = Bcfg2.Server.Plugin.DatabaseBacked.__rmi__ + ['list_clients', 'remove_client'] options = Bcfg2.Server.Plugin.DatabaseBacked.options + [ Bcfg2.Options.Common.password, Bcfg2.Options.BooleanOption( cf=('metadata', 'use_database'), dest="metadata_db", help="Use database capabilities of the Metadata plugin"), Bcfg2.Options.Option( cf=('communication', 'authentication'), default='cert+password', choices=['cert', 'bootstrap', 'cert+password'], help='Default client authentication method')] options_parsed_hook = staticmethod(load_django_models) def __init__(self, core): Bcfg2.Server.Plugin.Metadata.__init__(self) Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core) self.states = dict() self.extra = dict() self.handlers = dict() self.groups_xml = self._handle_file("groups.xml") if (self._use_db and os.path.exists(os.path.join(self.data, "clients.xml"))): self.logger.warning("Metadata: database enabled but clients.xml " "found, parsing in compatibility mode") self.clients_xml = self._handle_file("clients.xml") elif not self._use_db: self.clients_xml = self._handle_file("clients.xml") # mapping of clientname -> authtype self.auth = dict() # list of clients required to have non-global password self.secure = [] # list of floating clients self.floating = [] # mapping of clientname -> password self.passwords = {} self.addresses = {} self.raddresses = {} # mapping of clientname -> [groups] self.clientgroups = {} # list of clients self.clients = [] self.aliases = {} self.raliases = {} # mapping of groupname -> MetadataGroup object self.groups = {} # mappings of groupname -> [predicates] self.group_membership = dict() self.negated_groups = dict() # list of group names in document order self.ordered_groups = [] # mapping of hostname -> version string if self._use_db: self.versions = ClientVersions(core) # pylint: disable=E1102 else: self.versions = dict() self.uuid = {} self.session_cache = {} self.cache = Cache("Metadata") self.default = None self.pdirty = False self.password = Bcfg2.Options.setup.password self.query = MetadataQuery(core.build_metadata, self.list_clients, self.get_client_names_by_groups, self.get_client_names_by_profiles, self.get_all_group_names, self.get_all_groups_in_category) @classmethod def init_repo(cls, repo, **kwargs): # must use super here; inheritance works funny with class methods super(Metadata, cls).init_repo(repo) for fname in ["clients.xml", "groups.xml"]: aname = 
re.sub(r'[^A-z0-9_]', '_', fname) if aname in kwargs: open(os.path.join(repo, cls.name, fname), "w").write(kwargs[aname]) @property def use_database(self): """ Expose self._use_db publicly for use in :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` """ return self._use_db def _handle_file(self, fname): """ set up the necessary magic for handling a metadata file (clients.xml or groups.xml, e.g.) """ Bcfg2.Server.FileMonitor.get_fam().AddMonitor( os.path.join(self.data, fname), self) self.states[fname] = False xmlcfg = XMLMetadataConfig(self, fname) aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(fname)) self.handlers[xmlcfg.HandleEvent] = getattr(self, "_handle_%s_event" % aname) self.extra[fname] = [] return xmlcfg def _search_xdata(self, tag, name, tree, alias=False): """ Generic method to find XML data (group, client, etc.) """ for node in tree.findall("//%s" % tag): if node.get("name") == name: return node elif alias: for child in node: if (child.tag == "Alias" and child.attrib["name"] == name): return node return None def search_group(self, group_name, tree): """Find a group.""" return self._search_xdata("Group", group_name, tree) def search_bundle(self, bundle_name, tree): """Find a bundle.""" return self._search_xdata("Bundle", bundle_name, tree) def search_client(self, client_name, tree): """ find a client in the given XML tree """ return self._search_xdata("Client", client_name, tree, alias=True) def _add_xdata(self, config, tag, name, attribs=None, alias=False): """ Generic method to add XML data (group, client, etc.) """ node = self._search_xdata(tag, name, config.xdata, alias=alias) if node is not None: raise Bcfg2.Server.Plugin.MetadataConsistencyError("%s \"%s\" " "already exists" % (tag, name)) element = lxml.etree.SubElement(config.base_xdata.getroot(), tag, name=name) if attribs: for key, val in list(attribs.items()): element.set(key, val) config.write() return element def add_group(self, group_name, attribs): """Add group to groups.xml.""" if self._use_db: msg = "Metadata does not support adding groups with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._add_xdata(self.groups_xml, "Group", group_name, attribs=attribs) def add_bundle(self, bundle_name): """Add bundle to groups.xml.""" if self._use_db: msg = "Metadata does not support adding bundles with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._add_xdata(self.groups_xml, "Bundle", bundle_name) @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock def add_client(self, client_name, attribs=None): """Add client to clients.xml.""" if attribs is None: attribs = dict() if self._use_db: if attribs: msg = "Metadata does not support setting client attributes " +\ "with use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) try: client = MetadataClientModel.objects.get(hostname=client_name) except MetadataClientModel.DoesNotExist: # pylint: disable=E1102 client = MetadataClientModel(hostname=client_name) # pylint: enable=E1102 client.save() self.update_client_list() return client else: try: return self._add_xdata(self.clients_xml, "Client", client_name, attribs=attribs, alias=True) except Bcfg2.Server.Plugin.MetadataConsistencyError: # already exists err = sys.exc_info()[1] self.logger.info(err) return self._search_xdata("Client", client_name, self.clients_xml.xdata, alias=True) def _update_xdata(self, config, tag, name, attribs, 
alias=False): """ Generic method to modify XML data (group, client, etc.) """ node = self._search_xdata(tag, name, config.xdata, alias=alias) if node is None: msg = "%s \"%s\" does not exist" % (tag, name) self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % (tag, node.get('name'))) if not xdict: msg = 'Unexpected error finding %s "%s"' % (tag, name) self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) for key, val in list(attribs.items()): xdict['xquery'][0].set(key, val) config.write_xml(xdict['filename'], xdict['xmltree']) def update_group(self, group_name, attribs): """Update a groups attributes.""" if self._use_db: msg = "Metadata does not support updating groups with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._update_xdata(self.groups_xml, "Group", group_name, attribs) def update_client(self, client_name, attribs): """Update a clients attributes.""" if self._use_db: msg = "Metadata does not support updating clients with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._update_xdata(self.clients_xml, "Client", client_name, attribs, alias=True) def list_clients(self): """ List all clients in client database. Making ``self.clients`` a property and reading the client list dynamically from the database on every call to ``self.clients`` can result in very high rates of database reads, so we cache the ``list_clients()`` results to reduce the database load. When the database is in use, the client list is reread periodically with :func:`Bcfg2.Server.Plugins.Metadata.update_client_list`. """ if self._use_db: return set([c.hostname for c in MetadataClientModel.objects.all()]) else: return self.clients def _remove_xdata(self, config, tag, name): """ Generic method to remove XML data (group, client, etc.) 
""" node = self._search_xdata(tag, name, config.xdata) if node is None: self.logger.error("%s \"%s\" does not exist" % (tag, name)) raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % (tag, node.get('name'))) if not xdict: self.logger.error("Unexpected error finding %s \"%s\"" % (tag, name)) raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict['xquery'][0].getparent().remove(xdict['xquery'][0]) config.write_xml(xdict['filename'], xdict['xmltree']) def remove_group(self, group_name): """Remove a group.""" if self._use_db: msg = "Metadata does not support removing groups with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._remove_xdata(self.groups_xml, "Group", group_name) def remove_bundle(self, bundle_name): """Remove a bundle.""" if self._use_db: msg = "Metadata does not support removing bundles with " + \ "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: return self._remove_xdata(self.groups_xml, "Bundle", bundle_name) def remove_client(self, client_name): """Remove a client.""" if self._use_db: try: client = MetadataClientModel.objects.get(hostname=client_name) except MetadataClientModel.DoesNotExist: msg = "Client %s does not exist" % client_name self.logger.warning(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) client.delete() self.update_client_list() else: return self._remove_xdata(self.clients_xml, "Client", client_name) def _handle_clients_xml_event(self, _): # pylint: disable=R0912 """ handle all events for clients.xml and files xincluded from clients.xml """ # disable metadata builds during parsing. this prevents # clients from getting bogus metadata during the brief time it # takes to rebuild the clients.xml data self.states['clients.xml'] = False xdata = self.clients_xml.xdata self.clients = [] self.clientgroups = {} self.aliases = {} self.raliases = {} self.secure = [] self.floating = [] self.addresses = {} self.raddresses = {} for client in xdata.findall('.//Client'): clname = client.get('name').lower() if 'address' in client.attrib: caddr = client.get('address') if caddr in self.addresses: self.addresses[caddr].append(clname) else: self.addresses[caddr] = [clname] if clname not in self.raddresses: self.raddresses[clname] = set() self.raddresses[clname].add(caddr) if 'auth' in client.attrib: self.auth[client.get('name')] = client.get('auth') if 'uuid' in client.attrib: self.uuid[client.get('uuid')] = clname if client.get('secure', 'false').lower() == 'true': self.secure.append(clname) if (client.get('location', 'fixed') == 'floating' or client.get('floating', 'false').lower() == 'true'): self.floating.append(clname) if 'password' in client.attrib: self.passwords[clname] = client.get('password') if 'version' in client.attrib: self.versions[clname] = client.get('version') self.raliases[clname] = set() for alias in client.findall('Alias'): self.aliases.update({alias.get('name'): clname}) self.raliases[clname].add(alias.get('name')) if 'address' not in alias.attrib: continue if alias.get('address') in self.addresses: self.addresses[alias.get('address')].append(clname) else: self.addresses[alias.get('address')] = [clname] if clname not in self.raddresses: self.raddresses[clname] = set() self.raddresses[clname].add(alias.get('address')) self.clients.append(clname) profile = client.get("profile") if self.groups: # check if we've parsed groups.xml yet if profile not in self.groups: 
self.logger.warning("Metadata: %s has nonexistent " "profile group %s" % (clname, profile)) elif not self.groups[profile].is_profile: self.logger.warning("Metadata: %s set as profile for " "%s, but is not a profile group" % (profile, clname)) try: self.clientgroups[clname].append(profile) except KeyError: self.clientgroups[clname] = [profile] self.update_client_list() self.cache.expire() self.states['clients.xml'] = True def _get_condition(self, element): """ Return a predicate that returns True if a client meets the condition specified in the given Group or Client element """ negate = element.get('negate', 'false').lower() == 'true' pname = element.get("name") if element.tag == 'Group': return lambda c, g, _: negate != (pname in g) elif element.tag == 'Client': return lambda c, g, _: negate != (pname == c) def _get_category_condition(self, grpname): """ get a predicate that returns False if a client is already a member of a group in the given group's category, True otherwise""" return lambda client, _, categories: \ bool(self._check_category(client, grpname, categories)) def _aggregate_conditions(self, conditions): """ aggregate all conditions on a given group declaration into a single predicate """ return lambda client, groups, cats: \ all(cond(client, groups, cats) for cond in conditions) def _handle_groups_xml_event(self, _): # pylint: disable=R0912 """ re-read groups.xml on any event on it """ # disable metadata builds during parsing. this prevents # clients from getting bogus metadata during the brief time it # takes to rebuild the groups.xml data self.states['groups.xml'] = False self.groups = {} self.group_membership = dict() self.negated_groups = dict() self.ordered_groups = [] # first, we get a list of all of the groups declared in the # file. we do this in two stages because the old way of # parsing groups.xml didn't support nested groups; in the old # way, only Group tags under a Groups tag counted as # declarative. so we parse those first, and then parse the # other Group tags if they haven't already been declared. # this lets you set options on a group (e.g., public="false") # at the top level and then just use the name elsewhere, which # is the original behavior for grp in self.groups_xml.xdata.xpath("//Groups/Group") + \ self.groups_xml.xdata.xpath("//Groups/Group//Group"): if grp.get("name") in self.groups: continue self.groups[grp.get("name")] = \ MetadataGroup(grp.get("name"), bundles=[b.get("name") for b in grp.findall("Bundle")], category=grp.get("category"), is_profile=grp.get("profile", "false") == "true", is_public=grp.get("public", "false") == "true") if grp.get('default', 'false') == 'true': self.default = grp.get('name') # confusing loop condition; the XPath query asks for all # elements under a Group tag under a Groups tag; that is # infinitely recursive, so "all" elements really means _all_ # elements. We then manually filter out non-Group elements # since there doesn't seem to be a way to get Group elements # of arbitrary depth with particular ultimate ancestors in # XPath. We do the same thing for Client tags. 
for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \ self.groups_xml.xdata.xpath("//Groups/Client//*"): if (el.tag != 'Group' and el.tag != 'Client') or el.getchildren(): continue conditions = [] for parent in el.iterancestors(): cond = self._get_condition(parent) if cond: conditions.append(cond) gname = el.get("name") if el.get("negate", "false").lower() == "true": self.negated_groups.setdefault(gname, []) self.negated_groups[gname].append( self._aggregate_conditions(conditions)) else: if self.groups[gname].category: conditions.append(self._get_category_condition(gname)) if gname not in self.ordered_groups: self.ordered_groups.append(gname) self.group_membership.setdefault(gname, []) self.group_membership[gname].append( self._aggregate_conditions(conditions)) self.cache.expire() self.states['groups.xml'] = True def HandleEvent(self, event): """Handle update events for data files.""" for handles, event_handler in self.handlers.items(): if handles(event): # clear the entire cache when we get an event for any # metadata file self.cache.expire() # clear out the list of category suppressions that # have been warned about, since this may change when # clients.xml or groups.xml changes. for group in self.groups.values(): group.warned = [] event_handler(event) if False not in list(self.states.values()) and self.debug_flag: # check that all groups are real and complete. this is # just logged at a debug level because many groups might # be probed, and we don't want to warn about them. for client, groups in list(self.clientgroups.items()): for group in groups: if group not in self.groups: self.debug_log("Client %s set as nonexistent group %s" % (client, group)) def set_profile(self, client, profile, # pylint: disable=W0221 addresspair, require_public=True): """Set group parameter for provided client.""" self.logger.info("Asserting client %s profile to %s" % (client, profile)) if False in list(self.states.values()): raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not " "been read yet") if profile not in self.groups: msg = "Profile group %s does not exist" % profile self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) group = self.groups[profile] if require_public and not group.is_public: msg = "Cannot set client %s to private group %s" % (client, profile) self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) if client in self.clients: if self._use_db: msg = "DBMetadata does not support asserting client profiles" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) metadata = self.core.build_metadata(client) if metadata.profile != profile: self.logger.info("Changing %s profile from %s to %s" % (client, metadata.profile, profile)) self.update_client(client, dict(profile=profile)) if client in self.clientgroups: if metadata.profile in self.clientgroups[client]: self.clientgroups[client].remove(metadata.profile) self.clientgroups[client].append(profile) else: self.clientgroups[client] = [profile] else: self.logger.debug( "Ignoring %s request to change profile from %s to %s" % (client, metadata.profile, profile)) else: self.logger.info("Creating new client: %s, profile %s" % (client, profile)) if self._use_db: self.add_client(client) else: if addresspair in self.session_cache: # we are working with a uuid'd client self.add_client(self.session_cache[addresspair][1], dict(uuid=client, profile=profile, address=addresspair[0])) else: self.add_client(client, dict(profile=profile)) self.clients.append(client) 
self.clientgroups[client] = [profile] if not self._use_db: self.clients_xml.write() def set_version(self, client, version): """Set version for provided client.""" if client not in self.clients: # this creates the client as a side effect self.get_initial_metadata(client) if client not in self.versions or version != self.versions[client]: self.logger.info("Setting client %s version to %s" % (client, version)) if not self._use_db: self.update_client(client, dict(version=version)) self.clients_xml.write() self.versions[client] = version def resolve_client(self, addresspair, cleanup_cache=False): """Lookup address locally or in DNS to get a hostname.""" if addresspair in self.session_cache: # client _was_ cached, so there can be some expired # entries. we need to clean them up to avoid potentially # infinite memory swell cache_ttl = 90 if cleanup_cache: # remove entries for this client's IP address with # _any_ port numbers - perhaps a priority queue could # be faster? curtime = time.time() for addrpair in list(self.session_cache.keys()): if addresspair[0] == addrpair[0]: (stamp, _) = self.session_cache[addrpair] if curtime - stamp > cache_ttl: del self.session_cache[addrpair] # return the cached data try: stamp = self.session_cache[addresspair][0] if time.time() - stamp < cache_ttl: return self.session_cache[addresspair][1] except KeyError: # we cleaned all cached data for this client in cleanup_cache pass address = addresspair[0] if address in self.addresses: if len(self.addresses[address]) != 1: err = ("Address %s has multiple reverse assignments; a " "uuid must be used" % address) self.logger.error(err) raise Bcfg2.Server.Plugin.MetadataConsistencyError(err) return self.addresses[address][0] try: cname = socket.getnameinfo(addresspair, socket.NI_NAMEREQD)[0].lower() if cname in self.aliases: return self.aliases[cname] return cname except (socket.gaierror, socket.herror): err = "Address resolution error for %s: %s" % (address, sys.exc_info()[1]) self.logger.error(err) raise Bcfg2.Server.Plugin.MetadataConsistencyError(err) def _merge_groups(self, client, groups, categories=None): """ set group membership based on the contents of groups.xml and initial group membership of this client. Returns a tuple of (allgroups, categories)""" numgroups = -1 # force one initial pass if categories is None: categories = dict() while numgroups != len(groups): numgroups = len(groups) newgroups = set() removegroups = set() for grpname in self.ordered_groups: if grpname in groups: continue if any(p(client, groups, categories) for p in self.group_membership[grpname]): newgroups.add(grpname) if (grpname in self.groups and self.groups[grpname].category): categories[self.groups[grpname].category] = grpname groups.update(newgroups) for grpname, predicates in self.negated_groups.items(): if grpname not in groups: continue if any(p(client, groups, categories) for p in predicates): removegroups.add(grpname) if (grpname in self.groups and self.groups[grpname].category): del categories[self.groups[grpname].category] groups.difference_update(removegroups) return (groups, categories) def _check_category(self, client, grpname, categories): """ Determine if the given client is already a member of a group in the same category as the named group. 
The return value is one of three possibilities: * If the client is already a member of a group in the same category, then False is returned (i.e., the category check failed); * If the group is not in any categories, then True is returned; * If the group is not a member of a group in the category, then the name of the category is returned. This makes it easy to add the category to the ClientMetadata object (or other category list). If a pure boolean value is required, you can do ``bool(self._check_category(...))``. """ if grpname not in self.groups: return True category = self.groups[grpname].category if not category: return True if category in categories: if client not in self.groups[grpname].warned: self.logger.warning("%s: Group %s suppressed by category %s; " "%s already a member of %s" % (self.name, grpname, category, client, categories[category])) self.groups[grpname].warned.append(client) return False return category def _check_and_add_category(self, client, grpname, categories): """ If the client is not a member of a group in the same category as the named group, then the category is added to ``categories``. :func:`Bcfg2.Server.Plugins.Metadata._check_category` is used to determine if the category can be added. If the category check failed, returns False; otherwise, returns True. """ rv = self._check_category(client, grpname, categories) if rv and rv is not True: categories[rv] = grpname return True return rv def get_initial_metadata(self, client): # pylint: disable=R0914,R0912 """Return the metadata for a given client.""" if False in list(self.states.values()): raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not " "been read yet") client = client.lower() if client in self.cache: return self.cache[client] if client in self.aliases: client = self.aliases[client] groups = set() categories = dict() profile = None def _add_group(grpname): """ Add a group to the set of groups for this client. Handles setting categories and category suppression. Returns the new profile for the client (which might be unchanged). 
""" if grpname in self.groups: if not self._check_and_add_category(client, grpname, categories): return profile groups.add(grpname) if not profile and self.groups[grpname].is_profile: return grpname else: return profile else: groups.add(grpname) return profile if client not in self.clients: pgroup = None if client in self.clientgroups: pgroup = self.clientgroups[client][0] self.debug_log("%s: Adding new client with profile %s" % (self.name, pgroup)) elif self.default: pgroup = self.default self.debug_log("%s: Adding new client with default profile %s" % (self.name, pgroup)) if pgroup: self.set_profile(client, pgroup, (None, None), require_public=False) profile = _add_group(pgroup) else: raise Bcfg2.Server.Plugin.MetadataConsistencyError( "Cannot add new client %s; no default group set" % client) for cgroup in self.clientgroups.get(client, []): if cgroup in groups: continue if cgroup not in self.groups: self.groups[cgroup] = MetadataGroup(cgroup) profile = _add_group(cgroup) # we do this before setting the default because there may be # groups set in tags in groups.xml that we want to # set groups, categories = self._merge_groups(client, groups, categories=categories) if len(groups) == 0 and self.default: # no initial groups; add the default profile profile = _add_group(self.default) groups, categories = self._merge_groups(client, groups, categories=categories) bundles = set() for group in groups: try: bundles.update(self.groups[group].bundles) except KeyError: self.logger.warning("%s: %s is a member of undefined group %s" % (self.name, client, group)) aliases = self.raliases.get(client, set()) addresses = self.raddresses.get(client, set()) version = self.versions.get(client, None) if client in self.passwords: password = self.passwords[client] else: password = None uuids = [item for item, value in list(self.uuid.items()) if value == client] if uuids: uuid = uuids[0] else: uuid = None if not profile: # one last ditch attempt at setting the profile profiles = [g for g in groups if g in self.groups and self.groups[g].is_profile] if len(profiles) >= 1: profile = profiles[0] rv = ClientMetadata(client, profile, groups, bundles, aliases, addresses, categories, uuid, password, version, self.query) if self.core.metadata_cache_mode == 'initial': self.cache[client] = rv return rv def get_all_group_names(self): """ return a list of all group names """ all_groups = set() all_groups.update(self.groups.keys()) all_groups.update(self.group_membership.keys()) all_groups.update(self.negated_groups.keys()) for grp in self.clientgroups.values(): all_groups.update(grp) return all_groups def get_all_groups_in_category(self, category): """ return a list of names of groups in the given category """ return set([g.name for g in self.groups.values() if g.category == category]) def get_client_names_by_profiles(self, profiles): """ return a list of names of clients in the given profile groups """ rv = [] for client in self.list_clients(): mdata = self.core.build_metadata(client) if mdata.profile in profiles: rv.append(client) return rv def get_client_names_by_groups(self, groups): """ return a list of names of clients in the given groups """ rv = [] for client in self.list_clients(): mdata = self.core.build_metadata(client) if mdata.groups.issuperset(groups): rv.append(client) return rv def get_client_names_by_bundles(self, bundles): """ given a list of bundles, return a list of names of clients that use those bundles """ rv = [] for client in self.list_clients(): mdata = self.core.build_metadata(client) if 
mdata.bundles.issuperset(bundles): rv.append(client) return rv def merge_additional_groups(self, imd, groups): for group in groups: if group in imd.groups: continue if not self._check_and_add_category(imd.hostname, group, imd.categories): continue imd.groups.add(group) self._merge_groups(imd.hostname, imd.groups, categories=imd.categories) for group in imd.groups: if group in self.groups: imd.bundles.update(self.groups[group].bundles) if not imd.profile: # if the client still doesn't have a profile group after # initial metadata, try to find one in the additional # groups profiles = [g for g in groups if g in self.groups and self.groups[g].is_profile] if len(profiles) >= 1: imd.profile = profiles[0] elif self.default: imd.profile = self.default def merge_additional_data(self, imd, source, data): if not hasattr(imd, source): setattr(imd, source, data) imd.connectors.append(source) def validate_client_address(self, client, addresspair): """Check address against client.""" address = addresspair[0] if client in self.floating: self.debug_log("Client %s is floating" % client) return True if address in self.addresses: if client in self.addresses[address]: self.debug_log("Client %s matches address %s" % (client, address)) return True else: self.logger.error("Got request for non-float client %s from %s" % (client, address)) return False resolved = self.resolve_client(addresspair) if resolved.lower() == client.lower(): self.logger.debug("Client %s address validates" % client) return True else: self.logger.error("Got request for %s from incorrect address %s" % (client, address)) self.logger.error("Resolved to %s" % resolved) return False # pylint: disable=R0911,R0912 def AuthenticateConnection(self, cert, user, password, address): """This function checks auth creds.""" if not isinstance(user, str): user = user.decode('utf-8') if cert: id_method = 'cert' certinfo = dict([x[0] for x in cert['subject']]) # look at cert.cN client = certinfo['commonName'] self.debug_log("Got cN %s; using as client name" % client) elif user == 'root': id_method = 'address' try: client = self.resolve_client(address) except Bcfg2.Server.Plugin.MetadataConsistencyError: err = sys.exc_info()[1] self.logger.error("Client %s failed to resolve: %s" % (address[0], err)) return False else: id_method = 'uuid' # user maps to client if user not in self.uuid: client = user self.uuid[user] = user else: client = self.uuid[user] # we have the client name self.debug_log("Authenticating client %s" % client) # validate id_method auth_type = self.auth.get(client, Bcfg2.Options.setup.authentication) if auth_type == 'cert' and id_method != 'cert': self.logger.error("Client %s does not provide a cert, but only " "cert auth is allowed" % client) return False # next we validate the address if (id_method != 'uuid' and not self.validate_client_address(client, address)): return False if id_method == 'cert' and auth_type != 'cert+password': # remember the cert-derived client name for this connection if client in self.floating: self.session_cache[address] = (time.time(), client) self.logger.debug("Client %s certificate validates" % client) # we are done if cert+password not required return True if client not in self.passwords and client in self.secure: self.logger.error("Client %s in secure mode but has no password" % address[0]) return False if client not in self.secure: if client in self.passwords: plist = [self.password, self.passwords[client]] else: plist = [self.password] if password not in plist: self.logger.error("Client %s failed to use an 
allowed password" % address[0]) return False else: # client in secure mode and has a client password if password != self.passwords[client]: self.logger.error("Client %s failed to use client password in " "secure mode" % address[0]) return False # populate the session cache if user != 'root': self.session_cache[address] = (time.time(), client) self.logger.debug("Client %s authenticated successfully" % client) return True # pylint: enable=R0911,R0912 def update_client_list(self): """ Re-read the client list from the database (if the database is in use) """ if self._use_db: self.logger.debug("Metadata: Re-reading client list from database") old = set(self.clients) self.clients = self.list_clients() # we could do this with set.symmetric_difference(), but we # want detailed numbers of added/removed clients for # logging new = set(self.clients) added = new - old removed = old - new self.logger.debug("Metadata: Added %s clients: %s" % (len(added), added)) self.logger.debug("Metadata: Removed %s clients: %s" % (len(removed), removed)) for client in added.union(removed): self.cache.expire(client) def start_client_run(self, metadata): """ Hook to reread client list if the database is in use """ self.update_client_list() def end_statistics(self, metadata): """ Hook to toggle clients in bootstrap mode """ if self.auth.get(metadata.hostname, Bcfg2.Options.setup.authentication) == 'bootstrap': self.update_client(metadata.hostname, dict(auth='cert')) def viz(self, hosts, bundles, key, only_client, colors): """Admin mode viz support.""" clientmeta = None if only_client: clientmeta = self.core.build_metadata(only_client) groups = self.groups_xml.xdata.getroot() categories = {'default': 'grey83'} viz_str = [] egroups = groups.findall("Group") + groups.findall('.//Groups/Group') color = 0 for group in egroups: if not group.get('category') in categories: categories[group.get('category')] = colors[color] color = (color + 1) % len(colors) group.set('color', categories[group.get('category')]) if None in categories: del categories[None] if hosts: viz_str.extend(self._viz_hosts(only_client)) if bundles: viz_str.extend(self._viz_bundles(bundles, clientmeta)) viz_str.extend(self._viz_groups(egroups, bundles, clientmeta)) if key: for category in categories: viz_str.append('"%s" [label="%s", shape="trapezium", ' 'style="filled", fillcolor="%s"];' % (category, category, categories[category])) return "\n".join("\t" + s for s in viz_str) def _viz_hosts(self, only_client): """ add hosts to the viz graph """ def include_client(client): """ return True if the given client should be included in the graph""" return not only_client or client != only_client instances = {} rv = [] for client in list(self.list_clients()): if not include_client(client): continue if client in self.clientgroups: grps = self.clientgroups[client] elif self.default: grps = [self.default] else: continue for group in grps: try: instances[group].append(client) except KeyError: instances[group] = [client] for group, clist in list(instances.items()): clist.sort() rv.append('"%s-instances" [ label="%s", shape="record" ];' % (group, '|'.join(clist))) rv.append('"%s-instances" -> "group-%s";' % (group, group)) return rv def _viz_bundles(self, bundles, clientmeta): """ add bundles to the viz graph """ def include_bundle(bundle): """ return True if the given bundle should be included in the graph""" return not clientmeta or bundle in clientmeta.bundles bundles = \ list(set(bund.get('name') for bund in self.groups_xml.xdata.findall('.//Bundle') if 
include_bundle(bund.get('name')))) bundles.sort() return ['"bundle-%s" [ label="%s", shape="septagon"];' % (bundle, bundle) for bundle in bundles] def _viz_groups(self, egroups, bundles, clientmeta): """ add groups to the viz graph """ def include_group(group): """ return True if the given group should be included in the graph """ return not clientmeta or group in clientmeta.groups rv = [] gseen = [] for group in egroups: if group.get('profile', 'false') == 'true': style = "filled, bold" else: style = "filled" gseen.append(group.get('name')) if include_group(group.get('name')): rv.append('"group-%s" [label="%s", style="%s", fillcolor=%s];' % (group.get('name'), group.get('name'), style, group.get('color'))) if bundles: for bundle in group.findall('Bundle'): rv.append('"group-%s" -> "bundle-%s";' % (group.get('name'), bundle.get('name'))) gfmt = '"group-%s" [label="%s", style="filled", fillcolor="grey83"];' for group in egroups: for parent in group.findall('Group'): if (parent.get('name') not in gseen and include_group(parent.get('name'))): rv.append(gfmt % (parent.get('name'), parent.get('name'))) gseen.append(parent.get("name")) if include_group(group.get('name')): rv.append('"group-%s" -> "group-%s";' % (group.get('name'), parent.get('name'))) return rv src/lib/Bcfg2/Server/Plugins/NagiosGen.py000066400000000000000000000113221303523157100204500ustar00rootroot00000000000000'''This module implements a Nagios configuration generator''' import os import re import sys import glob import socket from Bcfg2.Server.Plugin import Plugin, Generator, StructFile, \ PluginExecutionError class NagiosGen(Plugin, Generator): """ NagiosGen is a Bcfg2 plugin that dynamically generates Nagios configuration file based on Bcfg2 data. """ __author__ = 'bcfg-dev@mcs.anl.gov' line_fmt = '\t%-32s %s' def __init__(self, core): Plugin.__init__(self, core) Generator.__init__(self) self.config = \ StructFile(os.path.join(self.data, 'config.xml'), should_monitor=True, create=self.name) self.Entries = { 'Path': {'/etc/nagiosgen.status': self.createhostconfig, '/etc/nagios/conf.d/bcfg2.cfg': self.createserverconfig}} self.client_attrib = {'encoding': 'ascii', 'owner': 'root', 'group': 'root', 'type': 'file', 'mode': '0400'} self.server_attrib = {'encoding': 'ascii', 'owner': 'nagios', 'group': 'nagios', 'type': 'file', 'mode': '0440'} def createhostconfig(self, entry, metadata): """Build host specific configuration file.""" try: host_address = socket.getaddrinfo(metadata.hostname, None)[0][4][0] except socket.error: self.logger.error() raise PluginExecutionError("Failed to find IP address for %s" % metadata.hostname) host_groups = [grp for grp in metadata.groups if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))] + \ [bundle for bundle in metadata.bundles if os.path.isfile('%s/%s-bundle.cfg' % (self.data, bundle))] host_config = ['define host {', self.line_fmt % ('host_name', metadata.hostname), self.line_fmt % ('alias', metadata.hostname), self.line_fmt % ('address', host_address)] if host_groups: host_config.append(self.line_fmt % ("hostgroups", ",".join(sorted(host_groups)))) # read the config xtra = dict() for el in self.config.Match(metadata): if el.tag == 'Option': xtra[el.get("name")] = el.text if xtra: host_config.extend([self.line_fmt % (opt, val) for opt, val in list(xtra.items())]) if 'use' not in xtra: host_config.append(self.line_fmt % ('use', 'default')) host_config.append('}') entry.text = "%s\n" % "\n".join(host_config) for (key, value) in list(self.client_attrib.items()): 
entry.attrib.__setitem__(key, value) fname = os.path.join(self.data, metadata.hostname + "-host.cfg") try: open(fname, 'w').write(entry.text) except OSError: err = sys.exc_info()[1] self.logger.error("Failed to write %s: %s" % (fname, err)) def createserverconfig(self, entry, _): """Build monolithic server configuration file.""" host_configs = glob.glob(os.path.join(self.data, '*-host.cfg')) group_configs = glob.glob(os.path.join(self.data, '*-group.cfg')) + \ glob.glob(os.path.join(self.data, '*-bundle.cfg')) host_data = [] group_data = [] for host in host_configs: host_data.append(open(host, 'r').read()) used_groups = set(['default']) for line in "\n".join(host_data).splitlines(): # only include those groups which are actually used if "hostgroup" in line: used_groups.update(line.split()[1].split(',')) for group in group_configs: group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group) if group_name in used_groups: groupfile = open(group, 'r') group_data.append(groupfile.read()) groupfile.close() entry.text = "%s\n\n%s" % ("\n".join(group_data), "\n".join(host_data)) for (key, value) in list(self.server_attrib.items()): entry.attrib.__setitem__(key, value) fname = os.path.join(self.data, "nagiosgen.cfg") try: open(fname, 'w').write(entry.text) except OSError: err = sys.exc_info()[1] self.logger.error("Failed to write %s: %s" % (fname, err)) src/lib/Bcfg2/Server/Plugins/Ohai.py000066400000000000000000000063361303523157100174670ustar00rootroot00000000000000"""The Ohai plugin is used to detect information about the client operating system using ohai (http://wiki.opscode.com/display/chef/Ohai) """ import os import sys import glob import lxml.etree import Bcfg2.Server.Plugin try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 except (ImportError, AttributeError): import simplejson as json PROBECODE = """#!/bin/sh export PATH=$PATH:/sbin:/usr/sbin if type ohai >& /dev/null; then ohai else # an empty dict, so "'foo' in metadata.Ohai" tests succeed echo '{}' fi """ class OhaiCache(object): """ Storage for Ohai output on the local filesystem so that the output can be used by bcfg2-info, etc. """ def __init__(self, dirname): self.dirname = dirname self.cache = dict() def hostpath(self, host): """ Get the path to the file that contains Ohai data for the given host """ return os.path.join(self.dirname, "%s.json" % host) def __setitem__(self, item, value): if value is None: # simply return if the client returned nothing return self.cache[item] = json.loads(value) open(self.hostpath(item), 'w').write(value) def __getitem__(self, item): if item not in self.cache: try: data = open(self.hostpath(item)).read() except IOError: raise KeyError(item) self.cache[item] = json.loads(data) return self.cache[item] def __delitem__(self, item): if item in self.cache: del self.cache[item] try: os.unlink(self.hostpath(item)) except OSError: raise IndexError("Could not unlink %s: %s" % (self.hostpath(item), sys.exc_info()[1])) def __len__(self): return len(glob.glob(self.hostpath('*'))) def __iter__(self): data = list(self.cache.keys()) data.extend([x[:-5] for x in os.listdir(self.dirname)]) return data.__iter__() class Ohai(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Probing, Bcfg2.Server.Plugin.Connector): """The Ohai plugin is used to detect information about the client operating system. 
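    Because Ohai is also a Connector plugin, the JSON reported by the probe
    is attached to client metadata as ``metadata.Ohai``. A hypothetical use
    from another plugin or a template (the available keys depend entirely on
    what ohai reports on the client)::

        if 'platform' in metadata.Ohai:
            platform = metadata.Ohai['platform']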
""" def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Probing.__init__(self) Bcfg2.Server.Plugin.Connector.__init__(self) self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai', interpreter='/bin/sh') self.probe.text = PROBECODE self.cache = OhaiCache(self.data) def GetProbes(self, _): return [self.probe] def ReceiveData(self, meta, datalist): if meta.hostname not in self.cache or \ self.cache[meta.hostname] != datalist[0].text: self.cache[meta.hostname] = datalist[0].text if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire(meta.hostname) def get_additional_data(self, meta): if meta.hostname in self.cache: return self.cache[meta.hostname] return dict() src/lib/Bcfg2/Server/Plugins/POSIXCompat.py000066400000000000000000000017531303523157100206530ustar00rootroot00000000000000"""This plugin provides a compatibility layer which turns new-style POSIX entries into old-style entries. """ import Bcfg2.Server.Plugin class POSIXCompat(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.GoalValidator): """POSIXCompat is a goal validator plugin for POSIX entries.""" create = False def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.GoalValidator.__init__(self) def validate_goals(self, metadata, goals): """Verify that we are generating correct old POSIX entries.""" if metadata.version_info and metadata.version_info >= (1, 3, 0, '', 0): # do not care about a client that is _any_ 1.3.0 release # (including prereleases and RCs) return for goal in goals: for entry in goal.getchildren(): if entry.tag == 'Path' and 'mode' in entry.keys(): entry.set('perms', entry.get('mode')) src/lib/Bcfg2/Server/Plugins/Packages/000077500000000000000000000000001303523157100177435ustar00rootroot00000000000000src/lib/Bcfg2/Server/Plugins/Packages/Apt.py000066400000000000000000000146201303523157100210440ustar00rootroot00000000000000""" APT backend for :mod:`Bcfg2.Server.Plugins.Packages` """ import re import gzip from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import Source def strip_suffix(pkgname): """ Remove the ':any' suffix from a dependency name if it is present. """ if pkgname.endswith(':any'): return pkgname[:-4] else: return pkgname class AptCollection(Collection): """ Handle collections of APT sources. This is a no-op object that simply inherits from :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`, overrides nothing, and defers all operations to :class:`PacSource` """ def __init__(self, metadata, sources, cachepath, basepath, debug=False): # we define an __init__ that just calls the parent __init__, # so that we can set the docstring on __init__ to something # different from the parent __init__ -- namely, the parent # __init__ docstring, minus everything after ``.. -----``, # which we use to delineate the actual docs from the # .. autoattribute hacks we have to do to get private # attributes included in sphinx 1.0 """ Collection.__init__(self, metadata, sources, cachepath, basepath, debug=debug) __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0] def get_config(self): """ Get an APT configuration file (i.e., ``sources.list``). 
:returns: string """ lines = ["# This config was generated automatically by the Bcfg2 " "Packages plugin", ''] for source in self: if source.rawurl: if source.rawurl[-1] != '/': source.rawurl = source.rawurl + "/" index = source.rawurl.rfind("/", 0, -1) lines.append("deb %s %s" % (source.rawurl[:index], source.rawurl[index + 1:])) else: lines.append("deb %s %s %s" % (source.url, source.version, " ".join(source.components))) if source.debsrc: lines.append("deb-src %s %s %s" % (source.url, source.version, " ".join(source.components))) lines.append("") return "\n".join(lines) class AptSource(Source): """ Handle APT sources """ #: AptSource sets the ``type`` on Package entries to "deb" ptype = 'deb' @property def urls(self): """ A list of URLs to the base metadata file for each repository described by this source. """ if not self.rawurl: rv = [] for part in self.components: for arch in self.arches: rv.append("%sdists/%s/%s/binary-%s/Packages.gz" % (self.url, self.version, part, arch)) return rv else: return ["%sPackages.gz" % self.rawurl] def read_files(self): # pylint: disable=R0912 bdeps = dict() brecs = dict() bprov = dict() self.pkgnames = set() self.essentialpkgs = set() for fname in self.files: if not self.rawurl: barch = [x for x in fname.split('@') if x.startswith('binary-')][0][7:] else: # RawURL entries assume that they only have one # element and that it is the architecture of the source. barch = self.arches[0] if barch not in bdeps: bdeps[barch] = dict() brecs[barch] = dict() bprov[barch] = dict() try: reader = gzip.GzipFile(fname) except IOError: self.logger.error("Packages: Failed to read file %s" % fname) raise for line in reader.readlines(): if not isinstance(line, str): line = line.decode('utf-8') words = str(line.strip()).split(':', 1) if words[0] == 'Package': pkgname = words[1].strip().rstrip() self.pkgnames.add(pkgname) bdeps[barch][pkgname] = [] brecs[barch][pkgname] = [] elif words[0] == 'Essential' and self.essential: if words[1].strip() == 'yes': self.essentialpkgs.add(pkgname) elif words[0] in ['Depends', 'Pre-Depends', 'Recommends']: vindex = 0 for dep in words[1].split(','): if '|' in dep: cdeps = [re.sub(r'\s+', '', re.sub(r'\(.*\)', '', cdep)) for cdep in dep.split('|')] cdeps = [strip_suffix(cdep) for cdep in cdeps] dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, vindex) vindex += 1 if words[0] == 'Recommends': brecs[barch][pkgname].append(dyn_dname) else: bdeps[barch][pkgname].append(dyn_dname) bprov[barch][dyn_dname] = set(cdeps) else: raw_dep = re.sub(r'\(.*\)', '', dep) raw_dep = raw_dep.rstrip().strip() raw_dep = strip_suffix(raw_dep) if words[0] == 'Recommends': brecs[barch][pkgname].append(raw_dep) else: bdeps[barch][pkgname].append(raw_dep) elif words[0] == 'Provides': for pkg in words[1].split(','): dname = pkg.rstrip().strip() if dname not in bprov[barch]: bprov[barch][dname] = set() bprov[barch][dname].add(pkgname) self.process_files(bdeps, bprov, brecs) read_files.__doc__ = Source.read_files.__doc__ src/lib/Bcfg2/Server/Plugins/Packages/Collection.py000066400000000000000000000602741303523157100224210ustar00rootroot00000000000000""" ``Collection`` objects represent the set of :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects that apply to a given client, and can be used to query all software repositories for a client in aggregate. In some cases this can give faster or more accurate results. 
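A ``Collection`` subclasses ``list``: its elements are the
:class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects that apply to
the client, so iterating over a collection yields its sources. For example
(an illustrative sketch, not part of the plugin itself)::

    for source in collection:
        print(source.ptype)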
In most cases, ``Collection`` methods have been designed to defer the call to the Sources in the ``Collection`` and aggregate the results as appropriate. The simplest ``Collection`` implemention is thus often a simple subclass that adds no additional functionality. Overriding Methods ------------------ As noted above, the ``Collection`` object is written expressly so that you can subclass it and override no methods or attributes, and it will work by deferring all calls to the Source objects it contains. There are thus three approaches to writing a ``Collection`` subclass: #. Keep the superclass almost entirely intact and defer to the ``Source`` objects inside it. For an example of this kind of ``Collection`` object, see :mod:`Bcfg2.Server.Plugins.Packages.Apt`. #. Keep :func:`Collection.complete` intact, and override the methods it calls: :func:`Collection.is_package`, :func:`Collection.is_virtual_package`, :func:`Collection.get_deps`, :func:`Collection.get_provides`, :func:`Collection.get_vpkgs`, and :func:`Collection.setup_data`. There are no examples of this kind of ``Collection`` subclass yet. #. Provide your own implementation of :func:`Collection.complete`, in which case you do not have to override the above methods. You may want to override :func:`Collection.packages_from_entry`, :func:`Collection.packages_to_entry`, and :func:`Collection.get_new_packages`. For an example of this kind of ``Collection`` object, see :mod:`Bcfg2.Server.Plugins.Packages.yum`. In either case, you may want to override :func:`Collection.get_groups`, :func:`Collection.get_group`, :func:`Collection.get_essential`, :func:`Collection.get_config`, :func:`Collection.filter_unknown`, and :func:`Collection.build_extra_structures`. .. _pkg-objects: Conversion Between Package Objects and XML Entries -------------------------------------------------- Collection objects have to translate Bcfg2 entries, :class:`lxml.etree._Element` objects, into objects suitable for use by the backend for resolving dependencies. This is handled by two functions: * :func:`Collection.packages_from_entry` is called to translate an XML entry into a list of packages; * :func:`Collection.packages_to_entry` is called to translate a list of packages back into an XML entry. Because of this translation layer, the return type of any functions below that return packages (e.g., :func:`Collection.get_group`) is actually indeterminate; they must return an object suitable for passing to :func:`Collection.packages_to_entry`. Similarly, functions that take a package as an argument (e.g., :func:`Collection.is_package`) take the appropriate package object. In the documentation below, the actual parameter return type (usually .``string``) used in this base implementation is noted, as well as this fact. The Collection Module --------------------- """ import copy import lxml.etree import Bcfg2.Options import Bcfg2.Server.Plugin from Bcfg2.Logger import Debuggable from Bcfg2.Compat import any, md5 # pylint: disable=W0622 from Bcfg2.Server.FileMonitor import get_fam from Bcfg2.Server.Statistics import track_statistics class Collection(list, Debuggable): """ ``Collection`` objects represent the set of :class:`Bcfg2.Server.Plugins.Packages.Source` objects that apply to a given client, and can be used to query all software repositories for a client in aggregate. In some cases this can give faster or more accurate results. 
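    The simplest backend-specific collection is a do-nothing subclass; a
    minimal sketch, mirroring what the Apt and Pac backends do::

        class ExampleCollection(Collection):
            # defer everything to the contained Source objects
            pass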
""" #: Whether or not this Packages backend supports package groups __package_groups__ = False def __init__(self, metadata, sources, cachepath, basepath, debug=False): """ :param metadata: The client metadata for this collection :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param sources: A list of all sources known to the server that will be used to generate the list of sources that apply to this client :type sources: list of :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects :param cachepath: The filesystem path where cache and other temporary data will be stored :type cachepath: string :param basepath: The filesystem path to the Packages plugin directory, where more permanent data can be stored :type basepath: string :param debug: Enable debugging output :type debug: bool .. ----- .. autoattribute:: __package_groups__ """ Debuggable.__init__(self) list.__init__(self, sources) self.debug_flag = self.debug_flag or debug self.metadata = metadata self.basepath = basepath self.cachepath = cachepath self.virt_pkgs = dict() self.fam = get_fam() try: self.ptype = sources[0].ptype except IndexError: self.ptype = "unknown" @property def cachekey(self): """ A unique identifier for the set of sources contained in this ``Collection`` object. This is unique to a set of sources, **not** necessarily to the client, which lets clients with identical sources share cache data.""" return md5(self.sourcelist().encode('UTF-8')).hexdigest() def get_config(self): """ Get the configuration for the package tool used by this source type. This should be a config appropriate for use on either the server (to resolve dependencies) or the client. Subclasses must override this method in order to be able to generate configs. By default it logs an error and returns the empty string. :returns: string """ self.logger.error("Packages: Cannot generate config for host %s with " "no sources or multiple source types" % self.metadata.hostname) return "" def sourcelist(self): """ Get a human-readable list of sources in this collection, including some information about each source. :returns: string """ srcs = [] for source in self: for url_map in source.url_map: if url_map['arch'] not in self.metadata.groups: continue reponame = source.get_repo_name(url_map) srcs.append("Name: %s" % reponame) srcs.append(" Type: %s" % source.ptype) if url_map['url']: srcs.append(" URL: %s" % url_map['url']) elif url_map['rawurl']: srcs.append(" RAWURL: %s" % url_map['rawurl']) if source.gpgkeys: srcs.append(" GPG Key(s): %s" % ", ".join(source.gpgkeys)) else: srcs.append(" GPG Key(s): None") if len(source.blacklist): srcs.append(" Blacklist: %s" % ", ".join(source.blacklist)) if len(source.whitelist): srcs.append(" Whitelist: %s" % ", ".join(source.whitelist)) srcs.append("") return "\n".join(srcs) def get_relevant_groups(self): """ Get all groups that might be relevant to determining which sources apply to this collection's client. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_relevant_groups` :return: list of strings - group names """ groups = [] for source in self: groups.extend(source.get_relevant_groups(self.metadata)) return sorted(list(set(groups))) @property def cachefiles(self): """ A list of the full path to all cachefiles used by this collection. 
The base implementation simply aggregates :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.cachefile` attributes.""" cachefiles = set() for source in self: cachefiles.add(source.cachefile) return list(cachefiles) @track_statistics() def get_groups(self, grouplist): """ Given a list of package group names, return a dict of ``: ``. This method is provided since some backends may be able to query multiple groups at once faster than serially. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_group`. :param grouplist: The list of groups to query :type grouplist: list of strings - group names :returns: dict of ``: `` In this implementation the packages will be strings, but see :ref:`pkg-objects`.""" rv = dict() for group, ptype in grouplist: rv[group] = self.get_group(group, ptype) return rv @track_statistics() def get_group(self, group, ptype=None): """ Get the list of packages of the given type in a package group. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_group`. :param group: The name of the group to query :type group: string :param ptype: The type of packages to get, for backends that support multiple package types in package groups (e.g., "recommended," "optional," etc.) :type ptype: string :returns: list of strings - package names, but see :ref:`pkg-objects` """ if not self.__package_groups__: self.logger.error("Packages: Package groups are not supported by " "%s" % self.__class__.__name__) return [] for source in self: pkgs = source.get_group(self.metadata, group, ptype=ptype) if pkgs: return pkgs self.logger.warning("Packages: '%s' is not a valid group" % group) return [] def is_package(self, package): """ Return True if a package is a package, False otherwise. The base implementation returns True if any Source object's :func:`Bcfg2.Server.Plugins.Packages.Source.Source.is_package` returns True. :param package: The name of the package, but see :ref:`pkg-objects` :type package: string :returns: bool """ return any(source.is_package(self.metadata, package) for source in self) def is_virtual_package(self, package): """ Return True if a name is a virtual package (i.e., is a symbol provided by a real package), False otherwise. The base implementation returns True if any Source object's :func:`Bcfg2.Server.Plugins.Packages.Source.Source.is_virtual_package` returns True. :param package: The name of the symbol, but see :ref:`pkg-objects` :type package: string :returns: bool """ return any(source.is_virtual_package(self.metadata, package) for source in self) def get_deps(self, package, recs=None): """ Get a list of the dependencies of the given package. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_deps`. :param package: The name of the symbol, but see :ref:`pkg-objects` :type package: string :returns: list of strings, but see :ref:`pkg-objects` """ recommended = None if recs and package in recs: recommended = recs[package] for source in self: if source.is_package(self.metadata, package): return source.get_deps(self.metadata, package, recommended) return [] def get_essential(self): """ Get a list of packages that are essential to the repository. 
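        (For the APT backend, for instance, these are the packages flagged
        ``Essential: yes`` in the repository metadata.)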
The base implementation simply aggregates :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.essentialpkgs` attributes :returns: list of strings, but see :ref:`pkg-objects` """ essential = set() for source in self: essential |= source.essentialpkgs return essential def get_provides(self, package): """ Get a list of all symbols provided by the given package. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_provides`. :param package: The name of the package, but see :ref:`pkg-objects` :type package: string :returns: list of strings, but see :ref:`pkg-objects` """ for source in self: providers = source.get_provides(self.metadata, package) if providers: return providers return [] def get_vpkgs(self): """ Get a list of all virtual packages provided by all sources. The base implementation simply aggregates the results of :func:`Bcfg2.Server.Plugins.Packages.Source.Source.get_vpkgs`. :returns: list of strings, but see :ref:`pkg-objects` """ vpkgs = dict() for source in self: s_vpkgs = source.get_vpkgs(self.metadata) for name, prov_set in list(s_vpkgs.items()): if name not in vpkgs: vpkgs[name] = set(prov_set) else: vpkgs[name].update(prov_set) return vpkgs def filter_unknown(self, unknown): """ After :func:`complete`, filter out packages that appear in the list of unknown packages but should not be presented to the user. E.g., packages that you expect to be unknown. The base implementation filters out packages that are expected to be unknown by any source in this collection. :param unknown: A set of unknown packages. The set should be modified in place. :type unknown: set of strings, but see :ref:`pkg-objects` """ for source in self: source.filter_unknown(unknown) def build_extra_structures(self, independent): """ Add additional entries to the ```` section of the final configuration. This can be used to handle, e.g., GPG keys and other entries besides packages that need to be handled for a complete client configuration. :param independent: The XML tag to add extra entries to. This is modified in place. :type independent: lxml.etree._Element """ pass def get_additional_data(self): """ Get additional :class:`Bcfg2.Server.Plugin.interfaces.Connector` data to be supplied to :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data` (and thence to client metadata objects). The base implementation simply aggregates :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map` attributes. :returns: list of additional Connector data """ sdata = [] for source in self: sdata.extend(copy.deepcopy(source.url_map)) return sdata def setup_data(self, force_update=False): """ Do any collection-level data setup tasks. This is called when sources are loaded or reloaded by :class:`Bcfg2.Server.Plugins.Packages.Packages`. The base implementation is a no-op; the child :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects will handle all data setup. :param force_update: Ignore all local cache and setup data from its original upstream sources (i.e., the package repositories) :type force_update: bool """ pass def packages_from_entry(self, entry): """ Given a Package or BoundPackage entry, get a list of the package(s) described by it in a format appropriate for passing to :func:`complete`. By default, that's just the name; only the :mod:`Bcfg2.Server.Plugins.Packages.Yum` backend supports versions or other extended data. See :ref:`pkg-objects` for more details. :param entry: The XML entry describing the package or packages. 
:type entry: lxml.etree._Element :returns: list of strings, but see :ref:`pkg-objects` """ return [entry.get("name")] def packages_to_entry(self, pkglist, entry): """ Given a list of package objects as returned by :func:`packages_from_entry` or :func:`complete`, return an XML tree describing the BoundPackage entries that should be included in the client configuration. See :ref:`pkg-objects` for more details. :param pkglist: A list of packages as returned by :func:`complete` :type pkglist: list of strings, but see :ref:`pkg-objects` :param entry: The base XML entry to add all of the Package entries to. This should be modified in place. :type entry: lxml.etree._Element """ for pkg in pkglist: lxml.etree.SubElement(entry, 'BoundPackage', name=pkg, version=Bcfg2.Options.setup.packages_version, type=self.ptype, origin='Packages') def get_new_packages(self, initial, complete): """ Compute the difference between the complete package list (as returned by :func:`complete`) and the initial package list computed from the specification. This is necessary because the format may be different between the two lists due to :func:`packages_to_entry` and :func:`packages_from_entry`. See :ref:`pkg-objects` for more details. :param initial: The initial package list :type initial: set of strings, but see :ref:`pkg-objects` :param complete: The final package list :type complete: set of strings, but see :ref:`pkg-objects` :return: set of strings, but see :ref:`pkg-objects` - the set of packages that are in ``complete`` but not in ``initial`` """ return list(complete.difference(initial)) @track_statistics() def complete(self, packagelist, # pylint: disable=R0912,R0914 recommended=None): """ Build a complete list of all packages and their dependencies. :param packagelist: Set of initial packages computed from the specification. :type packagelist: set of strings, but see :ref:`pkg-objects` :returns: tuple of sets - The first element contains a set of strings (but see :ref:`pkg-objects`) describing the complete package list, and the second element is a set of symbols whose dependencies could not be resolved. 
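        A hypothetical call (package names invented)::

            final, unknown = collection.complete(set(['vim']))
            # ``final`` holds 'vim' plus every dependency that could be
            # resolved; ``unknown`` holds any symbols that could not.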
""" # setup vpkg cache pgrps = tuple(self.get_relevant_groups()) if pgrps not in self.virt_pkgs: self.virt_pkgs[pgrps] = self.get_vpkgs() vpkg_cache = self.virt_pkgs[pgrps] # unclassified is set of unsatisfied requirements (may be pkg # for vpkg) unclassified = set(packagelist) vpkgs = set() both = set() pkgs = set(packagelist) packages = set() examined = set() unknown = set() final_pass = False really_done = False # do while unclassified or vpkgs or both or pkgs while unclassified or pkgs or both or final_pass: if really_done: break if len(unclassified) + len(pkgs) + len(both) == 0: # one more pass then exit really_done = True while unclassified: current = unclassified.pop() examined.add(current) is_pkg = False if self.is_package(current): is_pkg = True is_vpkg = current in vpkg_cache if is_pkg and is_vpkg: both.add(current) elif is_pkg and not is_vpkg: pkgs.add(current) elif is_vpkg and not is_pkg: vpkgs.add(current) elif not is_vpkg and not is_pkg: unknown.add(current) while pkgs: # direct packages; current can be added, and all deps # should be resolved current = pkgs.pop() self.debug_log("Packages: handling package requirement %s" % (current,)) packages.add(current) deps = self.get_deps(current, recommended) newdeps = set(deps).difference(examined) if newdeps: self.debug_log("Packages: Package %s added requirements %s" % (current, newdeps)) unclassified.update(newdeps) satisfied_vpkgs = set() for current in vpkgs: # virtual dependencies, satisfied if one of N in the # config, or can be forced if only one provider if len(vpkg_cache[current]) == 1: self.debug_log("Packages: requirement %s satisfied by %s" % (current, vpkg_cache[current])) unclassified.update( vpkg_cache[current].difference(examined)) satisfied_vpkgs.add(current) else: satisfiers = [item for item in vpkg_cache[current] if item in packages] self.debug_log("Packages: requirement %s satisfied by %s" % (current, satisfiers)) satisfied_vpkgs.add(current) vpkgs.difference_update(satisfied_vpkgs) satisfied_both = set() for current in both: # packages that are both have virtual providers as # well as a package with that name. allow use of virt # through explicit specification, then fall back to # forcing current on last pass satisfiers = [item for item in vpkg_cache[current] if item in packages] if satisfiers: self.debug_log("Packages: requirement %s satisfied by %s" % (current, satisfiers)) satisfied_both.add(current) elif current in packagelist or final_pass: pkgs.add(current) satisfied_both.add(current) both.difference_update(satisfied_both) if len(unclassified) + len(pkgs) == 0: final_pass = True else: final_pass = False self.filter_unknown(unknown) return packages, unknown def __repr__(self): return "%s(%s)" % (self.__class__.__name__, list.__repr__(self)) def get_collection_class(source_type): """ Given a source type, determine the class of Collection object that should be used to contain these sources. Note that ``source_type`` is *not* a :class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass; it's the name of a source type as given in ``sources.xml``. :param source_type: The type of source, e.g., "yum" or "apt" :type source_type: string :returns: type - the Collection subclass that should be used to instantiate an object to contain sources of the given type. 
""" for mod in Bcfg2.Options.setup.packages_backends: if mod.__name__.endswith(".%s" % source_type.title()): return getattr(mod, "%sCollection" % source_type.title()) raise Bcfg2.Server.Plugin.PluginExecutionError( "Packages: No collection class found for %s sources" % source_type) src/lib/Bcfg2/Server/Plugins/Packages/Pac.py000066400000000000000000000151301303523157100210200ustar00rootroot00000000000000""" Pacman backend for :mod:`Bcfg2.Server.Plugins.Packages` """ import os import tarfile from Bcfg2.Compat import cPickle from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import Source def parse_db_file(pkgfile): """ Parse a Pacman database file, returning a dictionary with section headings for keys and lists of strings for values. (Reference: ``sync_db_read`` in ``lib/libalpm/be_sync.c``) """ pkg = {} section = None for line in pkgfile: line = line.strip() if section is not None: if not line: section = None else: pkg[section].append(line) elif len(line) >= 2 and line[0] == line[-1] == '%': section = line pkg[section] = [] return pkg def parse_dep(dep): """ Parse a Pacman dependency string, returning the package name, version restriction (or ``None``), and description (or ``None``). (Reference: ``alpm_dep_from_string`` in ``lib/libalpm/deps.c``) """ rest_desc = dep.split(': ', 1) if len(rest_desc) == 1: rest, desc = rest_desc[0], None else: rest, desc = rest_desc # Search for '=' last, since '<=' and '>=' are possible. for symb in ['<', '>', '=']: idx = rest.find(symb) if idx >= 0: name = rest[:idx] version = rest[idx:] break else: name = rest version = None return name, version, desc class PacCollection(Collection): """ Handle collections of Pacman sources. This is a no-op object that simply inherits from :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`, overrides nothing, and defers all operations to :class:`PacSource` """ def __init__(self, metadata, sources, cachepath, basepath, debug=False): # we define an __init__ that just calls the parent __init__, # so that we can set the docstring on __init__ to something # different from the parent __init__ -- namely, the parent # __init__ docstring, minus everything after ``.. -----``, # which we use to delineate the actual docs from the # .. autoattribute hacks we have to do to get private # attributes included in sphinx 1.0 """ Collection.__init__(self, metadata, sources, cachepath, basepath, debug=debug) __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0] @property def __package_groups__(self): return True class PacSource(Source): """ Handle Pacman sources """ #: PacSource sets the ``type`` on Package entries to "pacman" ptype = 'pacman' def __init__(self, basepath, xsource): self.pacgroups = {} Source.__init__(self, basepath, xsource) __init__.__doc__ = Source.__init__.__doc__ def load_state(self): data = open(self.cachefile, 'rb') (self.pkgnames, self.deps, self.provides, self.recommends, self.pacgroups) = cPickle.load(data) load_state.__doc__ = Source.load_state.__doc__ def save_state(self): cache = open(self.cachefile, 'wb') cPickle.dump((self.pkgnames, self.deps, self.provides, self.recommends, self.pacgroups), cache, 2) cache.close() save_state.__doc__ = Source.save_state.__doc__ @property def urls(self): """ A list of URLs to the base metadata file for each repository described by this source. 
""" if not self.rawurl: rv = [] for part in self.components: for arch in self.arches: rv.append("%s%s/os/%s/%s.db.tar.gz" % (self.url, part, arch, part)) return rv else: raise Exception("PacSource : RAWUrl not supported (yet)") def read_files(self): # pylint: disable=R0912 bdeps = {} brecs = {} bprov = {} self.pkgnames = set() self.pacgroups = {} for fname in self.files: if not self.rawurl: barch = [x for x in fname.split('@') if x in self.arches][0] else: # RawURL entries assume that they only have one # element and that it is the architecture of the source. barch = self.arches[0] if barch not in bdeps: bdeps[barch] = {} brecs[barch] = {} bprov[barch] = {} try: self.debug_log("Packages: try to read %s" % fname) tar = tarfile.open(fname, "r") except (IOError, tarfile.TarError): self.logger.error("Packages: Failed to read file %s" % fname) raise packages = {} for tarinfo in tar: if not tarinfo.isfile(): continue prefix = os.path.dirname(tarinfo.name) if prefix not in packages: packages[prefix] = {} pkg = parse_db_file(tar.extractfile(tarinfo)) packages[prefix].update(pkg) for pkg in packages.values(): pkgname = pkg['%NAME%'][0] self.pkgnames.add(pkgname) bdeps[barch][pkgname] = [] brecs[barch][pkgname] = [] if '%DEPENDS%' in pkg: for dep in pkg['%DEPENDS%']: dname = parse_dep(dep)[0] bdeps[barch][pkgname].append(dname) if '%OPTDEPENDS%' in pkg: for dep in pkg['%OPTDEPENDS%']: dname = parse_dep(dep)[0] brecs[barch][pkgname].append(dname) if '%PROVIDES%' in pkg: for dep in pkg['%PROVIDES%']: dname = parse_dep(dep)[0] if dname not in bprov[barch]: bprov[barch][dname] = set() bprov[barch][dname].add(pkgname) if '%GROUPS%' in pkg: for group in pkg['%GROUPS%']: if group not in self.pacgroups: self.pacgroups[group] = [] self.pacgroups[group].append(pkgname) tar.close() self.process_files(bdeps, bprov, brecs) read_files.__doc__ = Source.read_files.__doc__ def get_group(self, metadata, group, ptype=None): try: return self.pacgroups[group] except KeyError: return [] get_group.__doc__ = Source.get_group.__doc__ src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py000066400000000000000000000141011303523157100233740ustar00rootroot00000000000000""" PackagesSources handles the :ref:`server-plugins-generators-packages` ``sources.xml`` file""" import os import sys import Bcfg2.Server.Plugin from Bcfg2.Server.Statistics import track_statistics from Bcfg2.Server.Plugins.Packages.Source import SourceInitError # pylint: disable=E0012,R0924 class PackagesSources(Bcfg2.Server.Plugin.StructFile): """ PackagesSources handles parsing of the :mod:`Bcfg2.Server.Plugins.Packages` ``sources.xml`` file, and the creation of the appropriate :class:`Bcfg2.Server.Plugins.Packages.Source.Source` object for each ``Source`` tag. 
""" __identifier__ = None create = "Sources" def __init__(self, filename, cachepath, packages): """ :param filename: The full path to ``sources.xml`` :type filename: string :param cachepath: The full path to the directory where :class:`Bcfg2.Server.Plugins.Packages.Source.Source` data will be cached :type cachepath: string :param packages: The Packages plugin object ``sources.xml`` is being parsed on behalf of (i.e., the calling object) :type packages: Bcfg2.Server.Plugins.Packages.Packages :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` - If ``sources.xml`` cannot be read """ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, should_monitor=True) #: The full path to the directory where #: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` data #: will be cached self.cachepath = cachepath if not os.path.exists(self.cachepath): # create cache directory if needed try: os.makedirs(self.cachepath) except OSError: err = sys.exc_info()[1] self.logger.error("Could not create Packages cache at %s: %s" % (self.cachepath, err)) #: The :class:`Bcfg2.Server.Plugins.Packages.Packages` that #: instantiated this ``PackagesSources`` object self.pkg_obj = packages #: The set of all XML files that have been successfully #: parsed. This is used by :attr:`loaded` to determine if the #: sources have been fully parsed and the #: :class:`Bcfg2.Server.Plugins.Packages.Packages` plugin #: should be told to reload its data. self.parsed = set() def set_debug(self, debug): Bcfg2.Server.Plugin.StructFile.set_debug(self, debug) for source in self.entries: source.set_debug(debug) def HandleEvent(self, event=None): """ HandleEvent is called whenever the FAM registers an event. When :attr:`loaded` becomes True, :func:`Bcfg2.Server.Plugins.Packages.Packages.Reload` is called to reload all plugin data from the configured sources. :param event: The event object :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ if event and event.filename != self.name: for fpath in self.extras: if fpath == os.path.abspath(event.filename): self.parsed.add(fpath) break Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event=event) if self.loaded: self.logger.info("Reloading Packages plugin") self.pkg_obj.Reload() @property def loaded(self): """ Whether or not all XML files (``sources.xml`` and everything XIncluded in it) have been parsed. This flag is used to determine if the Packages plugin should be told to load its data. """ return sorted(list(self.parsed)) == sorted(self.extras) @track_statistics() def Index(self): Bcfg2.Server.Plugin.StructFile.Index(self) self.entries = [] if self.loaded: for xsource in self.xdata.findall('.//Source'): source = self.source_from_xml(xsource) if source is not None: self.entries.append(source) Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__ + """ ``Index`` is responsible for calling :func:`source_from_xml` for each ``Source`` tag in each file. """ @track_statistics() def source_from_xml(self, xsource): """ Create a :class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass object from XML representation of a source in ``sources.xml``. ``source_from_xml`` determines the appropriate subclass of ``Source`` to instantiate according to the ``type`` attribute of the ``Source`` tag. 
:param xsource: The XML tag representing the source :type xsource: lxml.etree._Element :returns: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass, or None on error """ stype = xsource.get("type") if stype is None: self.logger.error("Packages: No type specified for source at %s, " "skipping" % (xsource.get("rawurl", xsource.get("url")))) return None cls = None for mod in Bcfg2.Options.setup.packages_backends: if mod.__name__.endswith(".%s" % stype.title()): cls = getattr(mod, "%sSource" % stype.title()) break else: self.logger.error("Packages: Unknown source type %s" % stype) return None try: source = cls(self.cachepath, xsource) except SourceInitError: err = sys.exc_info()[1] self.logger.error("Packages: %s" % err) source = None return source def __getitem__(self, key): return self.entries[key] def __repr__(self): return "PackagesSources: %s" % repr(self.entries) def __str__(self): return "PackagesSources: %s sources" % len(self.entries) def __len__(self): return len(self.entries) src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py000066400000000000000000000064741303523157100213760ustar00rootroot00000000000000""" pkgng backend for :mod:`Bcfg2.Server.Plugins.Packages` """ import lzma import tarfile try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 except (ImportError, AttributeError): import simplejson as json from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import Source class PkgngCollection(Collection): """ Handle collections of pkgng sources. This is a no-op object that simply inherits from :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`, overrides nothing, and defers all operations to :class:`PacSource` """ def __init__(self, metadata, sources, cachepath, basepath, debug=False): # we define an __init__ that just calls the parent __init__, # so that we can set the docstring on __init__ to something # different from the parent __init__ -- namely, the parent # __init__ docstring, minus everything after ``.. -----``, # which we use to delineate the actual docs from the # .. autoattribute hacks we have to do to get private # attributes included in sphinx 1.0 """ Collection.__init__(self, metadata, sources, cachepath, basepath, debug=debug) __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0] class PkgngSource(Source): """ Handle pkgng sources """ #: PkgngSource sets the ``type`` on Package entries to "pkgng" ptype = 'pkgng' @property def urls(self): """ A list of URLs to the base metadata file for each repository described by this source. """ if not self.rawurl: rv = [] for part in self.components: for arch in self.arches: rv.append("%s/freebsd:%s:%s/%s/packagesite.txz" % (self.url, self.version, arch, part)) return rv else: return ["%s/packagesite.txz" % self.rawurl] def read_files(self): bdeps = dict() self.pkgnames = set() for fname in self.files: if not self.rawurl: abi = [x for x in fname.split('@') if x.startswith('freebsd:')][0][8:] barch = ':'.join(abi.split(':')[1:]) else: # RawURL entries assume that they only have one # element and that it is the architecture of the source. 
barch = self.arches[0] if barch not in bdeps: bdeps[barch] = dict() try: tar = tarfile.open(fileobj=lzma.LZMAFile(fname)) reader = tar.extractfile('packagesite.yaml') except (IOError, tarfile.TarError): self.logger.error("Packages: Failed to read file %s" % fname) raise for line in reader.readlines(): if not isinstance(line, str): line = line.decode('utf-8') pkg = json.loads(line) pkgname = pkg['name'] self.pkgnames.add(pkgname) if 'deps' in pkg: bdeps[barch][pkgname] = pkg['deps'].keys() self.process_files(bdeps, dict()) read_files.__doc__ = Source.read_files.__doc__ src/lib/Bcfg2/Server/Plugins/Packages/Source.py000066400000000000000000001000501303523157100215510ustar00rootroot00000000000000""" ``Source`` objects represent a single tag in ``sources.xml``. Note that a single Source tag can itself describe multiple repositories (if it uses the "url" attribute instead of "rawurl"), and so can the ``Source`` object. This can be the source (har har) of some confusion. See :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.sourcelist` for the proper way to get all repos from a ``Source`` object. Source objects are aggregated into :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` objects, which are actually called by :class:`Bcfg2.Server.Plugins.Packages.Packages`. This way a more advanced subclass can query repositories in aggregate rather than individually, which may give faster or more accurate results. The base ``Source`` object must be subclassed to handle each repository type. How you subclass ``Source`` will depend on how you subclassed :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`; see :mod:`Bcfg2.Server.Plugins.Packages.Collection` for more details on different methods for doing that. If you are using the stock (or a near-stock) :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` object, then you will need to implement the following methods and attributes in your ``Source`` subclass: * :func:`Source.urls` * :func:`Source.read_files` Additionally, you may want to consider overriding the following methods and attributes: * :func:`Source.is_virtual_package` * :func:`Source.get_group` * :attr:`Source.unknown_filter` * :attr:`Source.load_state` * :attr:`Source.save_state` For an example of this kind of ``Source`` object, see :mod:`Bcfg2.Server.Plugins.Packages.Apt`. If you are overriding the ``Collection`` object in more depth, then you have more leeway in what you might want to override or implement in your ``Source`` subclass. For an example of this kind of ``Source`` object, see :mod:`Bcfg2.Server.Plugins.Packages.Yum`. """ import os import re import sys from Bcfg2.Logger import Debuggable from Bcfg2.Compat import HTTPError, HTTPBasicAuthHandler, \ HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, urlopen, \ cPickle, md5 from Bcfg2.Server.Statistics import track_statistics def fetch_url(url): """ Return the content of the given URL. :param url: The URL to fetch content from. 
:type url: string :raises: ValueError - Malformed URL :raises: URLError - Failure fetching URL :returns: string - the content of the page at the given URL """ if '@' in url: mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url) if not mobj: raise ValueError("Invalid URL") user = mobj.group(2) passwd = mobj.group(3) url = mobj.group(1) + mobj.group(4) auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm()) auth.add_password(None, url, user, passwd) install_opener(build_opener(auth)) return urlopen(url).read() class SourceInitError(Exception): """ Raised when a :class:`Source` object fails instantiation. """ pass #: A regular expression used to determine the base name of a repo from #: its URL. This is used when generating repo configs and by #: :func:`Source.get_repo_name`. It handles `Pulp #: `_ and `mrepo #: `_ repositories specially, #: and otherwise grabs the last component of the URL (as delimited by #: slashes). REPO_RE = re.compile(r'(?:pulp/repos/|/RPMS\.|/)([^/]+)/?$') class Source(Debuggable): # pylint: disable=R0902 """ ``Source`` objects represent a single tag in ``sources.xml``. Note that a single Source tag can itself describe multiple repositories (if it uses the "url" attribute instead of "rawurl"), and so can the ``Source`` object. Note that a number of the attributes of this object may be more or less specific to one backend (e.g., :attr:`essentialpkgs`, :attr:`recommended`, :attr:`gpgkeys`, but they are included in the superclass to make the parsing of sources from XML more consistent, and to make it trivial for other backends to support those features. """ #: The Package type handled by this Source class. The ``type`` #: attribute of Package entries will be set to the value ``ptype`` #: when they are handled by :mod:`Bcfg2.Server.Plugins.Packages`. 
ptype = None def __init__(self, basepath, xsource): # pylint: disable=R0912 """ :param basepath: The base filesystem path under which cache data for this source should be stored :type basepath: string :param xsource: The XML tag that describes this source :type source: lxml.etree._Element :raises: :class:`Bcfg2.Server.Plugins.Packages.Source.SourceInitError` """ Debuggable.__init__(self) #: The base filesystem path under which cache data for this #: source should be stored self.basepath = basepath #: The XML tag that describes this source self.xsource = xsource #: A set of package names that are deemed "essential" by this #: source self.essentialpkgs = set() #: A list of the text of all 'Component' attributes of this #: source from XML self.components = [] #: A list of the arches supported by this source self.arches = [] #: A list of the the names of packages that are blacklisted #: from this source self.blacklist = [] #: A list of the the names of packages that are whitelisted in #: this source self.whitelist = [] #: Whether or not to include deb-src lines in the generated APT #: configuration self.debsrc = False #: A dict of repository options that will be included in the #: configuration generated on the server side (if such is #: applicable; most backends do not generate any sort of #: repository configuration on the Bcfg2 server) self.server_options = dict() #: A dict of repository options that will be included in the #: configuration generated for the client (if that is #: supported by the backend) self.client_options = dict() #: A list of URLs to GPG keys that apply to this source self.gpgkeys = [] #: Whether or not to include essential packages from this source self.essential = True #: Whether or not to include recommended packages from this source self.recommended = False #: The "rawurl" attribute from :attr:`xsource`, if applicable. #: A trailing slash is automatically appended to this if there #: wasn't one already present. self.rawurl = None #: The "url" attribute from :attr:`xsource`, if applicable. A #: trailing slash is automatically appended to this if there #: wasn't one already present. self.url = None #: The "version" attribute from :attr:`xsource` self.version = None #: The "name" attribute from :attr:`xsource` self.name = None #: A list of predicates that are used to determine if this #: source applies to a given #: :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` #: object. self.conditions = [] #: Formerly, :ref:`server-plugins-generators-packages` only #: supported applying package sources to groups; that is, they #: could not be assigned by more complicated logic like #: per-client repositories and group or client negation. This #: attribute attempts to provide for some limited backwards #: compat with older code that relies on this. self.groups = [] #: A set of all package names in this source. This will not #: necessarily be populated, particularly by backends that #: reimplement large portions of #: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` self.pkgnames = set() #: A dict of ```` -> ````. #: This will not necessarily be populated, particularly by #: backends that reimplement large portions of #: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` self.deps = dict() #: A dict of ```` -> ````. This will not necessarily be populated, #: particularly by backends that reimplement large portions of #: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` self.provides = dict() #: A dict of ```` -> ````. 
This will not necessarily be populated. self.recommends = dict() self._init_attributes(xsource) #: The file (or directory) used for this source's cache data self.cachefile = os.path.join(self.basepath, "cache-%s" % self.cachekey) if not self.rawurl: baseurl = self.url + "%(version)s/%(component)s/%(arch)s/" else: baseurl = self.rawurl #: A list of dicts, each of which describes the URL to one #: repository contained in this source. Each dict contains #: the following keys: #: #: * ``version``: The version of the repo (``None`` for #: ``rawurl`` repos) #: * ``component``: The component use to form this URL #: (``None`` for ``rawurl`` repos) #: * ``arch``: The architecture of this repo #: * ``baseurl``: Either the ``rawurl`` attribute, or the #: format string built from the ``url`` attribute #: * ``url``: The actual URL to the repository self.url_map = [] for arch in self.arches: if self.url: usettings = [dict(version=self.version, component=comp, arch=arch, debsrc=self.debsrc) for comp in self.components] else: # rawurl given usettings = [dict(version=self.version, component=None, arch=arch, debsrc=self.debsrc)] for setting in usettings: if not self.rawurl: setting['baseurl'] = self.url else: setting['baseurl'] = self.rawurl setting['url'] = baseurl % setting setting['name'] = self.get_repo_name(setting) self.url_map.extend(usettings) def _init_attributes(self, xsource): """ This functions evaluates the Source tag and parses all attributes. Override this function in a sub class to parse specific attributes. Do not use ``__init__`` because ``Source.__init__`` may call other functions that already need this specific fields. This functions is called before any other function. :param xsource: The XML tag that describes this source :type source: lxml.etree._Element """ self.components = [item.text for item in xsource.findall('Component')] self.arches = [item.text for item in xsource.findall('Arch')] self.blacklist = [item.text for item in xsource.findall('Blacklist')] self.whitelist = [item.text for item in xsource.findall('Whitelist')] self.debsrc = xsource.get('debsrc', 'false') == 'true' opts = xsource.findall("Options") for el in opts: repoopts = dict([(k, v) for k, v in el.attrib.items() if k != "clientonly" and k != "serveronly"]) if el.get("clientonly", "false").lower() == "false": self.server_options.update(repoopts) if el.get("serveronly", "false").lower() == "false": self.client_options.update(repoopts) self.gpgkeys = [el.text for el in xsource.findall("GPGKey")] self.essential = xsource.get('essential', 'true').lower() == 'true' self.recommended = xsource.get('recommended', 'false').lower() == 'true' self.rawurl = xsource.get('rawurl', '') if self.rawurl and not self.rawurl.endswith("/"): self.rawurl += "/" self.url = xsource.get('url', '') if self.url and not self.url.endswith("/"): self.url += "/" self.version = xsource.get('version', '') self.name = xsource.get('name', None) for el in xsource.iterancestors(): if el.tag == "Group": if el.get("negate", "false").lower() == "true": self.conditions.append(lambda m, el=el: el.get("name") not in m.groups) else: self.groups.append(el.get("name")) self.conditions.append(lambda m, el=el: el.get("name") in m.groups) elif el.tag == "Client": if el.get("negate", "false").lower() == "true": self.conditions.append(lambda m, el=el: el.get("name") != m.hostname) else: self.conditions.append(lambda m, el=el: el.get("name") == m.hostname) @property def cachekey(self): """ A unique key for this source that will be used to generate :attr:`cachefile` 
and other cache paths """ return md5(cPickle.dumps([self.version, self.components, self.url, self.rawurl, self.arches])).hexdigest() def get_relevant_groups(self, metadata): """ Get all groups that might be relevant to determining which sources apply to this collection's client. :return: list of strings - group names """ return sorted(list(set([g for g in metadata.groups if (g in self.groups or g in self.arches)]))) def load_state(self): """ Load saved state from :attr:`cachefile`. If caching and state is handled by the package library, then this function does not need to be implemented. :raises: OSError - If the saved data cannot be read :raises: cPickle.UnpicklingError - If the saved data is corrupt """ data = open(self.cachefile, 'rb') (self.pkgnames, self.deps, self.provides, self.essentialpkgs, self.recommends) = cPickle.load(data) def save_state(self): """ Save state to :attr:`cachefile`. If caching and state is handled by the package library, then this function does not need to be implemented. """ cache = open(self.cachefile, 'wb') cPickle.dump((self.pkgnames, self.deps, self.provides, self.essentialpkgs, self.recommends), cache, 2) cache.close() @track_statistics() def setup_data(self, force_update=False): """Perform all data fetching and setup tasks. For most backends, this involves downloading all metadata from the repository, parsing it, and caching the parsed data locally. The order of operations is: #. Call :func:`load_state` to try to load data from the local cache. #. If that fails, call :func:`read_files` to read and parse the locally downloaded metadata files. #. If that fails, call :func:`update` to fetch the metadata, then :func:`read_files` to parse it. Obviously with a backend that leverages repo access libraries to avoid downloading all metadata, many of the functions called by ``setup_data`` can be no-ops (or nearly so). :param force_update: Ignore all locally cached and downloaded data and fetch the metadata anew from the upstream repository. :type force_update: bool """ # there are a bunch of wildcard except statements here, # because the various functions called herein (``load_state``, # ``read_files``, ``update``) are defined entirely by the # Packages plugins that implement them. # # TODO: we should define an exception subclass that each of # these functions can raise when an *expected* error condition # is encountered. # # pylint: disable=W0702 if not force_update: if os.path.exists(self.cachefile): try: self.load_state() except (OSError, cPickle.UnpicklingError): err = sys.exc_info()[1] self.logger.error("Packages: Cachefile %s load failed: %s" % (self.cachefile, err)) self.logger.error("Falling back to file read") try: self.read_files() except: err = sys.exc_info()[1] self.logger.error("Packages: File read failed: %s" % err) self.logger.error("Falling back to file download") force_update = True else: force_update = True if force_update: try: self.update() self.read_files() except: err = sys.exc_info()[1] self.logger.error("Packages: Failed to load data for %s: %s" % (self, err)) self.logger.error("Some Packages will be missing") # pylint: enable=W0702 def get_repo_name(self, url_map): """ Try to find a sensible name for a repository. Since ``sources.xml`` doesn't provide for repository names, we have to try to guess at the names when generating config files or doing other operations that require repository names. This function tries several approaches: #. First, if the source element containts a ``name`` attribute, use that as the name. #. 
If the map contains a ``component`` key, use that as the name. #. If not, then try to match the repository URL against :attr:`Bcfg2.Server.Plugins.Packages.Source.REPO_RE`. If that succeeds, use the first matched group; additionally, if the Source tag that describes this repo is contained in a ```` tag, prepend that to the name. #. If :attr:`Bcfg2.Server.Plugins.Packages.Source.REPO_RE` does not match the repository, and the Source tag that describes this repo is contained in a ```` tag, use the name of the group. #. Failing that, use the full URL to this repository, with the protocol and trailing slash stripped off if possible. Once that is done, all characters disallowed in yum source names are replaced by dashes. See below for the exact regex. The yum rules are used here because they are so restrictive. ``get_repo_name`` is **not** guaranteed to return a unique name. If you require a unique name, then you will need to generate all repo names and make them unique through the approach of your choice, e.g., appending numbers to non-unique repository names. See :func:`Bcfg2.Server.Plugins.Packages.Yum.Source.get_repo_name` for an example. :param url_map: A single :attr:`url_map` dict, i.e., any single element of :attr:`url_map`. :type url_map: dict :returns: string - the name of the repository. """ if self.name: return self.name if url_map['component']: rname = url_map['component'] else: match = REPO_RE.search(url_map['url']) if match: rname = match.group(1) if self.groups: rname = "%s-%s" % (self.groups[0], rname) elif self.groups: rname = self.groups[0] else: # a global source with no reasonable name. Try to # strip off the protocol and trailing slash. match = re.search(r'^[A-z]://(.*?)/?', url_map['url']) if match: rname = match.group(1) else: # what kind of crazy url is this? I give up! # just use the full url and let the regex below # make it even uglier. rname = url_map['url'] # see yum/__init__.py in the yum source, lines 441-449, for # the source of this regex. yum doesn't like anything but # string.ascii_letters, string.digits, and [-_.:]. There # doesn't seem to be a reason for this, because yum. return re.sub(r'[^A-Za-z0-9-_.:]', '-', rname) def __repr__(self): if self.rawurl: return "%s at %s" % (self.__class__.__name__, self.rawurl) elif self.url: return "%s at %s" % (self.__class__.__name__, self.url) else: return self.__class__.__name__ @property def urls(self): """ A list of URLs to the base metadata file for each repository described by this source. """ return [] @property def files(self): """ A list of files stored in the local cache by this backend. """ return [self.escape_url(url) for url in self.urls] def get_vpkgs(self, metadata): """ Get a list of all virtual packages provided by all sources. :returns: list of strings """ agroups = ['global'] + [a for a in self.arches if a in metadata.groups] vdict = dict() for agrp in agroups: if agrp not in self.provides: self.logger.warning("%s provides no packages for %s" % (self, agrp)) continue for key, value in list(self.provides[agrp].items()): if key not in vdict: vdict[key] = set(value) else: vdict[key].update(value) return vdict def is_virtual_package(self, metadata, package): # pylint: disable=W0613 """ Return True if a name is a virtual package (i.e., is a symbol provided by a real package), False otherwise. 
:param package: The name of the symbol, but see :ref:`pkg-objects` :type package: string :returns: bool """ return False def escape_url(self, url): """ Given a URL to a repository metadata file, return the full path to a file suitable for storing that file locally. This is acheived by replacing all forward slashes in the URL with ``@``. :param url: The URL to escape :type url: string :returns: string """ return os.path.join(self.basepath, url.replace('/', '@')) def read_files(self): """ Read and parse locally downloaded metadata files and populates :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.pkgnames`. Should call :func:`Bcfg2.Server.Plugins.Packages.Source.Source.process_files` as its final step.""" pass def process_files(self, dependencies, # pylint: disable=R0912,W0102 provides, recommends=dict()): """ Given dicts of depends and provides generated by :func:`read_files`, this generates :attr:`deps` and :attr:`provides` and calls :func:`save_state` to save the cached data to disk. All arguments are dicts of dicts of lists. Keys are the arches of packages contained in this source; values are dicts whose keys are package names and values are lists of either dependencies for each package the symbols provided by each package. :param dependencies: A dict of dependencies found in the metadata for this source. :type dependencies: dict; see above. :param provides: A dict of symbols provided by packages in this repository. :type provides: dict; see above. :param recommends: A dict of recommended dependencies found for this source. :type recommends: dict; see above. """ self.deps['global'] = dict() self.recommends['global'] = dict() self.provides['global'] = dict() for barch in dependencies: self.deps[barch] = dict() self.recommends[barch] = dict() self.provides[barch] = dict() for pkgname in self.pkgnames: pset = set() rset = set() for barch in dependencies: if pkgname not in dependencies[barch]: dependencies[barch][pkgname] = [] pset.add(tuple(dependencies[barch][pkgname])) if len(pset) == 1: self.deps['global'][pkgname] = pset.pop() else: for barch in dependencies: self.deps[barch][pkgname] = dependencies[barch][pkgname] for barch in recommends: if pkgname not in recommends[barch]: recommends[barch][pkgname] = [] rset.add(tuple(recommends[barch][pkgname])) if len(rset) == 1: self.recommends['global'][pkgname] = rset.pop() else: for barch in recommends: self.recommends[barch][pkgname] = \ recommends[barch][pkgname] provided = set() for bprovided in list(provides.values()): provided.update(set(bprovided)) for prov in provided: prset = set() for barch in provides: if prov not in provides[barch]: continue prset.add(tuple(provides[barch].get(prov, ()))) if len(prset) == 1: self.provides['global'][prov] = prset.pop() else: for barch in provides: self.provides[barch][prov] = provides[barch].get(prov, ()) self.save_state() def unknown_filter(self, package): """ A predicate that is used by :func:`filter_unknown` to filter packages from the results of :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.complete` that should not be shown to the end user (i.e., that are not truly unknown, but are rather packaging system artifacts). 
By default, excludes any package whose name starts with "choice" :param package: The name of a package that was unknown to the backend :type package: string :returns: bool """ return package.startswith("choice") def filter_unknown(self, unknown): """ After :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.complete`, filter out packages that appear in the list of unknown packages but should not be presented to the user. :attr:`unknown_filter` is called to assess whether or not a package is expected to be unknown. :param unknown: A set of unknown packages. The set should be modified in place. :type unknown: set of strings """ unknown.difference_update(set([u for u in unknown if self.unknown_filter(u)])) def update(self): """ Download metadata from the upstream repository and cache it locally. :raises: ValueError - If any URL in :attr:`urls` is malformed :raises: OSError - If there is an error writing the local cache :raises: HTTPError - If there is an error fetching the remote data """ for url in self.urls: self.logger.info("Packages: Updating %s" % url) fname = self.escape_url(url) try: open(fname, 'wb').write(fetch_url(url)) except ValueError: self.logger.error("Packages: Bad url string %s" % url) raise except OSError: err = sys.exc_info()[1] self.logger.error("Packages: Could not write data from %s to " "local cache at %s: %s" % (url, fname, err)) raise except HTTPError: err = sys.exc_info()[1] self.logger.error("Packages: Failed to fetch url %s. HTTP " "response code=%s" % (url, err.code)) raise def applies(self, metadata): """ Return true if this source applies to the given client, i.e., the client is in all necessary groups. :param metadata: The client metadata to check to see if this source applies :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: bool """ # check arch groups if not self.arch_groups_match(metadata): return False # check Group/Client tags from sources.xml for condition in self.conditions: if not condition(metadata): return False return True def get_arches(self, metadata): """ Get a list of architectures that the given client has and for which this source provides packages for. The return value will always include ``global``. :param metadata: The client metadata to get matching architectures for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: list of strings """ return ['global'] + [a for a in self.arches if a in metadata.groups] def get_deps(self, metadata, package, recommended=None): """ Get a list of the dependencies of the given package. :param package: The name of the symbol :type package: string :returns: list of strings """ recs = [] if ((recommended is None and self.recommended) or (recommended and recommended.lower() == 'true')): for arch in self.get_arches(metadata): if package in self.recommends[arch]: recs.extend(self.recommends[arch][package]) for arch in self.get_arches(metadata): if package in self.deps[arch]: recs.extend(self.deps[arch][package]) return recs def get_provides(self, metadata, package): """ Get a list of all symbols provided by the given package. :param package: The name of the package :type package: string :returns: list of strings """ for arch in self.get_arches(metadata): if package in self.provides[arch]: return self.provides[arch][package] return [] def is_package(self, metadata, package): # pylint: disable=W0613 """ Return True if a package is a package, False otherwise. 
:param package: The name of the package :type package: string :returns: bool """ return (package in self.pkgnames and package not in self.blacklist and (len(self.whitelist) == 0 or package in self.whitelist)) def get_group(self, metadata, group, ptype=None): # pylint: disable=W0613 """ Get the list of packages of the given type in a package group. :param group: The name of the group to query :type group: string :param ptype: The type of packages to get, for backends that support multiple package types in package groups (e.g., "recommended," "optional," etc.) :type ptype: string :returns: list of strings - package names """ return [] def arch_groups_match(self, metadata): """ Returns True if the client is in an arch group that matches the arch of this source. :returns: bool """ for arch in self.arches: if arch in metadata.groups: return True return False src/lib/Bcfg2/Server/Plugins/Packages/Yum.py000066400000000000000000001625351303523157100211030ustar00rootroot00000000000000""" Yum backend for :mod:`Bcfg2.Server.Plugins.Packages`. This module is the most complex backend because it has to handle Yum sources without yum Python libraries, with yum Python libraries, and Pulp sources. (See :ref:`native-yum-libraries` for details on using the yum Python libraries and :ref:`pulp-source-support` for details on Pulp sources.) .. _bcfg2-yum-helper: bcfg2-yum-helper ~~~~~~~~~~~~~~~~ If using the yum Python libraries, :class:`YumCollection` makes shell calls to an external command, ``bcfg2-yum-helper``, which performs the actual yum API calls. This is done because the yum libs have horrific memory leaks, and apparently the right way to get around that in long-running processes it to have a short-lived helper. This is how it's done by yum itself in ``yum-updatesd``, which is a long-running daemon that checks for and applies updates. .. _yum-pkg-objects: Package Objects ~~~~~~~~~~~~~~~ :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` objects have the option to translate from some backend-specific representation of packages to XML entries; see :ref:`pkg-objects` for more information on this. If you are using the Python yum libraries, :class:`Bcfg2.Server.Plugins.Packages.Yum.YumCollection` opts to do this, using the yum tuple representation of packages, which is:: (, , , , ) For shorthand this is occasionally abbrevated "naevr". Any datum that is not defined is ``None``. So a normal package entry that can be any version would be passed to :ref:`bcfg2-yum-helper` as:: ("somepackage", None, None, None, None) A package returned from the helper might look more like this:: ("somepackage", "x86_64", None, "1.2.3", "1.el6") We translate between this representation and the XML representation of packages with :func:`YumCollection.packages_from_entry` and :func:`YumCollection.packages_to_entry`. 
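As an illustrative sketch (hypothetical data, and assuming the plugin's
``version`` option is not set to ``any``): the tuple fields are
``(name, arch, epoch, version, release)``, so the fully-specified tuple
above would be rendered by :func:`YumCollection.packages_to_entry` as an
entry roughly like::

    <BoundPackage name="somepackage" type="yum" origin="Packages"
                  arch="x86_64" version="1.2.3" release="1.el6"/>

while :func:`YumCollection.packages_from_entry` performs the reverse
translation.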
The Yum Backend ~~~~~~~~~~~~~~~ """ import os import re import sys import time import copy import errno import socket import logging import lxml.etree import Bcfg2.Server.Plugin import Bcfg2.Server.FileMonitor from lockfile import FileLock from Bcfg2.Utils import Executor from distutils.spawn import find_executable # pylint: disable=E0611 # pylint: disable=W0622 from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \ ConfigParser, any # pylint: enable=W0622 from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \ fetch_url from Bcfg2.Server.Statistics import track_statistics LOGGER = logging.getLogger(__name__) # pylint: disable=E0611 try: from pulp.client.consumer.config import ConsumerConfig from pulp.client.api.repository import RepositoryAPI from pulp.client.api.consumer import ConsumerAPI from pulp.client.api import server HAS_PULP = True except ImportError: HAS_PULP = False # pylint: enable=E0611 try: import yum try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 except (ImportError, AttributeError): import simplejson as json HAS_YUM = True except ImportError: HAS_YUM = False LOGGER.info("Packages: No yum libraries found; forcing use of internal " "dependency resolver") XP = '{http://linux.duke.edu/metadata/common}' RP = '{http://linux.duke.edu/metadata/rpm}' RPO = '{http://linux.duke.edu/metadata/repo}' FL = '{http://linux.duke.edu/metadata/filelists}' PULPSERVER = None PULPCONFIG = None options = [ # pylint: disable=C0103 Bcfg2.Options.Common.client_timeout, Bcfg2.Options.PathOption( cf=("packages:yum", "helper"), dest="yum_helper", help="Path to the bcfg2-yum-helper executable"), Bcfg2.Options.BooleanOption( cf=("packages:yum", "use_yum_libraries"), help="Use Python yum libraries"), Bcfg2.Options.PathOption( cf=("packages:yum", "gpg_keypath"), default="/etc/pki/rpm-gpg", help="GPG key path on the client"), Bcfg2.Options.Option( cf=("packages:yum", "*"), dest="yum_options", help="Other yum options to include in generated yum configs")] if HAS_PULP: options.append( Bcfg2.Options.Option( cf=("packages:pulp", "username"), dest="pulp_username", help="Username for Pulp authentication")) options.append( Bcfg2.Options.Option( cf=("packages:pulp", "password"), dest="pulp_password", help="Password for Pulp authentication")) def _setup_pulp(): """ Connect to a Pulp server and pass authentication credentials. This only needs to be called once, but multiple calls won't hurt anything. :returns: :class:`pulp.client.api.server.PulpServer` """ global PULPSERVER, PULPCONFIG if not HAS_PULP: msg = "Packages: Cannot create Pulp collection: Pulp libraries " + \ "not found" LOGGER.error(msg) raise Bcfg2.Server.Plugin.PluginInitError(msg) if PULPSERVER is None: PULPCONFIG = ConsumerConfig() serveropts = PULPCONFIG.server PULPSERVER = server.PulpServer(serveropts['host'], int(serveropts['port']), serveropts['scheme'], serveropts['path']) PULPSERVER.set_basic_auth_credentials( Bcfg2.Options.setup.pulp_username, Bcfg2.Options.setup.pulp_password) server.set_active_server(PULPSERVER) return PULPSERVER class PulpCertificateData(Bcfg2.Server.Plugin.SpecificData): """ Handle pulp consumer certificate data for :class:`PulpCertificateSet` """ def bind_entry(self, entry, _): """ Given an abstract entry, add data to it and return it. :class:`PulpCertificateSet` handles binding entry metadata. 
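For illustration only (the entry name comes from the caller, which uses
:attr:`PulpCertificateSet.certpath`), the bound entry ends up looking
roughly like::

    <BoundPath type="file" name="/etc/pki/consumer/cert.pem">
    ...PEM certificate data...
    </BoundPath>

or, if no certificate data is available for the client, the entry is
marked ``empty="true"`` instead.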
:param entry: The abstract entry to bind data to :type entry: lxml.etree._Element :returns: lxml.etree._Element - the bound entry """ entry.set("type", "file") if self.data: entry.text = self.data else: entry.set("empty", "true") return entry class PulpCertificateSet(Bcfg2.Server.Plugin.EntrySet): """ Handle Pulp consumer certificates. """ #: The path to certificates on consumer machines certpath = "/etc/pki/consumer/cert.pem" def __init__(self, path): """ :param path: The path to the directory where Pulp consumer certificates will be stored :type path: string """ Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(self.certpath), path, PulpCertificateData, "UTF-8") self.metadata = dict(owner='root', group='root', mode='0644', secontext='__default__', important='true', sensitive='true', paranoid=self.metadata['paranoid']) self.fam = Bcfg2.Server.FileMonitor.get_fam() self.fam.AddMonitor(path, self) def HandleEvent(self, event): """ Handle FAM events on certificate files. :param event: The event to handle :type event: Bcfg2.Server.FileMonitor.Event """ if event.filename != self.path: return self.handle_event(event) def write_data(self, data, metadata): """ Write a new certificate to the filesystem. :param data: The new certificate data :type data: string :param metadata: Metadata for the client to write the certificate for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ specific = "%s.H_%s" % (os.path.basename(self.certpath), metadata.hostname) fileloc = os.path.join(self.path, specific) self.logger.info("Packages: Writing certificate data for %s to %s" % (metadata.hostname, fileloc)) try: open(fileloc, 'wb').write(data) except IOError: err = sys.exc_info()[1] self.logger.error("Could not write %s: %s" % (fileloc, err)) return self.verify_file(specific) def verify_file(self, filename): """ Service the FAM events queued up by the key generation so the data structure entries will be available for binding. NOTE: We wait for up to ten seconds. There is some potential for race condition, because if the file monitor doesn't get notified about the new key files in time, those entries won't be available for binding. In practice, this seems "good enough." :param filename: The filename to check for events on :type filename: string """ tries = 0 updated = False while not updated: if tries >= 10: self.logger.error("%s still not registered" % filename) return self.fam.handle_events_in_interval(1) if filename in self.entries: break else: tries += 1 continue class YumCollection(Collection): """ Handle collections of Yum sources. If we're using the yum Python libraries, then this becomes a very full-featured :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` object; if not, then it defers to the :class:`YumSource` object. .. 
private-include: _add_gpg_instances, _get_pulp_consumer """ _helper = None #: Options that are included in the [packages:yum] section of the #: config but that should not be included in the temporary #: yum.conf we write out option_blacklist = ["use_yum_libraries", "helper"] #: :class:`PulpCertificateSet` object used to handle Pulp certs pulp_cert_set = None def __init__(self, metadata, sources, cachepath, basepath, debug=False): Collection.__init__(self, metadata, sources, cachepath, basepath, debug=debug) self.keypath = os.path.join(self.cachepath, "keys") #: A :class:`Bcfg2.Utils.Executor` object to use to run #: external commands self.cmd = Executor() if self.use_yum: #: Define a unique cache file for this collection to use #: for cached yum metadata self.cachefile = os.path.join(self.cachepath, "cache-%s" % self.cachekey) #: The path to the server-side config file used when #: resolving packages with the Python yum libraries self.cfgfile = os.path.join(self.cachefile, "yum.conf") if not os.path.exists(self.cachefile): self.debug_log("Creating common cache %s" % self.cachefile) os.mkdir(self.cachefile) if Bcfg2.Options.setup.packages_metadata: self.setup_data() self.cmd = Executor() else: self.cachefile = None self.cmd = None if HAS_PULP and self.has_pulp_sources: _setup_pulp() if self.pulp_cert_set is None: certdir = os.path.join( self.basepath, "pulp", os.path.basename(PulpCertificateSet.certpath)) try: os.makedirs(certdir) except OSError: err = sys.exc_info()[1] if err.errno == errno.EEXIST: pass else: self.logger.error("Could not create Pulp consumer " "cert directory at %s: %s" % (certdir, err)) self.__class__.pulp_cert_set = PulpCertificateSet(certdir) @property def __package_groups__(self): return True @property def helper(self): """The full path to :file:`bcfg2-yum-helper`. First, we check in the config file to see if it has been explicitly specified; next we see if it's in $PATH; finally we default to /usr/sbin, the default location. """ # pylint: disable=W0212 if not self._helper: self.__class__._helper = Bcfg2.Options.setup.yum_helper if not self.__class__._helper: # first see if bcfg2-yum-helper is in PATH self.debug_log("Checking for bcfg2-yum-helper in $PATH") self.__class__._helper = find_executable('bcfg2-yum-helper') if not self.__class__._helper: self.__class__._helper = "/usr/sbin/bcfg2-yum-helper" return self.__class__._helper # pylint: enable=W0212 @property def use_yum(self): """ True if we should use the yum Python libraries, False otherwise """ return HAS_YUM and Bcfg2.Options.setup.use_yum_libraries @property def has_pulp_sources(self): """ True if there are any Pulp sources to handle, False otherwise """ return any(s.pulp_id for s in self) @property def cachefiles(self): """ A list of the full path to all cachefiles used by this collection.""" cachefiles = set(Collection.cachefiles.fget(self)) if self.cachefile: cachefiles.add(self.cachefile) return list(cachefiles) @track_statistics() def write_config(self): """ Write the server-side config file to :attr:`cfgfile` based on the data from :func:`get_config`""" if not os.path.exists(self.cfgfile): yumconf = self.get_config(raw=True) yumconf.add_section("main") # we set installroot to the cache directory so # bcfg2-yum-helper works with an empty rpmdb. otherwise # the rpmdb is so hopelessly intertwined with yum that we # have to totally reinvent the dependency resolver. 
mainopts = dict(cachedir='/', persistdir='/', installroot=self.cachefile, keepcache="0", debuglevel="0", sslverify="0", reposdir="/dev/null") if Bcfg2.Options.setup.debug: mainopts['debuglevel'] = "5" elif Bcfg2.Options.setup.verbose: mainopts['debuglevel'] = "2" try: for opt, val in Bcfg2.Options.setup.yum_options.items(): if opt not in self.option_blacklist: mainopts[opt] = val except ConfigParser.NoSectionError: pass for opt, val in list(mainopts.items()): yumconf.set("main", opt, val) yumconf.write(open(self.cfgfile, 'w')) def get_arch(self): """ If 'arch' for each source is the same, return that arch, otherwise None. This helps bcfg2-yum-helper when the client arch is incompatible with the bcfg2 server's arch. In case multiple arches are found, punt back to the default behavior. """ arches = set() for source in self: for url_map in source.url_map: if url_map['arch'] in self.metadata.groups: arches.add(url_map['arch']) if len(arches) == 1: return arches.pop() else: return None def get_config(self, raw=False): # pylint: disable=W0221 """ Get the yum configuration for this collection. :param raw: Return a :class:`ConfigParser.SafeConfigParser` object representing the configuration instead of a string. This is useful if you need to modify the config before writing it (as :func:`write_config` does in order to produce a server-specific configuration). :type raw: bool :returns: string or ConfigParser.SafeConfigParser """ config = ConfigParser.SafeConfigParser() for source in self: for url_map in source.url_map: if url_map['arch'] not in self.metadata.groups: continue basereponame = source.get_repo_name(url_map) reponame = basereponame added = False rid = 1 while not added: try: config.add_section(reponame) added = True except ConfigParser.DuplicateSectionError: rid += 1 reponame = "%s-%d" % (basereponame, rid) config.set(reponame, "name", reponame) config.set(reponame, "baseurl", url_map['url']) config.set(reponame, "enabled", "1") if len(source.gpgkeys): config.set(reponame, "gpgcheck", "1") config.set(reponame, "gpgkey", " ".join(source.gpgkeys)) else: config.set(reponame, "gpgcheck", "0") if len(source.blacklist): config.set(reponame, "exclude", " ".join(source.blacklist)) if len(source.whitelist): config.set(reponame, "includepkgs", " ".join(source.whitelist)) if raw: opts = source.server_options else: opts = source.client_options for opt, val in opts.items(): config.set(reponame, opt, val) if raw: return config else: # configparser only writes to file, so we have to use a # StringIO object to get the data out as a string buf = StringIO() config.write(buf) return "# This config was generated automatically by the Bcfg2 " \ "Packages plugin\n\n" + buf.getvalue() @track_statistics() def build_extra_structures(self, independent): """ Add additional entries to the ```` section of the final configuration. This adds several kinds of entries: * For GPG keys, adds a ``Package`` entry that describes the version and release of all expected ``gpg-pubkey`` packages; and ``Path`` entries to copy all of the GPG keys to the appropriate place on the client filesystem. Calls :func:`_add_gpg_instances`. * For Pulp Sources, adds a ``Path`` entry for the consumer certificate; and ``Action`` entries to update the consumer-side Pulp config if the consumer is newly registered. Creates a new Pulp consumer from the Bcfg2 server as necessary. :param independent: The XML tag to add extra entries to. This is modified in place. 
:type independent: lxml.etree._Element """ needkeys = set() for source in self: for key in source.gpgkeys: needkeys.add(key) if len(needkeys): if HAS_YUM: # this must be be HAS_YUM, not use_yum, because # regardless of whether the user wants to use the yum # resolver we want to include gpg key data keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey", type=self.ptype, origin='Packages') else: self.logger.warning("GPGKeys were specified for yum sources " "in sources.xml, but no yum libraries " "were found") self.logger.warning("GPG key version/release data cannot be " "determined automatically") self.logger.warning("Install yum libraries, or manage GPG " "keys manually") keypkg = None for key in needkeys: # figure out the path of the key on the client keydir = Bcfg2.Options.setup.gpg_keypath remotekey = os.path.join(keydir, os.path.basename(key)) localkey = os.path.join(self.keypath, os.path.basename(key)) kdata = open(localkey).read() # copy the key to the client keypath = lxml.etree.Element("BoundPath", name=remotekey, encoding='ascii', owner='root', group='root', type='file', mode='0644', important='true') keypath.text = kdata # hook to add version/release info if possible self._add_gpg_instances(keypkg, localkey, remotekey, keydata=kdata) independent.append(keypath) if keypkg is not None: independent.append(keypkg) if self.has_pulp_sources: consumerapi = ConsumerAPI() consumer = self._get_pulp_consumer(consumerapi=consumerapi) if consumer is None: try: consumer = \ consumerapi.create(self.metadata.hostname, self.metadata.hostname, capabilities=dict(bind=False)) lxml.etree.SubElement( independent, "BoundAction", name="pulp-update", timing="pre", when="always", status="check", command="pulp-consumer consumer update") self.pulp_cert_set.write_data(consumer['certificate'], self.metadata) except server.ServerRequestError: err = sys.exc_info()[1] self.logger.error("Packages: Could not create Pulp " "consumer %s: %s" % (self.metadata.hostname, err)) for source in self: # each pulp source can only have one arch, so we don't # have to check the arch in url_map if (source.pulp_id and source.pulp_id not in consumer['repoids']): try: consumerapi.bind(self.metadata.hostname, source.pulp_id) except server.ServerRequestError: err = sys.exc_info()[1] self.logger.error("Packages: Could not bind %s to " "Pulp repo %s: %s" % (self.metadata.hostname, source.pulp_id, err)) crt = lxml.etree.SubElement(independent, "BoundPath", name=self.pulp_cert_set.certpath) self.pulp_cert_set.bind_entry(crt, self.metadata) @track_statistics() def _get_pulp_consumer(self, consumerapi=None): """ Get a Pulp consumer object for the client. :param consumerapi: A Pulp ConsumerAPI object. If none is passed, one will be instantiated. :type consumerapi: pulp.client.api.consumer.ConsumerAPI :returns: dict - the consumer. Returns None on failure (including if there is no existing Pulp consumer for this client. """ if consumerapi is None: consumerapi = ConsumerAPI() consumer = None try: consumer = consumerapi.consumer(self.metadata.hostname) except server.ServerRequestError: # consumer does not exist pass except socket.error: err = sys.exc_info()[1] self.logger.error("Packages: Could not contact Pulp server: %s" % err) return consumer @track_statistics() def _add_gpg_instances(self, keyentry, localkey, remotekey, keydata=None): """ Add GPG keys instances to a ``Package`` entry. This is called from :func:`build_extra_structures` to add GPG keys to the specification. 
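For example (hypothetical key; the values are derived from the key ID
and timestamp via ``yum.misc.keyIdToRPMVer``), a successfully parsed key
adds an ``Instance`` subelement along the lines of::

    <Instance version="0608b895" release="4bd22942"
              simplefile="/etc/pki/rpm-gpg/RPM-GPG-KEY-example"/>

matching the ``gpg-pubkey-<version>-<release>`` package name that rpm
uses for imported keys.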
:param keyentry: The ``Package`` entry to add key instances to. This will be modified in place. :type keyentry: lxml.etree._Element :param localkey: The full path to the key file on the Bcfg2 server :type localkey: string :param remotekey: The full path to the key file on the client. (If they key is not yet on the client, this will be the full path to where the key file will go eventually.) :type remotekey: string :param keydata: The contents of the key file. If this is not provided, read the data from ``localkey``. :type keydata: string """ # this must be be HAS_YUM, not use_yum, because regardless of # whether the user wants to use the yum resolver we want to # include gpg key data if not HAS_YUM: return if keydata is None: keydata = open(localkey).read() try: kinfo = yum.misc.getgpgkeyinfo(keydata) version = yum.misc.keyIdToRPMVer(kinfo['keyid']) release = yum.misc.keyIdToRPMVer(kinfo['timestamp']) lxml.etree.SubElement(keyentry, 'Instance', version=version, release=release, simplefile=remotekey) except ValueError: err = sys.exc_info()[1] self.logger.error("Packages: Could not read GPG key %s: %s" % (localkey, err)) @track_statistics() def get_groups(self, grouplist): """ If using the yum libraries, given a list of package group names, return a dict of ``: ``. This is much faster than implementing :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_group`, since we have to make a call to the bcfg2 Yum helper, and each time we do that we make another call to yum, which means we set up yum metadata from the cache (hopefully) each time. So resolving ten groups once is much faster than resolving one group ten times. If you are using the builtin yum parser, this raises a warning and returns an empty dict. :param grouplist: The list of groups to query :type grouplist: list of strings - group names :returns: dict of ``: `` In this implementation the packages may be strings or tuples. See :ref:`yum-pkg-objects` for more information. """ if not grouplist: return dict() gdicts = [] for group, ptype in grouplist: if group.startswith("@"): group = group[1:] if not ptype: ptype = "default" gdicts.append(dict(group=group, type=ptype)) if self.use_yum: try: return self.call_helper("get_groups", inputdata=gdicts) except ValueError: return dict() else: pkgs = dict() for gdict in gdicts: pkgs[gdict['group']] = Collection.get_group(self, gdict['group'], gdict['type']) return pkgs def _element_to_pkg(self, el, name): """ Convert a Package or Instance element to a package tuple """ rv = (name, el.get("arch"), el.get("epoch"), el.get("version"), el.get("release")) if rv[3] in ['any', 'auto']: rv = (rv[0], rv[1], rv[2], None, None) # if a package requires no specific version, we just use # the name, not the tuple. this limits the amount of JSON # encoding/decoding that has to be done to pass the # package list to bcfg2-yum-helper. if rv[1:] == (None, None, None, None): return name else: return rv def packages_from_entry(self, entry): """ When using the Python yum libraries, convert a Package entry to a list of package tuples. See :ref:`yum-pkg-objects` and :ref:`pkg-objects` for more information on this process. 
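As an illustrative sketch (hypothetical package data), an entry such
as::

    <Package name="foo">
      <Instance arch="x86_64" version="1.0" release="1.el6"/>
      <Instance arch="i686" version="1.0" release="1.el6"/>
    </Package>

is converted to a list of ``(name, arch, epoch, version, release)``
tuples::

    [("foo", "x86_64", None, "1.0", "1.el6"),
     ("foo", "i686", None, "1.0", "1.el6")]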
:param entry: The Package entry to convert :type entry: lxml.etree._Element :returns: list of tuples """ if not self.use_yum: return Collection.packages_from_entry(self, entry) rv = set() name = entry.get("name") for inst in entry.getchildren(): if inst.tag != "Instance": continue rv.add(self._element_to_pkg(inst, name)) if not rv: rv.add(self._element_to_pkg(entry, name)) return list(rv) def _get_entry_attrs(self, pkgtup): """ Given a package tuple, return a dict of attributes suitable for applying to either a Package or an Instance tag """ attrs = dict(version=Bcfg2.Options.setup.packages_version) if attrs['version'] == 'any' or not isinstance(pkgtup, tuple): return attrs try: if pkgtup[1]: attrs['arch'] = pkgtup[1] if pkgtup[2]: attrs['epoch'] = pkgtup[2] if pkgtup[3]: attrs['version'] = pkgtup[3] if pkgtup[4]: attrs['release'] = pkgtup[4] except IndexError: self.logger.warning("Malformed package tuple: %s" % pkgtup) return attrs def packages_to_entry(self, pkglist, entry): """ When using the Python yum libraries, convert a list of package tuples to a Package entry. See :ref:`yum-pkg-objects` and :ref:`pkg-objects` for more information on this process. If pkglist contains only one package, then its data is converted to a single ``BoundPackage`` entry that is added as a subelement of ``entry``. If pkglist contains more than one package, then a parent ``BoundPackage`` entry is created and child ``Instance`` entries are added to it. :param pkglist: A list of package tuples to convert to an XML Package entry :type pkglist: list of tuples :param entry: The base XML entry to add Package entries to. This is modified in place. :type entry: lxml.etree._Element :returns: None """ if not self.use_yum: return Collection.packages_to_entry(self, pkglist, entry) packages = dict() for pkg in pkglist: try: packages[pkg[0]].append(pkg) except KeyError: packages[pkg[0]] = [pkg] for name, instances in packages.items(): pkgattrs = dict(type=self.ptype, origin='Packages', name=name) if len(instances) > 1: pkg_el = lxml.etree.SubElement(entry, 'BoundPackage', **pkgattrs) for inst in instances: lxml.etree.SubElement(pkg_el, "Instance", self._get_entry_attrs(inst)) else: attrs = self._get_entry_attrs(instances[0]) attrs.update(pkgattrs) lxml.etree.SubElement(entry, 'BoundPackage', **attrs) def get_new_packages(self, initial, complete): """ Compute the difference between the complete package list (as returned by :func:`complete`) and the initial package list computed from the specification, allowing for package tuples. See :ref:`yum-pkg-objects` and :ref:`pkg-objects` for more information on this process. :param initial: The initial package list :type initial: set of strings, but see :ref:`pkg-objects` :param complete: The final package list :type complete: set of strings, but see :ref:`pkg-objects` :return: set of tuples """ initial_names = [] for pkg in initial: if isinstance(pkg, tuple): initial_names.append(pkg[0]) else: initial_names.append(pkg) new = [] for pkg in complete: if isinstance(pkg, tuple): name = pkg[0] else: name = pkg if name not in initial_names: new.append(pkg) return new @track_statistics() def complete(self, packagelist, recommended=None): """ Build a complete list of all packages and their dependencies. When using the Python yum libraries, this defers to the :ref:`bcfg2-yum-helper`; when using the builtin yum parser, this defers to :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.complete`. :param packagelist: Set of initial packages computed from the specification. 
:type packagelist: set of strings, but see :ref:`pkg-objects` :returns: tuple of sets - The first element contains a set of strings (but see :ref:`pkg-objects`) describing the complete package list, and the second element is a set of symbols whose dependencies could not be resolved. """ if not self.use_yum: return Collection.complete(self, packagelist, recommended) lock = FileLock(os.path.join(self.cachefile, "lock")) slept = 0 while lock.is_locked(): if slept > 30: self.logger.warning("Packages: Timeout waiting for yum cache " "to release its lock") return set(), set() self.logger.debug("Packages: Yum cache is locked, waiting...") time.sleep(3) slept += 3 if packagelist: try: helper_dict = dict(packages=list(packagelist), groups=list(self.get_relevant_groups())) arch = self.get_arch() if arch is not None: helper_dict['arch'] = arch result = self.call_helper("complete", helper_dict) except ValueError: # error reported by call_helper() return set(), packagelist # json doesn't understand sets or tuples, so we get back a # lists of lists (packages) and a list of unicode strings # (unknown). turn those into a set of tuples and a set of # strings, respectively. unknown = set([str(u) for u in result['unknown']]) packages = set([tuple(p) for p in result['packages']]) self.filter_unknown(unknown) return packages, unknown else: return set(), set() @track_statistics() def call_helper(self, command, inputdata=None): """ Make a call to :ref:`bcfg2-yum-helper`. The yum libs have horrific memory leaks, so apparently the right way to get around that in long-running processes it to have a short-lived helper. No, seriously -- check out the yum-updatesd code. It's pure madness. :param command: The :ref:`bcfg2-yum-helper` command to call. :type command: string :param inputdata: The input to pass to ``bcfg2-yum-helper`` on stdin. If this is None, no input will be given at all. :type inputdata: Any JSON-encodable data structure. :returns: Varies depending on the return value of the ``bcfg2-yum-helper`` command. """ cmd = [self.helper, "-c", self.cfgfile] if Bcfg2.Options.setup.verbose: cmd.append("-v") if self.debug_flag: cmd.append("-d") cmd.append(command) self.debug_log("Packages: running %s" % " ".join(cmd)) if inputdata: result = self.cmd.run(cmd, timeout=Bcfg2.Options.setup.client_timeout, inputdata=json.dumps(inputdata)) else: result = self.cmd.run(cmd, timeout=Bcfg2.Options.setup.client_timeout) if not result.success: self.logger.error("Packages: error running bcfg2-yum-helper: %s" % result.error) elif result.stderr: self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" % result.stderr) try: return json.loads(result.stdout) except ValueError: if result.stdout: err = sys.exc_info()[1] self.logger.error("Packages: Error reading bcfg2-yum-helper " "output: %s" % err) self.logger.error("Packages: bcfg2-yum-helper output: %s" % result.stdout) else: self.logger.error("Packages: No bcfg2-yum-helper output") raise def setup_data(self, force_update=False): """ Do any collection-level data setup tasks. This is called when sources are loaded or reloaded by :class:`Bcfg2.Server.Plugins.Packages.Packages`. If the builtin yum parsers are in use, this defers to :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.setup_data`. If using the yum Python libraries, this cleans up cached yum metadata, regenerates the server-side yum config (in order to catch any new sources that have been added to this server), then regenerates the yum cache. 
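In practice (an approximation; :func:`call_helper` builds the exact
command line and may add ``-v`` or ``-d`` flags), this amounts to
invoking ``bcfg2-yum-helper -c <cfgfile> clean`` (only when
``force_update`` is set) and then ``bcfg2-yum-helper -c <cfgfile>
makecache``, after regenerating the server-side yum config with
:func:`write_config`.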
:param force_update: Ignore all local cache and setup data from its original upstream sources (i.e., the package repositories) :type force_update: bool """ if not self.use_yum: return Collection.setup_data(self, force_update) if force_update: # clean up data from the old config try: self.call_helper("clean") except ValueError: # error reported by call_helper pass if os.path.exists(self.cfgfile): os.unlink(self.cfgfile) self.write_config() try: self.call_helper("makecache") except ValueError: # error reported by call_helper pass class YumSource(Source): """ Handle yum sources """ #: YumSource sets the ``type`` on Package entries to "yum" ptype = 'yum' def __init__(self, basepath, xsource): self.filemap = dict() self.file_to_arch = dict() self.needed_paths = set() self.packages = dict() self.yumgroups = dict() self.pulp_id = None self.repo = None Source.__init__(self, basepath, xsource) __init__.__doc__ = Source.__init__.__doc__ def _init_attributes(self, xsource): Source._init_attributes(self, xsource) if HAS_PULP and xsource.get("pulp_id"): self.pulp_id = xsource.get("pulp_id") _setup_pulp() repoapi = RepositoryAPI() try: self.repo = repoapi.repository(self.pulp_id) self.gpgkeys = [os.path.join(PULPCONFIG.cds['keyurl'], key) for key in repoapi.listkeys(self.pulp_id)] except server.ServerRequestError: err = sys.exc_info()[1] if err[0] == 401: msg = "Packages: Error authenticating to Pulp: %s" % err[1] elif err[0] == 404: msg = "Packages: Pulp repo id %s not found: %s" % \ (self.pulp_id, err[1]) else: msg = "Packages: Error %d fetching pulp repo %s: %s" % \ (err[0], self.pulp_id, err[1]) raise SourceInitError(msg) except socket.error: err = sys.exc_info()[1] raise SourceInitError("Could not contact Pulp server: %s" % err) self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'], self.repo['relative_path']) self.arches = [self.repo['arch']] self.deps = dict([('global', dict())]) self.provides = dict([('global', dict())]) self.filemap = dict([(x, dict()) for x in ['global'] + self.arches]) _init_attributes.__doc__ = Source._init_attributes.__doc__ @property def use_yum(self): """ True if we should use the yum Python libraries, False otherwise """ return HAS_YUM and Bcfg2.Options.setup.use_yum_libraries def save_state(self): """ If using the builtin yum parser, save state to :attr:`cachefile`. If using the Python yum libraries, yum handles caching and state and this method is a no-op.""" if not self.use_yum: cache = open(self.cachefile, 'wb') cPickle.dump((self.packages, self.deps, self.provides, self.filemap, self.url_map, self.yumgroups), cache, 2) cache.close() def load_state(self): """ If using the builtin yum parser, load saved state from :attr:`cachefile`. If using the Python yum libraries, yum handles caching and state and this method is a no-op.""" if not self.use_yum: data = open(self.cachefile) (self.packages, self.deps, self.provides, self.filemap, self.url_map, self.yumgroups) = cPickle.load(data) @property def urls(self): """ A list of URLs to the base metadata file for each repository described by this source. """ rv = [] for umap in self.url_map: rv.extend(self._get_urls_from_repodata(umap['url'], umap['arch'])) return rv def _get_urls_from_repodata(self, url, arch): """ When using the builtin yum parser, given the base URL of a repository, return the URLs of the various repo metadata files needed to get package data from the repo. If using the yum Python libraries, this just returns ``url`` as it was passed in, but should realistically not be called. 
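For instance (hypothetical repository URL), given
``http://example.com/el6/x86_64/`` the builtin parser fetches
``http://example.com/el6/x86_64/repodata/repomd.xml`` and returns the
full URLs of the ``primary``, ``filelists``, and ``group`` metadata
files listed there.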
:param url: The base URL to the repository (i.e., the directory that contains the ``repodata/`` directory) :type url: string :param arch: The architecture of the directory. :type arch: string :return: list of strings - URLs to metadata files """ if self.use_yum: return [url] rmdurl = '%srepodata/repomd.xml' % url try: repomd = fetch_url(rmdurl) except ValueError: self.logger.error("Packages: Bad url string %s" % rmdurl) return [] except HTTPError: err = sys.exc_info()[1] self.logger.error("Packages: Failed to fetch url %s. code=%s" % (rmdurl, err.code)) return [] except URLError: err = sys.exc_info()[1] self.logger.error("Packages: Failed to fetch url %s. %s" % (rmdurl, err)) return [] try: xdata = lxml.etree.XML(repomd) except lxml.etree.XMLSyntaxError: err = sys.exc_info()[1] self.logger.error("Packages: Failed to process metadata at %s: %s" % (rmdurl, err)) return [] urls = [] for elt in xdata.findall(RPO + 'data'): if elt.get('type') in ['filelists', 'primary', 'group']: floc = elt.find(RPO + 'location') fullurl = url + floc.get('href') urls.append(fullurl) self.file_to_arch[self.escape_url(fullurl)] = arch return urls # pylint: disable=R0911,R0912 # disabling the pylint errors above because we are interesting in # replicating the flow of the RPM code. def _compare_rpm_versions(self, str1, str2): """ Compare RPM versions. This is an attempt to reimplement RPM's rpmvercmp method in python. :param str1: package 1 version string :param str2: package 2 version string :return: 1 - str1 is newer than str2 0 - str1 and str2 are the same version -1 - str2 is newer than str1""" if str1 == str2: return 0 front_strip_re = re.compile('^[^A-Za-z0-9~]+') risdigit = re.compile('(^[0-9]+)') risalpha = re.compile('(^[A-Za-z])') lzeroes = re.compile('^0+') while len(str1) > 0 or len(str2) > 0: str1 = front_strip_re.sub('', str1) str2 = front_strip_re.sub('', str2) if len(str1) == 0 or len(str2) == 0: break # handle the tilde separator if str1[0] == '~' and str2[0] == '~': str1 = str1[1:] str2 = str2[1:] elif str1[0] == '~': return -1 elif str2[0] == '~': return 1 # grab continuous segments from each string isnum = False if risdigit.match(str1): segment1 = risdigit.split(str1)[1] str1 = risdigit.split(str1)[2] if risdigit.match(str2): segment2 = risdigit.split(str2)[1] str2 = risdigit.split(str2)[2] else: segment2 = '' isnum = True else: segment1 = risalpha.split(str1)[1] str1 = risalpha.split(str1)[2] if risalpha.match(str2): segment2 = risalpha.split(str2)[1] str2 = risalpha.split(str2)[2] else: segment2 = '' # numeric segments are always newer than alpha segments if len(segment2) == 0: if isnum: return 1 return -1 if isnum: # discard leading zeroes segment1 = lzeroes.sub('', segment1) segment2 = lzeroes.sub('', segment2) # higher number has more digits if len(segment1) > len(segment2): return 1 elif len(segment2) > len(segment1): return -1 # do a simple string comparison if segment1 > segment2: return 1 elif segment2 > segment1: return -1 # if one of the strings is empty, the version of the longer # string is higher if len(str1) > len(str2): return 1 elif len(str2) > len(str1): return -1 else: return 0 # pylint: enable=R0911,R0912 @track_statistics() def read_files(self): """ When using the builtin yum parser, read and parse locally downloaded metadata files. This diverges from the stock :func:`Bcfg2.Server.Plugins.Packages.Source.Source.read_files` quite a bit. 
""" # we have to read primary.xml first, and filelists.xml afterwards; primaries = list() filelists = list() groups = list() for fname in self.files: if fname.endswith('primary.xml.gz'): primaries.append(fname) elif fname.endswith('filelists.xml.gz'): filelists.append(fname) elif fname.find('comps'): groups.append(fname) for fname in primaries: farch = self.file_to_arch[fname] fdata = lxml.etree.parse(fname).getroot() self.parse_primary(fdata, farch) for fname in filelists: farch = self.file_to_arch[fname] fdata = lxml.etree.parse(fname).getroot() self.parse_filelist(fdata, farch) for fname in groups: fdata = lxml.etree.parse(fname).getroot() self.parse_group(fdata) # merge data sdata = list(self.packages.values()) try: self.packages['global'] = copy.deepcopy(sdata.pop()) except IndexError: self.logger.error("Packages: No packages in repo") self.packages['global'] = set() while sdata: self.packages['global'].update(sdata.pop()) for key in self.packages: if key == 'global': continue self.packages[key] = \ self.packages[key].difference(self.packages['global']) self.save_state() @track_statistics() def parse_filelist(self, data, arch): """ parse filelists.xml.gz data """ if arch not in self.filemap: self.filemap[arch] = dict() for pkg in data.findall(FL + 'package'): for fentry in pkg.findall(FL + 'file'): if fentry.text in self.needed_paths: if fentry.text in self.filemap[arch]: self.filemap[arch][fentry.text].add(pkg.get('name')) else: self.filemap[arch][fentry.text] = \ set([pkg.get('name')]) @track_statistics() def parse_primary(self, data, arch): """ parse primary.xml.gz data """ if arch not in self.packages: self.packages[arch] = set() if arch not in self.deps: self.deps[arch] = {} if arch not in self.provides: self.provides[arch] = {} versionmap = {} for pkg in data.getchildren(): if not pkg.tag.endswith('package'): continue pkgname = pkg.find(XP + 'name').text vtag = pkg.find(XP + 'version') epoch = vtag.get('epoch') version = vtag.get('ver') release = vtag.get('rel') if pkgname in self.packages[arch]: # skip if version older than a previous version if (self._compare_rpm_versions( epoch, versionmap[pkgname]['epoch']) < 0): continue elif (self._compare_rpm_versions( version, versionmap[pkgname]['version']) < 0): continue elif (self._compare_rpm_versions( release, versionmap[pkgname]['release']) < 0): continue versionmap[pkgname] = {} versionmap[pkgname]['epoch'] = epoch versionmap[pkgname]['version'] = version versionmap[pkgname]['release'] = release self.packages[arch].add(pkgname) pdata = pkg.find(XP + 'format') self.deps[arch][pkgname] = set() pre = pdata.find(RP + 'requires') if pre is not None: for entry in pre.getchildren(): self.deps[arch][pkgname].add(entry.get('name')) if entry.get('name').startswith('/'): self.needed_paths.add(entry.get('name')) pro = pdata.find(RP + 'provides') if pro is not None: for entry in pro.getchildren(): prov = entry.get('name') if prov not in self.provides[arch]: self.provides[arch][prov] = list() self.provides[arch][prov].append(pkgname) @track_statistics() def parse_group(self, data): """ parse comps.xml.gz data """ for group in data.getchildren(): if not group.tag.endswith('group'): continue try: groupid = group.xpath('id')[0].text self.yumgroups[groupid] = {'mandatory': list(), 'default': list(), 'optional': list(), 'conditional': list()} except IndexError: continue try: packagelist = group.xpath('packagelist')[0] except IndexError: continue for pkgreq in packagelist.getchildren(): pkgtype = pkgreq.get('type', None) if pkgtype == 'mandatory': 
self.yumgroups[groupid]['mandatory'].append(pkgreq.text) elif pkgtype == 'default': self.yumgroups[groupid]['default'].append(pkgreq.text) elif pkgtype == 'optional': self.yumgroups[groupid]['optional'].append(pkgreq.text) elif pkgtype == 'conditional': self.yumgroups[groupid]['conditional'].append(pkgreq.text) def is_package(self, metadata, package): arch = [a for a in self.arches if a in metadata.groups] if not arch: return False try: return ((package in self.packages['global'] or package in self.packages[arch[0]]) and package not in self.blacklist and (len(self.whitelist) == 0 or package in self.whitelist)) except KeyError: self.logger.debug("Packages: Unable to find %s for arch %s" % (package, arch[0])) return False is_package.__doc__ = Source.is_package.__doc__ def get_vpkgs(self, metadata): if self.use_yum: return dict() rv = Source.get_vpkgs(self, metadata) for arch, fmdata in list(self.filemap.items()): if arch not in metadata.groups and arch != 'global': continue for filename, pkgs in list(fmdata.items()): rv[filename] = pkgs return rv get_vpkgs.__doc__ = Source.get_vpkgs.__doc__ def unknown_filter(self, package): """ By default, :class:`Bcfg2.Server.Plugins.Packages.Source.Source` filters out unknown packages that start with "choice", but that doesn't mean anything to Yum or RPM. Instead, we filter out unknown packages that start with "rpmlib", although this is likely legacy behavior; that would seem to indicate that a package required some RPM feature that isn't provided, which is a bad thing. This should probably go away at some point. :param package: The name of a package that was unknown to the backend :type package: string :returns: bool """ return package.startswith("rpmlib") def filter_unknown(self, unknown): if self.use_yum: filtered = set() for unk in unknown: try: if self.unknown_filter(unk): filtered.update(unk) except AttributeError: try: if self.unknown_filter(unk[0]): filtered.update(unk) except (IndexError, AttributeError): pass unknown.difference_update(filtered) else: Source.filter_unknown(self, unknown) filter_unknown.__doc__ = Source.filter_unknown.__doc__ def setup_data(self, force_update=False): if not self.use_yum: Source.setup_data(self, force_update=force_update) setup_data.__doc__ = \ "``setup_data`` is only used by the builtin yum parser. " + \ Source.setup_data.__doc__ def get_repo_name(self, url_map): """ Try to find a sensible name for a repository. First use a repository's Pulp ID, if it has one; if not, then defer to :class:`Bcfg2.Server.Plugins.Packages.Source.Source.get_repo_name` :param url_map: A single :attr:`url_map` dict, i.e., any single element of :attr:`url_map`. :type url_map: dict :returns: string - the name of the repository. """ if self.pulp_id: return self.pulp_id else: return Source.get_repo_name(self, url_map) def get_group(self, metadata, group, ptype=None): # pylint: disable=W0613 """ Get the list of packages of the given type in a package group. :param group: The name of the group to query :type group: string :param ptype: The type of packages to get, for backends that support multiple package types in package groups (e.g., "recommended," "optional," etc.) 
:type ptype: string :returns: list of strings - package names """ try: yumgroup = self.yumgroups[group] except KeyError: return [] packages = yumgroup['conditional'] + yumgroup['mandatory'] if ptype in ['default', 'optional', 'all']: packages += yumgroup['default'] if ptype in ['optional', 'all']: packages += yumgroup['optional'] return packages src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py000066400000000000000000000336131303523157100222350ustar00rootroot00000000000000""" Libraries for bcfg2-yum-helper plugin, used if yum library support is enabled. The yum libs have horrific memory leaks, so apparently the right way to get around that in long-running processes it to have a short-lived helper. No, seriously -- check out the yum-updatesd code. It's pure madness. """ import os import sys import yum import logging import Bcfg2.Options import Bcfg2.Logger from Bcfg2.Compat import wraps from lockfile import FileLock, LockTimeout try: import json except ImportError: import simplejson as json def pkg_to_tuple(package): """ json doesn't distinguish between tuples and lists, but yum does, so we convert a package in list format to one in tuple format """ if isinstance(package, list): return tuple(package) else: return package def pkgtup_to_string(package): """ given a package tuple, return a human-readable string describing the package """ if package[3] in ['auto', 'any']: return package[0] rv = [package[0], "-"] if package[2]: rv.extend([package[2], ':']) rv.extend([package[3], '-', package[4]]) if package[1]: rv.extend(['.', package[1]]) return ''.join(str(e) for e in rv) class YumHelper(object): """ Yum helper base object """ def __init__(self, cfgfile, verbose=1): self.cfgfile = cfgfile self.yumbase = yum.YumBase() # pylint: disable=E1121,W0212 try: self.yumbase.preconf.debuglevel = verbose self.yumbase.preconf.fn = cfgfile self.yumbase._getConfig() except AttributeError: self.yumbase._getConfig(cfgfile, debuglevel=verbose) # pylint: enable=E1121,W0212 self.logger = logging.getLogger(self.__class__.__name__) class DepSolver(YumHelper): """ Yum dependency solver. This is used for operations that only read from the yum cache, and thus operates in cacheonly mode. 
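A rough usage sketch (the config path is hypothetical; in practice ``bcfg2-yum-helper`` is invoked with the yum config generated by the Packages plugin):

.. code-block:: python

    solver = DepSolver("/path/to/generated/yum.conf", verbose=0)
    solver.groups = ["x86_64"]   # client arch groups; "noarch" is implied
    packages, unknown = solver.complete(["bash", "httpd"])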
""" def __init__(self, cfgfile, verbose=1): YumHelper.__init__(self, cfgfile, verbose=verbose) # internally, yum uses an integer, not a boolean, for conf.cache self.yumbase.conf.cache = 1 self._groups = None def get_groups(self): """ getter for the groups property """ if self._groups is not None: return self._groups else: return ["noarch"] def set_groups(self, groups): """ setter for the groups property """ self._groups = set(groups).union(["noarch"]) groups = property(get_groups, set_groups) def get_package_object(self, pkgtup, silent=False): """ given a package tuple, get a yum package object """ try: matches = yum.packageSack.packagesNewestByName( self.yumbase.pkgSack.searchPkgTuple(pkgtup)) except yum.Errors.PackageSackError: if not silent: self.logger.warning("Package '%s' not found" % self.get_package_name(pkgtup)) matches = [] except yum.Errors.RepoError: err = sys.exc_info()[1] self.logger.error("Temporary failure loading metadata for %s: %s" % (self.get_package_name(pkgtup), err)) matches = [] pkgs = self._filter_arch(matches) if pkgs: return pkgs[0] else: return None def get_group(self, group, ptype="default"): """ Resolve a package group name into a list of packages """ if group.startswith("@"): group = group[1:] try: if self.yumbase.comps.has_group(group): group = self.yumbase.comps.return_group(group) else: self.logger.error("%s is not a valid group" % group) return [] except yum.Errors.GroupsError: err = sys.exc_info()[1] self.logger.warning(err) return [] if ptype == "default": return [p for p, d in list(group.default_packages.items()) if d] elif ptype == "mandatory": return [p for p, m in list(group.mandatory_packages.items()) if m] elif ptype == "optional" or ptype == "all": return group.packages else: self.logger.warning("Unknown group package type '%s'" % ptype) return [] def _filter_arch(self, packages): """ filter packages in the given list that do not have an architecture in the list of groups for this client """ matching = [] for pkg in packages: if pkg.arch in self.groups: matching.append(pkg) else: self.logger.debug("%s has non-matching architecture (%s)" % (pkg, pkg.arch)) if matching: return matching else: # no packages match architecture; we'll assume that the # user knows what s/he is doing and this is a multiarch # box. 
return packages def get_package_name(self, package): """ get the name of a package or virtual package from the internal representation used by this Collection class """ if isinstance(package, tuple): if len(package) == 3: return yum.misc.prco_tuple_to_string(package) else: return pkgtup_to_string(package) else: return str(package) def complete(self, packagelist): """ resolve dependencies and generate a complete package list from the given list of initial packages """ packages = set() unknown = set() for pkg in packagelist: if isinstance(pkg, tuple): pkgtup = pkg else: pkgtup = (pkg, None, None, None, None) pkgobj = self.get_package_object(pkgtup) if not pkgobj: self.logger.debug("Unknown package %s" % self.get_package_name(pkg)) unknown.add(pkg) else: if self.yumbase.tsInfo.exists(pkgtup=pkgobj.pkgtup): self.logger.debug("%s added to transaction multiple times" % pkgobj) else: self.logger.debug("Adding %s to transaction" % pkgobj) self.yumbase.tsInfo.addInstall(pkgobj) self.yumbase.resolveDeps() for txmbr in self.yumbase.tsInfo: packages.add(txmbr.pkgtup) return list(packages), list(unknown) def acquire_lock(func): """ decorator for CacheManager methods that gets and release a lock while the method runs """ @wraps(func) def inner(self, *args, **kwargs): """ Get and release a lock while running the function this wraps. """ self.logger.debug("Acquiring lock at %s" % self.lockfile) while not self.lock.i_am_locking(): try: self.lock.acquire(timeout=60) # wait up to 60 seconds except LockTimeout: self.lock.break_lock() self.lock.acquire() try: func(self, *args, **kwargs) finally: self.lock.release() self.logger.debug("Released lock at %s" % self.lockfile) return inner class CacheManager(YumHelper): """ Yum cache manager. Unlike :class:`DepSolver`, this can write to the yum cache, and so is used for operations that muck with the cache. (Technically, :func:`CacheManager.clean_cache` could be in either DepSolver or CacheManager, but for consistency I've put it here.) """ def __init__(self, cfgfile, verbose=1): YumHelper.__init__(self, cfgfile, verbose=verbose) self.lockfile = \ os.path.join(os.path.dirname(self.yumbase.conf.config_file_path), "lock") self.lock = FileLock(self.lockfile) @acquire_lock def clean_cache(self): """ clean the yum cache """ for mdtype in ["Headers", "Packages", "Sqlite", "Metadata", "ExpireCache"]: # for reasons that are entirely obvious, all of the yum # API clean* methods return a tuple of 0 (zero, always # zero) and a list containing a single message about how # many files were deleted. so useful. thanks, yum. 
msg = getattr(self.yumbase, "clean%s" % mdtype)()[1][0] if not msg.startswith("0 "): self.logger.info(msg) @acquire_lock def populate_cache(self): """ populate the yum cache """ for repo in self.yumbase.repos.findRepos('*'): repo.metadata_expire = 0 repo.mdpolicy = "group:all" self.yumbase.doRepoSetup() self.yumbase.repos.doSetup() for repo in self.yumbase.repos.listEnabled(): # this populates the cache as a side effect repo.repoXML # pylint: disable=W0104 try: repo.getGroups() except yum.Errors.RepoMDError: pass # this repo has no groups self.yumbase.repos.populateSack(mdtype='metadata', cacheonly=1) self.yumbase.repos.populateSack(mdtype='filelists', cacheonly=1) self.yumbase.repos.populateSack(mdtype='otherdata', cacheonly=1) # this does something with the groups cache as a side effect self.yumbase.comps # pylint: disable=W0104 class HelperSubcommand(Bcfg2.Options.Subcommand): """ Base class for all yum helper subcommands """ # the value to JSON encode and print out if the command fails fallback = None # whether or not this command accepts input on stdin accept_input = True # logging level verbosity = 0 def run(self, setup): if Bcfg2.Options.setup.debug: self.verbosity = 5 elif Bcfg2.Options.setup.verbose: self.verbosity = 1 data = None if self.accept_input: try: data = json.loads(sys.stdin.read()) except ValueError: self.logger.error("Error decoding JSON input: %s" % sys.exc_info()[1]) print(json.dumps(self.fallback)) return 2 try: print(json.dumps(self._run(setup, data))) except: # pylint: disable=W0702 self.logger.error("Unexpected error running %s: %s" % (self.__class__.__name__.lower(), sys.exc_info()[1]), exc_info=1) print(json.dumps(self.fallback)) return 2 return 0 def _run(self, setup, data): """ Actually run the command """ raise NotImplementedError class DepSolverSubcommand(HelperSubcommand): # pylint: disable=W0223 """ Base class for helper commands that use the depsolver (i.e., only resolve dependencies, don't modify the cache) """ # DepSolver instance used in _run function depsolver = None def run(self, setup): self.depsolver = DepSolver(Bcfg2.Options.setup.yum_config, self.verbosity) HelperSubcommand.run(self, setup) class CacheManagerSubcommand(HelperSubcommand): # pylint: disable=W0223 """ Base class for helper commands that use the cachemanager (i.e., modify the cache) """ fallback = False accept_input = False # CacheManager instance used in _run function cachemgr = None def run(self, setup): self.cachemgr = CacheManager(Bcfg2.Options.setup.yum_config, self.verbosity) HelperSubcommand.run(self, setup) class Clean(CacheManagerSubcommand): """ Clean the cache """ def _run(self, setup, data): # pylint: disable=W0613 self.cachemgr.clean_cache() return True class MakeCache(CacheManagerSubcommand): """ Update the on-disk cache """ def _run(self, setup, data): # pylint: disable=W0613 self.cachemgr.populate_cache() return True class Complete(DepSolverSubcommand): """ Given an initial set of packages, get a complete set of packages with all dependencies resolved """ fallback = dict(packages=[], unknown=[]) def _run(self, _, data): self.depsolver.groups = data['groups'] self.fallback['unknown'] = data['packages'] (packages, unknown) = self.depsolver.complete( [pkg_to_tuple(p) for p in data['packages']]) return dict(packages=list(packages), unknown=list(unknown)) class GetGroups(DepSolverSubcommand): """ Resolve the given package groups """ def _run(self, _, data): rv = dict() for gdata in data: if "type" in gdata: packages = self.depsolver.get_group(gdata['group'], 
ptype=gdata['type']) else: packages = self.depsolver.get_group(gdata['group']) rv[gdata['group']] = list(packages) return rv Get_Groups = GetGroups # pylint: disable=C0103 class CLI(Bcfg2.Options.CommandRegistry): """ The bcfg2-yum-helper CLI """ options = [ Bcfg2.Options.PathOption( "-c", "--yum-config", help="Yum config file")] def __init__(self): Bcfg2.Options.CommandRegistry.__init__(self) self.register_commands(globals().values(), parent=HelperSubcommand) parser = Bcfg2.Options.get_parser("Bcfg2 yum helper", components=[self]) parser.add_options(self.subcommand_options) parser.parse() self.logger = logging.getLogger(parser.prog) def run(self): """ Run bcfg2-yum-helper """ if not os.path.exists(Bcfg2.Options.setup.yum_config): self.logger.error("Config file %s not found" % Bcfg2.Options.setup.yum_config) return 1 return self.runcommand() src/lib/Bcfg2/Server/Plugins/Packages/__init__.py000066400000000000000000000640701303523157100220630ustar00rootroot00000000000000""" Packages resolves Package entries on the Bcfg2 server in order to present a complete list of Package entries to the client in order to determine the completeness of the client configuration. """ import os import sys import glob import shutil import lxml.etree import Bcfg2.Options import Bcfg2.Server.Cache import Bcfg2.Server.Plugin from Bcfg2.Compat import urlopen, HTTPError, URLError, MutableMapping from Bcfg2.Server.Plugins.Packages.Collection import Collection, \ get_collection_class from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources from Bcfg2.Server.Statistics import track_statistics def packages_boolean(value): """ For historical reasons, the Packages booleans 'resolver' and 'metadata' both accept "enabled" in addition to the normal boolean values. """ if value == 'disabled': return False elif value == 'enabled': return True else: return value class PackagesBackendAction(Bcfg2.Options.ComponentAction): """ ComponentAction to load Packages backends """ bases = ['Bcfg2.Server.Plugins.Packages'] module = True fail_silently = True class OnDemandDict(MutableMapping): """ This maps a set of keys to a set of value-getting functions; the values are populated on-the-fly by the functions as the values are needed (and not before). This is used by :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`; see the docstring for that function for details on why. Unlike a dict, you should not specify values for for the righthand side of this mapping, but functions that get values. E.g.: .. code-block:: python d = OnDemandDict(foo=load_foo, bar=lambda: "bar"); """ def __init__(self, **getters): self._values = dict() self._getters = dict(**getters) def __getitem__(self, key): if key not in self._values: self._values[key] = self._getters[key]() return self._values[key] def __setitem__(self, key, getter): self._getters[key] = getter def __delitem__(self, key): del self._values[key] del self._getters[key] def __len__(self): return len(self._getters) def __iter__(self): return iter(self._getters.keys()) def __repr__(self): rv = dict(self._values) for key in self._getters.keys(): if key not in rv: rv[key] = 'unknown' return str(rv) class Packages(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.StructureValidator, Bcfg2.Server.Plugin.Generator, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.ClientRunHooks): """ Packages resolves Package entries on the Bcfg2 server in order to present a complete list of Package entries to the client in order to determine the completeness of the client configuration. 
It does so by delegating control of package version information to a number of backends, which may parse repository metadata directly or defer to package manager libraries for truly dynamic resolution. .. private-include: _build_packages""" options = [ Bcfg2.Options.Option( cf=("packages", "backends"), dest="packages_backends", help="Packages backends to load", type=Bcfg2.Options.Types.comma_list, action=PackagesBackendAction, default=['Yum', 'Apt', 'Pac', 'Pkgng']), Bcfg2.Options.PathOption( cf=("packages", "cache"), dest="packages_cache", help="Path to the Packages cache", default='/Packages/cache'), Bcfg2.Options.Option( cf=("packages", "resolver"), dest="packages_resolver", help="Disable the Packages resolver", type=packages_boolean, default=True), Bcfg2.Options.Option( cf=("packages", "metadata"), dest="packages_metadata", help="Disable all Packages metadata processing", type=packages_boolean, default=True), Bcfg2.Options.Option( cf=("packages", "version"), dest="packages_version", help="Set default Package entry version", default="auto", choices=["auto", "any"]), Bcfg2.Options.PathOption( cf=("packages", "yum_config"), help="The default path for generated yum configs", default="/etc/yum.repos.d/bcfg2.repo"), Bcfg2.Options.PathOption( cf=("packages", "apt_config"), help="The default path for generated apt configs", default="/etc/apt/sources.list.d/" "bcfg2-packages-generated-sources.list")] #: Packages is an alternative to #: :mod:`Bcfg2.Server.Plugins.Pkgmgr` and conflicts with it. conflicts = ['Pkgmgr'] #: Packages exposes two additional XML-RPC calls, :func:`Refresh` #: and :func:`Reload` __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload'] def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.StructureValidator.__init__(self) Bcfg2.Server.Plugin.Generator.__init__(self) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) #: Packages does a potentially tremendous amount of on-disk #: caching. ``cachepath`` holds the base directory to where #: data should be cached. self.cachepath = Bcfg2.Options.setup.packages_cache #: Where Packages should store downloaded GPG key files self.keypath = os.path.join(self.cachepath, 'keys') if not os.path.exists(self.keypath): # create key directory if needed os.makedirs(self.keypath) # pylint: disable=C0301 #: The #: :class:`Bcfg2.Server.Plugins.Packages.PackagesSources.PackagesSources` #: object used to generate #: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects for #: this plugin. self.sources = PackagesSources(os.path.join(self.data, "sources.xml"), self.cachepath, self) #: We cache #: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` #: objects in ``collections`` so that calling :func:`Refresh` #: or :func:`Reload` can tell the collection objects to clean #: up their cache, but we don't actually use the cache to #: return a ``Collection`` object when one is requested, #: because that prevents new machines from working, since a #: ``Collection`` object gets created by #: :func:`get_additional_data`, which is called for all #: clients at server startup and various other times. (It #: would also prevent machines that change groups from working #: properly; e.g., if you reinstall a machine with a new OS, #: then returning a cached ``Collection`` object would give #: the wrong sources to that client.) 
These are keyed by the #: collection #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`, #: a unique key identifying the collection by its *config*, #: which could be shared among multiple clients. self.collections = Bcfg2.Server.Cache.Cache("Packages", "collections") #: clients is a cache mapping of hostname -> #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey` #: Unlike :attr:`collections`, this _is_ used to return a #: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` #: object when one is requested, so each entry is very #: short-lived -- it's purged at the end of each client run. self.clients = Bcfg2.Server.Cache.Cache("Packages", "cache") # pylint: enable=C0301 __init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__ def set_debug(self, debug): rv = Bcfg2.Server.Plugin.Plugin.set_debug(self, debug) self.sources.set_debug(debug) for collection in self.collections.values(): collection.set_debug(debug) return rv set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__ def create_config(self, entry, metadata): """ Create yum/apt config for the specified client. :param entry: The base entry to bind. This will be modified in place. :type entry: lxml.etree._Element :param metadata: The client to create the config for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ attrib = dict(encoding='ascii', owner='root', group='root', type='file', mode='0644', important='true') collection = self.get_collection(metadata) entry.text = collection.get_config() for (key, value) in list(attrib.items()): entry.attrib.__setitem__(key, value) def get_config(self, metadata): """ Get yum/apt config, as a string, for the specified client. :param metadata: The client to create the config for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ return self.get_collection(metadata).get_config() def HandleEntry(self, entry, metadata): """ Bind configuration entries. ``HandleEntry`` handles entries two different ways: * All ``Package`` entries have their ``version`` and ``type`` attributes set according to the appropriate :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` object for this client. * ``Path`` entries are delegated to :func:`create_config` :param entry: The entry to bind :type entry: lxml.etree._Element :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: lxml.etree._Element - The fully bound entry """ if entry.tag == 'Package': collection = self.get_collection(metadata) entry.set('version', Bcfg2.Options.setup.packages_version) entry.set('type', collection.ptype) elif entry.tag == 'Path': self.create_config(entry, metadata) return entry def HandlesEntry(self, entry, metadata): """ Determine if the given entry can be handled. Packages handles two kinds of entries: * ``Package`` entries are handled if the client has any sources at all. * ``Path`` entries are handled if they match the paths that are handled by a backend that can produce client configurations, e.g., :attr:`YUM_CONFIG_DEFAULT`, :attr:`APT_CONFIG_DEFAULT`, or the overridden value of either of those from the configuration. 
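For example (a sketch using the default config paths shown in the plugin options; ``plugin`` is assumed to be a Packages instance and ``metadata`` a client metadata object):

.. code-block:: python

    plugin.HandlesEntry(
        lxml.etree.Element("Package", name="bash"), metadata)        # True
    plugin.HandlesEntry(
        lxml.etree.Element("Path", name="/etc/yum.repos.d/bcfg2.repo"),
        metadata)                                                     # True
    plugin.HandlesEntry(
        lxml.etree.Element("Path", name="/etc/motd"), metadata)       # False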
:param entry: The entry to bind :type entry: lxml.etree._Element :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: bool - Whether or not this plugin can handle the entry :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError` """ if entry.tag == 'Package': return True elif entry.tag == 'Path': # managed entries for yum/apt configs if entry.get("name") in [Bcfg2.Options.setup.apt_config, Bcfg2.Options.setup.yum_config]: return True return False @track_statistics() def validate_structures(self, metadata, structures): """ Do the real work of Packages. This does two things: #. Given the full list of all packages that apply to this client from the specification, calls :func:`_build_packages` to resolve dependencies, determine unknown packages (i.e., those that are not in any repository that applies to this client), and build a complete package list. #. Calls :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.build_extra_structures` to add any other extra data required by the backend (e.g., GPG keys) :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param structures: A list of lxml.etree._Element objects describing the structures (i.e., bundles) for this client. This can be modified in place. :type structures: list of lxml.etree._Element objects :returns: None """ collection = self.get_collection(metadata) indep = lxml.etree.Element('Independent', name=self.__class__.__name__) self._build_packages(metadata, indep, structures, collection=collection) collection.build_extra_structures(indep) structures.append(indep) @track_statistics() def _build_packages(self, metadata, independent, # pylint: disable=R0914 structures, collection=None): """ Perform dependency resolution and build the complete list of packages that need to be included in the specification by :func:`validate_structures`, based on the initial list of packages. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :param independent: The XML tag to add package entries generated by dependency resolution to. This will be modified in place. :type independent: lxml.etree._Element :param structures: A list of lxml.etree._Element objects describing the structures (i.e., bundles) for this client :type structures: list of lxml.etree._Element objects :param collection: The collection of sources for this client. If none is given, one will be created with :func:`get_collection` :type collection: Bcfg2.Server.Plugins.Packages.Collection.Collection """ if (not Bcfg2.Options.setup.packages_metadata or not Bcfg2.Options.setup.packages_resolver): # Config requests no resolver. Note that disabling # metadata implies disabling the resolver. 
for struct in structures: for pkg in struct.xpath('//Package | //BoundPackage'): if pkg.get("group"): if pkg.get("type"): pkg.set("choose", pkg.get("type")) return if collection is None: collection = self.get_collection(metadata) initial = set() to_remove = [] groups = [] recommended = dict() for struct in structures: for pkg in struct.xpath('//Package | //BoundPackage'): if pkg.get("name"): initial.update(collection.packages_from_entry(pkg)) if pkg.get("recommended"): recommended[pkg.get("name")] = pkg.get("recommended") elif pkg.get("group"): groups.append((pkg.get("group"), pkg.get("type"))) to_remove.append(pkg) else: self.logger.error( "Packages: Malformed Package: %s" % lxml.etree.tostring( pkg, xml_declaration=False).decode('UTF-8')) # base is the set of initial packages explicitly given in the # specification, packages from expanded package groups, and # packages essential to the distribution base = set(initial) # remove package groups for el in to_remove: el.getparent().remove(el) groups.sort() # check for this set of groups in the group cache gcache = Bcfg2.Server.Cache.Cache("Packages", "pkg_groups", collection.cachekey) gkey = hash(tuple(groups)) if gkey not in gcache: gcache[gkey] = collection.get_groups(groups) for pkgs in gcache[gkey].values(): base.update(pkgs) # essential pkgs are those marked as such by the distribution base.update(collection.get_essential()) # check for this set of packages in the package cache pkey = hash(tuple(base)) pcache = Bcfg2.Server.Cache.Cache("Packages", "pkg_sets", collection.cachekey) if pkey not in pcache: pcache[pkey] = collection.complete(base, recommended) packages, unknown = pcache[pkey] if unknown: self.logger.info("Packages: Got %d unknown entries" % len(unknown)) self.logger.info("Packages: %s" % list(unknown)) newpkgs = collection.get_new_packages(initial, packages) self.debug_log("Packages: %d base, %d complete, %d new" % (len(base), len(packages), len(newpkgs))) newpkgs.sort() collection.packages_to_entry(newpkgs, independent) @track_statistics() def Refresh(self): """ Packages.Refresh() => True|False Reload configuration specification and download sources """ self._load_config(force_update=True) return True @track_statistics() def Reload(self): """ Packages.Refresh() => True|False Reload configuration specification and sources """ self._load_config() return True def child_reload(self, _=None): """ Reload the Packages configuration on a child process. """ self.Reload() def _load_config(self, force_update=False): """ Load the configuration data and setup sources :param force_update: Ignore all locally cached and downloaded data and fetch the metadata anew from the upstream repository. :type force_update: bool """ self._load_sources(force_update) self._load_gpg_keys(force_update) def _load_sources(self, force_update): """ Load sources from the config, downloading if necessary. :param force_update: Ignore all locally cached and downloaded data and fetch the metadata anew from the upstream repository. 
:type force_update: bool """ cachefiles = set() for collection in list(self.collections.values()): cachefiles.update(collection.cachefiles) if Bcfg2.Options.setup.packages_metadata: collection.setup_data(force_update) # clear Collection and package caches Bcfg2.Server.Cache.expire("Packages") for source in self.sources.entries: cachefiles.add(source.cachefile) if Bcfg2.Options.setup.packages_metadata: source.setup_data(force_update) for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")): if cfile not in cachefiles: try: if os.path.isdir(cfile): shutil.rmtree(cfile) else: os.unlink(cfile) except OSError: err = sys.exc_info()[1] self.logger.error("Packages: Could not remove cache file " "%s: %s" % (cfile, err)) def _load_gpg_keys(self, force_update): """ Load GPG keys from the config, downloading if necessary. :param force_update: Ignore all locally cached and downloaded data and fetch the metadata anew from the upstream repository. :type force_update: bool """ keyfiles = [] keys = [] for source in self.sources.entries: for key in source.gpgkeys: localfile = os.path.join(self.keypath, os.path.basename(key.rstrip("/"))) if localfile not in keyfiles: keyfiles.append(localfile) if ((force_update and key not in keys) or not os.path.exists(localfile)): self.logger.info("Packages: Downloading and parsing %s" % key) try: open(localfile, 'w').write(urlopen(key).read()) keys.append(key) except (URLError, HTTPError): err = sys.exc_info()[1] self.logger.error("Packages: Error downloading %s: %s" % (key, err)) except IOError: err = sys.exc_info()[1] self.logger.error("Packages: Error writing %s to %s: " "%s" % (key, localfile, err)) for kfile in glob.glob(os.path.join(self.keypath, "*")): if kfile not in keyfiles: os.unlink(kfile) @track_statistics() def get_collection(self, metadata): """ Get a :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` object for this client. :param metadata: The client metadata to get a Collection for :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: An instance of the appropriate subclass of :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` that contains all relevant sources that apply to the given client """ if not self.sources.loaded: # if sources.xml has not received a FAM event yet, defer; # instantiate a dummy Collection object return Collection(metadata, [], self.cachepath, self.data) if metadata.hostname in self.clients: return self.collections[self.clients[metadata.hostname]] sclasses = set() relevant = list() for source in self.sources.entries: if source.applies(metadata): relevant.append(source) sclasses.update([source.__class__]) if len(sclasses) > 1: self.logger.warning("Packages: Multiple source types found for " "%s: %s" % (metadata.hostname, ",".join([s.__name__ for s in sclasses]))) cclass = Collection elif len(sclasses) == 0: self.logger.error("Packages: No sources found for %s" % metadata.hostname) cclass = Collection else: cclass = get_collection_class( sclasses.pop().__name__.replace("Source", "")) if self.debug_flag: self.logger.error("Packages: Using %s for Collection of sources " "for %s" % (cclass.__name__, metadata.hostname)) collection = cclass(metadata, relevant, self.cachepath, self.data, debug=self.debug_flag) ckey = collection.cachekey if cclass != Collection: self.clients[metadata.hostname] = ckey self.collections[ckey] = collection return collection def get_additional_data(self, metadata): """ Return additional data for the given client. 
This will be an :class:`Bcfg2.Server.Plugins.Packages.OnDemandDict` containing two keys: * ``sources``, whose value is a list of data returned from :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_additional_data`, namely, a list of :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map` data; and * ``get_config``, whose value is the :func:`Bcfg2.Server.Plugins.Packages.Packages.get_config` function, which can be used to get the Packages config for other systems. This uses an OnDemandDict instead of just a normal dict because loading a source collection can be a fairly time-consuming process, particularly for the first time. As a result, when all metadata objects are built at once (such as after the server is restarted, or far more frequently if Metadata caching is disabled), this function would be a major bottleneck if we tried to build all collections at the same time. Instead, they're merely built on-demand. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :return: dict of lists of ``url_map`` data """ def get_sources(): """ getter for the 'sources' key of the OnDemandDict returned by this function. This delays calling get_collection() until it's absolutely necessary. """ return self.get_collection(metadata).get_additional_data() return OnDemandDict( sources=get_sources, get_config=lambda: self.get_config) def end_client_run(self, metadata): """ Hook to clear the cache for this client in :attr:`clients`, which must persist only the duration of a client run. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ self.clients.expire(metadata.hostname) def end_statistics(self, metadata): """ Hook to clear the cache for this client in :attr:`clients` once statistics are processed to ensure that a stray cached :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection` object is not built during statistics and preserved until a subsequent client run. :param metadata: The client metadata :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata """ self.end_client_run(metadata) src/lib/Bcfg2/Server/Plugins/Pkgmgr.py000066400000000000000000000271301303523157100200310ustar00rootroot00000000000000'''This module implements a package management scheme for all images''' import re import sys import logging import lxml.etree import Bcfg2.Server.Plugin from Bcfg2.Server.Plugin import PluginExecutionError logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr') class FuzzyDict(dict): fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)') def __getitem__(self, key): if isinstance(key, str): mdata = self.fuzzy.match(key) if mdata: return dict.__getitem__(self, mdata.groupdict()['name']) else: print("got non-string key %s" % str(key)) return dict.__getitem__(self, key) def __contains__(self, key): if isinstance(key, str): mdata = self.fuzzy.match(key) if mdata: return dict.__contains__(self, mdata.groupdict()['name']) else: print("got non-string key %s" % str(key)) return dict.__contains__(self, key) def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: if default: return default raise class PNode(object): """PNode has a list of packages available at a particular group intersection.
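The ``Package`` contents of each node are stored in a :class:`FuzzyDict`, so lookups of keys like ``name:arch1,arch2`` fall back to the bare package name. A minimal sketch:

.. code-block:: python

    pkgs = FuzzyDict()
    pkgs['openssl'] = {'version': 'auto'}
    pkgs['openssl:i686,x86_64']    # -> {'version': 'auto'}
    'openssl:i686,x86_64' in pkgs  # -> True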
""" splitters = dict( rpm=re.compile( r'^(.*/)?(?P[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + r'(?P[\w\d\.]+-([\w\d\.]+))\.(?P\S+)\.rpm$'), encap=re.compile( r'^(?P[\w-]+)-(?P[\w\d\.+-]+).encap.*$')) raw = dict( Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)", Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)") nraw = dict( Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)", Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)") containers = ['Group', 'Client'] ignore = ['Package'] def __init__(self, data, pdict, parent=None): # copy local attributes to all child nodes if no local attribute exists if 'Package' not in pdict: pdict['Package'] = set() for child in data.getchildren(): attrs = set(data.attrib.keys()).difference( child.attrib.keys() + ['name']) for attr in attrs: try: child.set(attr, data.get(attr)) except: # don't fail on things like comments and other # immutable elements pass self.data = data self.contents = {} if parent is None: self.predicate = lambda m, e: True else: predicate = parent.predicate if data.get('negate', 'false').lower() == 'true': psrc = self.nraw else: psrc = self.raw if data.tag in list(psrc.keys()): self.predicate = eval(psrc[data.tag] % {'name': data.get('name')}, {'predicate': predicate}) else: raise PluginExecutionError("Unknown tag: %s" % data.tag) self.children = [] self._load_children(data, pdict) if 'Package' not in self.contents: self.contents['Package'] = FuzzyDict() for pkg in data.findall('./Package'): if ('name' in pkg.attrib and pkg.get('name') not in pdict['Package']): pdict['Package'].add(pkg.get('name')) if pkg.get('name') is not None: self.contents['Package'][pkg.get('name')] = {} if pkg.getchildren(): self.contents['Package'][pkg.get('name')]['__children__'] \ = pkg.getchildren() if 'simplefile' in pkg.attrib: pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile'))) self.contents['Package'][pkg.get('name')].update(pkg.attrib) else: if 'file' in pkg.attrib: if 'multiarch' in pkg.attrib: archs = pkg.get('multiarch').split() srcs = pkg.get('srcs', pkg.get('multiarch')).split() url = ' '.join( ["%s/%s" % (pkg.get('uri'), pkg.get('file') % {'src': srcs[idx], 'arch': archs[idx]}) for idx in range(len(archs))]) pkg.set('url', url) else: pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file'))) if (pkg.get('type') in self.splitters and pkg.get('file') is not None): mdata = \ self.splitters[pkg.get('type')].match(pkg.get('file')) if not mdata: logger.error("Failed to match pkg %s" % pkg.get('file')) continue pkgname = mdata.group('name') self.contents['Package'][pkgname] = mdata.groupdict() self.contents['Package'][pkgname].update(pkg.attrib) if pkg.attrib.get('file'): self.contents['Package'][pkgname]['url'] = \ pkg.get('url') self.contents['Package'][pkgname]['type'] = \ pkg.get('type') if pkg.get('verify'): self.contents['Package'][pkgname]['verify'] = \ pkg.get('verify') if pkg.get('multiarch'): self.contents['Package'][pkgname]['multiarch'] = \ pkg.get('multiarch') if pkgname not in pdict['Package']: pdict['Package'].add(pkgname) if pkg.getchildren(): self.contents['Package'][pkgname]['__children__'] = \ pkg.getchildren() else: self.contents['Package'][pkg.get('name')].update( pkg.attrib) def _load_children(self, data, idict): """ load children """ for item in data.getchildren(): if item.tag in self.ignore: continue elif item.tag in self.containers: self.children.append(self.__class__(item, idict, self)) else: try: self.contents[item.tag][item.get('name')] = \ dict(item.attrib) except 
KeyError: self.contents[item.tag] = \ {item.get('name'): dict(item.attrib)} if item.text: self.contents[item.tag][item.get('name')]['__text__'] = \ item.text if item.getchildren(): self.contents[item.tag][item.get('name')]['__children__'] \ = item.getchildren() try: idict[item.tag].append(item.get('name')) except KeyError: idict[item.tag] = [item.get('name')] def Match(self, metadata, data, entry=lxml.etree.Element("None")): """Return a dictionary of package mappings.""" if self.predicate(metadata, entry): for key in self.contents: data.setdefault(key, FuzzyDict).update(self.contents[key]) for child in self.children: child.Match(metadata, data) class PkgSrc(Bcfg2.Server.Plugin.XMLFileBacked): """ XMLSrc files contain a :class:`Bcfg2.Server.Plugin.helpers.INode` hierarchy that returns matching entries. XMLSrc objects are deprecated and :class:`Bcfg2.Server.Plugin.helpers.StructFile` should be preferred where possible.""" __node__ = PNode __cacheobj__ = FuzzyDict __priority_required__ = True def __init__(self, filename, should_monitor=False): Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, should_monitor) self.items = {} self.cache = None self.pnode = None self.priority = -1 def HandleEvent(self, _=None): """Read file upon update.""" try: data = open(self.name).read() except IOError: msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1]) logger.error(msg) raise PluginExecutionError(msg) self.items = {} try: xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: msg = "Failed to parse file %s: %s" % (self.name, sys.exc_info()[1]) logger.error(msg) raise PluginExecutionError(msg) self.pnode = self.__node__(xdata, self.items) self.cache = None try: self.priority = int(xdata.get('priority')) except (ValueError, TypeError): if self.__priority_required__: msg = "Got bogus priority %s for file %s" % \ (xdata.get('priority'), self.name) logger.error(msg) raise PluginExecutionError(msg) del xdata, data def Cache(self, metadata): """Build a package dict for a given host.""" if self.cache is None or self.cache[0] != metadata: cache = (metadata, self.__cacheobj__()) if self.pnode is None: logger.error("Cache method called early for %s; " "forcing data load" % self.name) self.HandleEvent() return self.pnode.Match(metadata, cache[1]) self.cache = cache def __str__(self): return str(self.items) class Pkgmgr(Bcfg2.Server.Plugin.PrioDir): """This is a generator that handles package assignments.""" name = 'Pkgmgr' __author__ = 'bcfg-dev@mcs.anl.gov' __child__ = PkgSrc __element__ = 'Package' def HandleEvent(self, event): '''Handle events and update dispatch table''' Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event) for src in list(self.entries.values()): for itype, children in list(src.items.items()): for child in children: try: self.Entries[itype][child] = self.BindEntry except KeyError: self.Entries[itype] = FuzzyDict([(child, self.BindEntry)]) def BindEntry(self, entry, metadata): """Bind data for entry, and remove instances that are not requested.""" pname = entry.get('name') Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata) if entry.findall('Instance'): mdata = FuzzyDict.fuzzy.match(pname) if mdata: arches = mdata.group('alist').split(',') for inst in entry.findall('Instance'): if inst.get('arch') not in arches: entry.remove(inst) def HandlesEntry(self, entry, metadata): return ( entry.tag == 'Package' and entry.get('name').split(':')[0] in self.Entries['Package'].keys()) def HandleEntry(self, entry, metadata): 
self.BindEntry(entry, metadata) src/lib/Bcfg2/Server/Plugins/Probes.py000066400000000000000000000461071303523157100200410ustar00rootroot00000000000000""" A plugin to gather information from a client machine """ import re import os import sys import time import copy import operator import lxml.etree import Bcfg2.Server import Bcfg2.Server.Cache import Bcfg2.Server.Plugin from Bcfg2.Compat import unicode, any # pylint: disable=W0622 import Bcfg2.Server.FileMonitor from Bcfg2.Logger import Debuggable from Bcfg2.Server.Statistics import track_statistics try: from django.db import models HAS_DJANGO = True except ImportError: HAS_DJANGO = False HAS_DJANGO = False # pylint: disable=C0103 ProbesDataModel = None ProbesGroupsModel = None # pylint: enable=C0103 def load_django_models(): """ Load models for Django after option parsing has completed """ # pylint: disable=W0602 global ProbesDataModel, ProbesGroupsModel # pylint: enable=W0602 if not HAS_DJANGO: return class ProbesDataModel(models.Model, # pylint: disable=W0621,W0612 Bcfg2.Server.Plugin.PluginDatabaseModel): """ The database model for storing probe data """ hostname = models.CharField(max_length=255) probe = models.CharField(max_length=255) timestamp = models.DateTimeField(auto_now=True) data = models.TextField(null=True) class ProbesGroupsModel(models.Model, # pylint: disable=W0621,W0612 Bcfg2.Server.Plugin.PluginDatabaseModel): """ The database model for storing probe groups """ hostname = models.CharField(max_length=255) group = models.CharField(max_length=255) try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 HAS_JSON = True except (ImportError, AttributeError): try: import simplejson as json HAS_JSON = True except ImportError: HAS_JSON = False try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False class ProbeStore(Debuggable): """ Caching abstraction layer between persistent probe data storage and the Probes plugin.""" def __init__(self, core, datadir): # pylint: disable=W0613 Debuggable.__init__(self) self.core = core self._groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups") self._datacache = Bcfg2.Server.Cache.Cache("Probes", "probedata") def get_groups(self, hostname): """ Get the list of groups for the given host """ if hostname not in self._groupcache: self._load_groups(hostname) return self._groupcache.get(hostname, []) def set_groups(self, hostname, groups): """ Set the list of groups for the given host """ raise NotImplementedError def get_data(self, hostname): """ Get a dict of probe data for the given host """ if hostname not in self._datacache: self._load_data(hostname) return self._datacache.get(hostname, dict()) def set_data(self, hostname, data): """ Set probe data for the given host """ raise NotImplementedError def _load_groups(self, hostname): """ When probe groups are not found in the cache, this function is called to load them from the backend (XML or database). """ raise NotImplementedError def _load_data(self, hostname): """ When probe groups are not found in the cache, this function is called to load them from the backend (XML or database). """ raise NotImplementedError def commit(self): """ Commit the current data in the cache to the persistent backend store. This is not used with the :class:`Bcfg2.Server.Plugins.Probes.DBProbeStore`, because it commits on every change. """ pass class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked): """ Caching abstraction layer between the database and the Probes plugin. 
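Both this class and :class:`XMLProbeStore` implement the same small store API, sketched below (the hostname, probe name, and ``store`` variable are hypothetical):

.. code-block:: python

    store.set_groups("client.example.com", ["linux", "webserver"])
    store.set_data("client.example.com", {"has_java": ProbeData("yes")})
    store.commit()   # no-op here; the database backend commits on change
    store.get_groups("client.example.com")  # -> ["linux", "webserver"]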
""" create = False def __init__(self, core, datadir): Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core) ProbeStore.__init__(self, core, datadir) @property def _use_db(self): return True def _load_groups(self, hostname): Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname) groupdata = ProbesGroupsModel.objects.filter(hostname=hostname) self._groupcache[hostname] = list(set(r.group for r in groupdata)) self.core.metadata_cache.expire(hostname) @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock def set_groups(self, hostname, groups): Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname) olddata = self._groupcache.get(hostname, []) self._groupcache[hostname] = groups for group in groups: try: ProbesGroupsModel.objects.get_or_create( hostname=hostname, group=group) except ProbesGroupsModel.MultipleObjectsReturned: ProbesGroupsModel.objects.filter(hostname=hostname, group=group).delete() ProbesGroupsModel.objects.get_or_create( hostname=hostname, group=group) ProbesGroupsModel.objects.filter( hostname=hostname).exclude(group__in=groups).delete() if olddata != groups: self.core.metadata_cache.expire(hostname) def _load_data(self, hostname): Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname) Bcfg2.Server.Cache.expire("Probes", "probedata", hostname) self._datacache[hostname] = ClientProbeDataSet() ts_set = False for pdata in ProbesDataModel.objects.filter(hostname=hostname): if not ts_set: self._datacache[hostname].timestamp = \ time.mktime(pdata.timestamp.timetuple()) ts_set = True self._datacache[hostname][pdata.probe] = ProbeData(pdata.data) self.core.metadata_cache.expire(hostname) @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock def set_data(self, hostname, data): Bcfg2.Server.Cache.expire("Probes", "probedata", hostname) self._datacache[hostname] = ClientProbeDataSet() expire_metadata = False for probe, pdata in data.items(): self._datacache[hostname][probe] = pdata try: record, created = ProbesDataModel.objects.get_or_create( hostname=hostname, probe=probe) except ProbesDataModel.MultipleObjectsReturned: ProbesDataModel.objects.filter(hostname=hostname, probe=probe).delete() record, created = ProbesDataModel.objects.get_or_create( hostname=hostname, probe=probe) expire_metadata |= created if record.data != pdata: record.data = pdata record.save() expire_metadata = True qset = ProbesDataModel.objects.filter( hostname=hostname).exclude(probe__in=data.keys()) if len(qset): qset.delete() expire_metadata = True if expire_metadata: self.core.metadata_cache.expire(hostname) class XMLProbeStore(ProbeStore): """ Caching abstraction layer between ``probed.xml`` and the Probes plugin.""" def __init__(self, core, datadir): ProbeStore.__init__(self, core, datadir) self._fname = os.path.join(datadir, 'probed.xml') self._load_data() def _load_data(self, _=None): """ Load probe data from probed.xml """ Bcfg2.Server.Cache.expire("Probes", "probegroups") Bcfg2.Server.Cache.expire("Probes", "probedata") if not os.path.exists(self._fname): self.commit() try: data = lxml.etree.parse(self._fname, parser=Bcfg2.Server.XMLParser).getroot() except (IOError, lxml.etree.XMLSyntaxError): err = sys.exc_info()[1] self.logger.error("Failed to read file probed.xml: %s" % err) return for client in data.getchildren(): self._datacache[client.get('name')] = \ ClientProbeDataSet(timestamp=client.get("timestamp")) self._groupcache[client.get('name')] = [] for pdata in client: if pdata.tag == 'Probe': self._datacache[client.get('name')][pdata.get('name')] = \ ProbeData(pdata.get("value")) elif pdata.tag 
== 'Group': self._groupcache[client.get('name')].append( pdata.get('name')) self.core.metadata_cache.expire() def _load_groups(self, hostname): self._load_data(hostname) def commit(self): """ Write received probe data to probed.xml """ top = lxml.etree.Element("Probed") for client, probed in sorted(self._datacache.items()): # make a copy of probe data for this client in case it # submits probe data while we're trying to write # probed.xml probedata = copy.copy(probed) ctag = \ lxml.etree.SubElement(top, 'Client', name=client, timestamp=str(int(probedata.timestamp))) for probe in sorted(probedata): try: lxml.etree.SubElement( ctag, 'Probe', name=probe, value=self._datacache[client][probe].decode('utf-8')) except AttributeError: lxml.etree.SubElement( ctag, 'Probe', name=probe, value=self._datacache[client][probe]) for group in sorted(self._groupcache[client]): lxml.etree.SubElement(ctag, "Group", name=group) try: top.getroottree().write(self._fname, xml_declaration=False, pretty_print='true') except IOError: err = sys.exc_info()[1] self.logger.error("Failed to write %s: %s" % (self._fname, err)) def set_groups(self, hostname, groups): Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname) olddata = self._groupcache.get(hostname, []) self._groupcache[hostname] = groups if olddata != groups: self.core.metadata_cache.expire(hostname) def set_data(self, hostname, data): Bcfg2.Server.Cache.expire("Probes", "probedata", hostname) self._datacache[hostname] = ClientProbeDataSet() expire_metadata = False for probe, pdata in data.items(): olddata = self._datacache[hostname].get(probe, ProbeData('')) self._datacache[hostname][probe] = pdata expire_metadata |= olddata != data if expire_metadata: self.core.metadata_cache.expire(hostname) class ClientProbeDataSet(dict): """ dict of probe => [probe data] that records a timestamp for each host """ def __init__(self, *args, **kwargs): if "timestamp" in kwargs and kwargs['timestamp'] is not None: self.timestamp = kwargs.pop("timestamp") else: self.timestamp = time.time() dict.__init__(self, *args, **kwargs) class ProbeData(str): # pylint: disable=E0012,R0924 """ a ProbeData object emulates a str object, but also has .xdata, .json, and .yaml properties to provide convenient ways to use ProbeData objects as XML, JSON, or YAML data """ def __new__(cls, data): if isinstance(data, unicode): return str.__new__(cls, data.encode('utf-8')) else: return str.__new__(cls, data) def __init__(self, data): # pylint: disable=W0613 str.__init__(self) self._xdata = None self._json = None self._yaml = None @property def data(self): """ provide backwards compatibility with broken ProbeData object in bcfg2 1.2.0 thru 1.2.2 """ return str(self) @property def xdata(self): """ The probe data as a lxml.etree._Element document """ if self._xdata is None: try: self._xdata = lxml.etree.XML(self.data, parser=Bcfg2.Server.XMLParser) except lxml.etree.XMLSyntaxError: pass return self._xdata @property def json(self): """ The probe data as a decoded JSON data structure """ if self._json is None and HAS_JSON: try: self._json = json.loads(self.data) except ValueError: pass return self._json @property def yaml(self): """ The probe data as a decoded YAML data structure """ if self._yaml is None and HAS_YAML: try: self._yaml = yaml.load(self.data) except yaml.YAMLError: pass return self._yaml class ProbeSet(Bcfg2.Server.Plugin.EntrySet): """ Handle universal and group- and host-specific probe files """ ignore = re.compile(r'^(\.#.*|.*~|\..*\.(tmp|sw[px])|probed\.xml)$') probename = \ 
re.compile(r'(.*/)?(?P\S+?)(\.(?P(?:G\d\d)|H)_\S+)?$') bangline = re.compile(r'^#!\s*(?P.*)$') basename_is_regex = True def __init__(self, path, plugin_name): self.plugin_name = plugin_name Bcfg2.Server.Plugin.EntrySet.__init__(self, r'[0-9A-Za-z_\-]+', path, Bcfg2.Server.Plugin.SpecificData) Bcfg2.Server.FileMonitor.get_fam().AddMonitor(path, self) def HandleEvent(self, event): """ handle events on everything but probed.xml """ if (event.filename != self.path and not event.filename.endswith("probed.xml")): return self.handle_event(event) def get_probe_data(self, metadata): """ Get an XML description of all probes for a client suitable for sending to that client. :param metadata: The client metadata to get probes for. :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata :returns: list of lxml.etree._Element objects, each of which represents one probe. """ ret = [] build = dict() candidates = self.get_matching(metadata) candidates.sort(key=operator.attrgetter('specific')) for entry in candidates: rem = self.probename.match(entry.name) pname = rem.group('basename') if pname not in build: build[pname] = entry for (name, entry) in list(build.items()): probe = lxml.etree.Element('probe') probe.set('name', os.path.basename(name)) probe.set('source', self.plugin_name) if (metadata.version_info and metadata.version_info > (1, 3, 1, '', 0)): try: probe.text = entry.data.decode('utf-8') except AttributeError: probe.text = entry.data else: try: probe.text = entry.data except ValueError: self.logger.error("Client unable to handle unicode " "probes. Skipping %s" % probe.get('name')) continue match = self.bangline.match(entry.data.split('\n')[0]) if match: probe.set('interpreter', match.group('interpreter')) else: probe.set('interpreter', '/bin/sh') ret.append(probe) return ret def __str__(self): return "ProbeSet for %s" % self.plugin_name class Probes(Bcfg2.Server.Plugin.Probing, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.DatabaseBacked): """ A plugin to gather information from a client machine """ __author__ = 'bcfg-dev@mcs.anl.gov' groupline_re = re.compile(r'^group:\s*(?P\S+)\s*') options = [ Bcfg2.Options.BooleanOption( cf=('probes', 'use_database'), dest="probes_db", help="Use database capabilities of the Probes plugin"), Bcfg2.Options.Option( cf=('probes', 'allowed_groups'), dest="probes_allowed_groups", help="Whitespace-separated list of group name regexps to which " "probes can assign a client", default=[re.compile('.*')], type=Bcfg2.Options.Types.anchored_regex_list)] options_parsed_hook = staticmethod(load_django_models) def __init__(self, core): Bcfg2.Server.Plugin.Probing.__init__(self) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core) self.probes = ProbeSet(self.data, self.name) if self._use_db: self.probestore = DBProbeStore(core, self.data) else: self.probestore = XMLProbeStore(core, self.data) @track_statistics() def GetProbes(self, metadata): return self.probes.get_probe_data(metadata) def ReceiveData(self, client, datalist): cgroups = set() cdata = dict() for data in datalist: groups, cdata[data.get("name")] = \ self.ReceiveDataItem(client, data) cgroups.update(groups) self.probestore.set_groups(client.hostname, list(cgroups)) self.probestore.set_data(client.hostname, cdata) self.probestore.commit() def ReceiveDataItem(self, client, data): """ Receive probe results pertaining to client. Returns a tuple of (, ). 
""" if data.text is None: self.logger.info("Got null response to probe %s from %s" % (data.get('name'), client.hostname)) return [], '' dlines = data.text.split('\n') self.logger.debug("Processing probe from %s: %s:%s" % (client.hostname, data.get('name'), [line.strip() for line in dlines])) groups = [] for line in dlines[:]: match = self.groupline_re.match(line) if match: newgroup = match.group("groupname") if self._group_allowed(newgroup): groups.append(newgroup) else: self.logger.warning( "Disallowed group assignment %s from %s" % (newgroup, client.hostname)) dlines.remove(line) return (groups, ProbeData("\n".join(dlines))) def get_additional_groups(self, metadata): return self.probestore.get_groups(metadata.hostname) def get_additional_data(self, metadata): return self.probestore.get_data(metadata.hostname) def _group_allowed(self, group): """ Determine if the named group can be set as a probe group by checking the regexes listed in the [probes] groups_allowed setting """ return any(r.match(group) for r in Bcfg2.Options.setup.probes_allowed_groups) src/lib/Bcfg2/Server/Plugins/Properties.py000066400000000000000000000236641303523157100207460ustar00rootroot00000000000000""" The properties plugin maps property files into client metadata instances. """ import os import re import sys import copy import logging import lxml.etree import Bcfg2.Options import Bcfg2.Server.Plugin from Bcfg2.Server.Plugin import PluginExecutionError try: import json # py2.4 json library is structured differently json.loads # pylint: disable=W0104 HAS_JSON = True except (ImportError, AttributeError): try: import simplejson as json HAS_JSON = True except ImportError: HAS_JSON = False try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False LOGGER = logging.getLogger(__name__) class PropertyFile(object): """ Base Properties file handler """ def __init__(self, name, core): """ :param name: The filename of this properties file. :type name: string :param core: The Bcfg2.Server.Core initializing the Properties plugin :type core: Bcfg2.Server.Core .. automethod:: _write """ self.name = name self.core = core def write(self): """ Write the data in this data structure back to the property file. This public method performs checking to ensure that writing is possible and then calls :func:`_write`. """ if not Bcfg2.Options.setup.writes_enabled: raise PluginExecutionError("Properties files write-back is " "disabled in the configuration") try: self.validate_data() except PluginExecutionError: msg = "Cannot write %s: %s" % (self.name, sys.exc_info()[1]) LOGGER.error(msg) raise PluginExecutionError(msg) try: return self._write() except IOError: err = sys.exc_info()[1] msg = "Failed to write %s: %s" % (self.name, err) LOGGER.error(msg) raise PluginExecutionError(msg) def _write(self): """ Write the data in this data structure back to the property file. """ raise NotImplementedError def _expire_metadata_cache(self): """ Expires the metadata cache, if it is required by the caching mode. """ if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire() def validate_data(self): """ Verify that the data in this file is valid. """ raise NotImplementedError def get_additional_data(self, metadata): # pylint: disable=W0613 """ Get file data for inclusion in client metadata. 
""" return copy.copy(self) class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile): """Handle JSON Properties files.""" def __init__(self, name, core): Bcfg2.Server.Plugin.FileBacked.__init__(self, name) PropertyFile.__init__(self, name, core) self.json = None def Index(self): try: self.json = json.loads(self.data) except ValueError: err = sys.exc_info()[1] raise PluginExecutionError("Could not load JSON data from %s: %s" % (self.name, err)) self._expire_metadata_cache() Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__ def _write(self): json.dump(self.json, open(self.name, 'wb')) return True _write.__doc__ = PropertyFile._write.__doc__ def validate_data(self): try: json.dumps(self.json) except TypeError: err = sys.exc_info()[1] raise PluginExecutionError("Data for %s cannot be dumped to JSON: " "%s" % (self.name, err)) validate_data.__doc__ = PropertyFile.validate_data.__doc__ def __str__(self): return str(self.json) def __repr__(self): return repr(self.json) class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile): """ Handle YAML Properties files. """ def __init__(self, name, core): Bcfg2.Server.Plugin.FileBacked.__init__(self, name) PropertyFile.__init__(self, name, core) self.yaml = None def Index(self): try: self.yaml = yaml.load(self.data) except yaml.YAMLError: err = sys.exc_info()[1] raise PluginExecutionError("Could not load YAML data from %s: %s" % (self.name, err)) self._expire_metadata_cache() Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__ def _write(self): yaml.dump(self.yaml, open(self.name, 'wb')) return True _write.__doc__ = PropertyFile._write.__doc__ def validate_data(self): try: yaml.dump(self.yaml) except yaml.YAMLError: err = sys.exc_info()[1] raise PluginExecutionError("Data for %s cannot be dumped to YAML: " "%s" % (self.name, err)) validate_data.__doc__ = PropertyFile.validate_data.__doc__ def __str__(self): return str(self.yaml) def __repr__(self): return repr(self.yaml) class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile): """ Handle XML Properties files. 
""" def __init__(self, name, core, should_monitor=False): Bcfg2.Server.Plugin.StructFile.__init__(self, name, should_monitor=should_monitor) PropertyFile.__init__(self, name, core) def Index(self): Bcfg2.Server.Plugin.StructFile.Index(self) self._expire_metadata_cache() Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__ def _write(self): open(self.name, "wb").write( lxml.etree.tostring(self.xdata, xml_declaration=False, pretty_print=True).decode('UTF-8')) return True def validate_data(self): """ ensure that the data in this object validates against the XML schema for this property file (if a schema exists) """ schemafile = self.name.replace(".xml", ".xsd") if os.path.exists(schemafile): try: schema = lxml.etree.XMLSchema(file=schemafile) except lxml.etree.XMLSchemaParseError: err = sys.exc_info()[1] raise PluginExecutionError("Failed to process schema for %s: " "%s" % (self.name, err)) else: # no schema exists return True if not schema.validate(self.xdata): raise PluginExecutionError("Data for %s fails to validate; run " "bcfg2-lint for more details" % self.name) else: return True def get_additional_data(self, metadata): if Bcfg2.Options.setup.automatch: default_automatch = "true" else: default_automatch = "false" if self.xdata.get("automatch", default_automatch).lower() == "true": return self.XMLMatch(metadata) else: return copy.copy(self) def __str__(self): return str(self.xdata) def __repr__(self): return repr(self.xdata) class Properties(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.DirectoryBacked): """ The properties plugin maps property files into client metadata instances. """ options = [ Bcfg2.Options.BooleanOption( cf=("properties", "writes_enabled"), default=True, help="Enable or disable Properties write-back"), Bcfg2.Options.BooleanOption( cf=("properties", "automatch"), help="Enable Properties automatch")] #: Extensions that are understood by Properties. extensions = ["xml"] if HAS_JSON: extensions.append("json") if HAS_YAML: extensions.extend(["yaml", "yml"]) #: Only track and include files whose names and paths match this #: regex. Created on-the-fly based on which libraries are #: installed (and thus which data formats are supported). #: Candidates are ``.xml`` (always supported), ``.json``, #: ``.yaml``, and ``.yml``. patterns = re.compile(r'.*\.%s$' % '|'.join(extensions)) #: Ignore XML schema (``.xsd``) files ignore = re.compile(r'.*\.xsd$') def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data) #: Instead of creating children of this object with a static #: object, we use :func:`property_dispatcher` to create a #: child of the appropriate subclass of :class:`PropertyFile` self.__child__ = self.property_dispatcher __init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__ def property_dispatcher(self, fname): """ Dispatch an event on a Properties file to the appropriate object. 
:param fname: The name of the file that received the event :type fname: string :returns: An object of the appropriate subclass of :class:`PropertyFile` """ if fname.endswith(".xml"): return XMLPropertyFile(fname, self.core) elif HAS_JSON and fname.endswith(".json"): return JSONPropertyFile(fname, self.core) elif HAS_YAML and (fname.endswith(".yaml") or fname.endswith(".yml")): return YAMLPropertyFile(fname, self.core) else: raise Bcfg2.Server.Plugin.PluginExecutionError( "Properties: Unknown extension %s" % fname) def get_additional_data(self, metadata): rv = dict() for fname, pfile in self.entries.items(): rv[fname] = pfile.get_additional_data(metadata) return rv get_additional_data.__doc__ = \ Bcfg2.Server.Plugin.Connector.get_additional_data.__doc__ src/lib/Bcfg2/Server/Plugins/PuppetENC.py000066400000000000000000000124271303523157100204100ustar00rootroot00000000000000""" A plugin to run Puppet external node classifiers """ import os import sys import Bcfg2.Server import Bcfg2.Server.Plugin from Bcfg2.Utils import Executor try: from syck import load as yaml_load, error as yaml_error except ImportError: try: from yaml import load as yaml_load, YAMLError as yaml_error except ImportError: raise ImportError("No yaml library could be found") class PuppetENCFile(Bcfg2.Server.Plugin.FileBacked): """ A representation of a Puppet external node classifier script """ def HandleEvent(self, event=None): return class PuppetENC(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.ClientRunHooks, Bcfg2.Server.Plugin.DirectoryBacked): """ A plugin to run Puppet external node classifiers (http://docs.puppetlabs.com/guides/external_nodes.html) """ __child__ = PuppetENCFile def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data) self.cache = dict() self.cmd = Executor() def _run_encs(self, metadata): """ Run all Puppet ENCs """ cache = dict(groups=[], params=dict()) for enc in self.entries.keys(): epath = os.path.join(self.data, enc) self.debug_log("PuppetENC: Running ENC %s for %s" % (enc, metadata.hostname)) result = self.cmd.run([epath, metadata.hostname]) if not result.success: msg = "PuppetENC: Error running ENC %s for %s: %s" % \ (enc, metadata.hostname, result.error) self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) if result.stderr: self.debug_log("ENC Error: %s" % result.stderr) try: yaml = yaml_load(result.stdout) self.debug_log("Loaded data from %s for %s: %s" % (enc, metadata.hostname, yaml)) except yaml_error: err = sys.exc_info()[1] msg = "Error decoding YAML from %s for %s: %s" % \ (enc, metadata.hostname, err) self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) groups = yaml.get("classes", yaml.get("groups", dict())) if groups: if isinstance(groups, list): self.debug_log("ENC %s adding groups to %s: %s" % (enc, metadata.hostname, groups)) cache['groups'].extend(groups) else: self.debug_log("ENC %s adding groups to %s: %s" % (enc, metadata.hostname, groups.keys())) for group, params in groups.items(): cache['groups'].append(group) if params: cache['params'].update(params) if "parameters" in yaml and yaml['parameters']: cache['params'].update(yaml['parameters']) if "environment" in yaml: self.logger.info("Ignoring unsupported environment section of " "ENC %s for %s" % (enc, metadata.hostname)) self.cache[metadata.hostname] = cache def 
get_additional_groups(self, metadata):
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['groups']

    def get_additional_data(self, metadata):
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['params']

    def end_client_run(self, metadata):
        """ clear the entire cache at the end of each client run. this
        guarantees that each client will run all ENCs at or near the
        start of each run; we have to clear the entire cache instead of
        just the cache for this client because a client that builds
        templates that use metadata for other clients will populate the
        cache for those clients, which we don't want.

        This makes the caching less than stellar, but it does prevent
        multiple runs of ENCs for a single host a) for groups and data
        separately; and b) when a single client's metadata is generated
        multiple times by separate templates """
        self.cache = dict()
        if self.core.metadata_cache_mode == 'aggressive':
            # clear the metadata client cache if we're in aggressive
            # mode, and produce a warning.  PuppetENC really isn't
            # compatible with aggressive mode, since we don't know
            # when the output from a given ENC has changed, and thus
            # can't invalidate the cache sanely.
            self.logger.warning("PuppetENC is incompatible with aggressive "
                                "client metadata caching, try 'cautious' or "
                                "'initial' instead")
            self.core.metadata_cache.expire()

    def end_statistics(self, metadata):
        self.end_client_run(metadata)
src/lib/Bcfg2/Server/Plugins/Reporting.py000066400000000000000000000100311303523157100205430ustar00rootroot00000000000000""" Unified statistics and reporting plugin """

import sys
import time
import platform
import lxml.etree

import Bcfg2.Options
from Bcfg2.Reporting.Transport.base import TransportError
from Bcfg2.Server.Plugin import Statistics, PullSource, Threaded, \
    PluginInitError, PluginExecutionError

try:
    import django
    if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
        HAS_REPORTING = True
    else:
        import south  # pylint: disable=W0611
        HAS_REPORTING = True
except ImportError:
    HAS_REPORTING = False


def _rpc_call(method):
    """ Given the name of a Reporting Transport method, get a function
    that defers an XML-RPC call to that method """
    def _real_rpc_call(self, *args, **kwargs):
        """Wrapper for calls to the reporting collector"""
        try:
            return self.transport.rpc(method, *args, **kwargs)
        except TransportError:
            # this is needed for Admin.Pull
            raise PluginExecutionError(sys.exc_info()[1])
    return _real_rpc_call


# pylint: disable=W0223
class Reporting(Statistics, Threaded, PullSource):
    """ Unified statistics and reporting plugin """
    __rmi__ = Statistics.__rmi__ + ['Ping', 'GetExtra', 'GetCurrentEntry']

    options = [Bcfg2.Options.Common.reporting_transport]

    CLIENT_METADATA_FIELDS = ('profile', 'bundles', 'aliases', 'addresses',
                              'groups', 'categories', 'uuid', 'version')

    def __init__(self, core):
        Statistics.__init__(self, core)
        PullSource.__init__(self)
        Threaded.__init__(self)

        self.whoami = platform.node()
        self.transport = None

        if not HAS_REPORTING:
            msg = "Django 1.7+ or Django south is required for Reporting"
            self.logger.error(msg)
            raise PluginInitError(msg)

        # This must be loaded here for bcfg2-admin
        try:
            self.transport = Bcfg2.Options.setup.reporting_transport()
        except TransportError:
            raise PluginInitError("%s: Failed to instantiate transport: %s"
                                  % (self.name, sys.exc_info()[1]))
        if self.debug_flag:
            self.transport.set_debug(self.debug_flag)

    def start_threads(self):
        """Nothing to do here"""
        pass

    def set_debug(self, debug):
        rv =
Statistics.set_debug(self, debug) if self.transport is not None: self.transport.set_debug(debug) return rv def process_statistics(self, client, xdata): stats = xdata.find("Statistics") stats.set('time', time.asctime(time.localtime())) cdata = {'server': self.whoami} for field in self.CLIENT_METADATA_FIELDS: try: value = getattr(client, field) except AttributeError: continue if value: if isinstance(value, set): value = [v for v in value] cdata[field] = value # try 3 times to store the data for i in [1, 2, 3]: try: self.transport.store( client.hostname, cdata, lxml.etree.tostring( stats, xml_declaration=False)) self.debug_log("%s: Queued statistics data for %s" % (self.__class__.__name__, client.hostname)) return except TransportError: continue except: self.logger.error("%s: Attempt %s: Failed to add statistic: %s" % (self.__class__.__name__, i, sys.exc_info()[1])) raise PluginExecutionError("%s: Retry limit reached for %s" % (self.__class__.__name__, client.hostname)) def shutdown(self): super(Reporting, self).shutdown() if self.transport: self.transport.shutdown() Ping = _rpc_call('Ping') GetExtra = _rpc_call('GetExtra') GetCurrentEntry = _rpc_call('GetCurrentEntry') src/lib/Bcfg2/Server/Plugins/Rules.py000066400000000000000000000053621303523157100176770ustar00rootroot00000000000000"""This generator provides rule-based entry mappings.""" import copy import re import string import Bcfg2.Options import Bcfg2.Server.Plugin class NameTemplate(string.Template): """Simple subclass of string.Template with a custom delimiter.""" delimiter = '%' class Rules(Bcfg2.Server.Plugin.PrioDir): """This is a generator that handles service assignments.""" __author__ = 'bcfg-dev@mcs.anl.gov' options = Bcfg2.Server.Plugin.PrioDir.options + [ Bcfg2.Options.BooleanOption( cf=("rules", "regex"), dest="rules_regex", help="Allow regular expressions in Rules"), Bcfg2.Options.BooleanOption( cf=("rules", "replace_name"), dest="rules_replace_name", help="Replace %{name} in attributes with name of target entry")] def __init__(self, core): Bcfg2.Server.Plugin.PrioDir.__init__(self, core) self._regex_cache = dict() def HandlesEntry(self, entry, metadata): for src in self.entries.values(): for candidate in src.XMLMatch(metadata).xpath("//%s" % entry.tag): if self._matches(entry, metadata, candidate): return True return False HandleEntry = Bcfg2.Server.Plugin.PrioDir.BindEntry def _matches(self, entry, metadata, candidate): if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata, candidate): return True elif (entry.tag == "Path" and entry.get('name').rstrip("/") == candidate.get("name").rstrip("/")): # special case for Path tags: # http://trac.mcs.anl.gov/projects/bcfg2/ticket/967 return True elif self._regex_enabled: # attempt regular expression matching rule = candidate.get("name") if rule not in self._regex_cache: self._regex_cache[rule] = re.compile("%s$" % rule) if self._regex_cache[rule].match(entry.get('name')): return True return False def _apply(self, entry, data): if self._replace_name_enabled: data = copy.deepcopy(data) for key, val in list(data.attrib.items()): data.attrib[key] = NameTemplate(val).safe_substitute( name=entry.get('name')) Bcfg2.Server.Plugin.PrioDir._apply(self, entry, data) @property def _regex_enabled(self): """ Return True if rules regexes are enabled, False otherwise """ return Bcfg2.Options.setup.rules_regex @property def _replace_name_enabled(self): """ Return True if the replace_name feature is enabled, False otherwise """ return Bcfg2.Options.setup.rules_replace_name 
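# -----------------------------------------------------------------------------
# Illustrative sketch (an annotation, not part of the Bcfg2 source tree): the
# Rules plugin above supports two optional behaviours -- regex entry names
# (the [rules] regex setting) and %{name} substitution in attribute values
# (the [rules] replace_name setting).  The minimal standalone example below
# mirrors that logic with plain stdlib pieces so the two features can be tried
# outside a running server.  The helper function names and the sample
# "frobnicator" rule/entry names are made up for illustration only.
import re
import string


class NameTemplate(string.Template):
    """Same idea as Rules.NameTemplate: string.Template with '%' delimiter."""
    delimiter = '%'


def rule_matches(rule_name, entry_name):
    """Anchored regex match, like Rules._matches() when regexes are enabled."""
    return re.compile("%s$" % rule_name).match(entry_name) is not None


def substitute_name(attrib, entry_name):
    """Expand %{name} in attribute values, like Rules._apply() does."""
    return dict((key, NameTemplate(val).safe_substitute(name=entry_name))
                for key, val in attrib.items())


if __name__ == '__main__':
    # A hypothetical regex rule name matching any "frobnicator-N" entry.
    assert rule_matches(r'frobnicator-\d+', 'frobnicator-17')
    # %{name} in a bound attribute expands to the target entry's name.
    print(substitute_name({'command': '/etc/init.d/%{name} reload'},
                          'frobnicator-17'))
    # -> {'command': '/etc/init.d/frobnicator-17 reload'}
# -----------------------------------------------------------------------------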
src/lib/Bcfg2/Server/Plugins/SEModules.py000066400000000000000000000061511303523157100204420ustar00rootroot00000000000000""" The SEModules plugin handles SELinux module entries. It supports group- and host-specific module versions, and enabling/disabling modules. You can use ``tools/selinux_baseline.py`` to create a baseline of all of your installed modules. See :ref:`server-selinux` for more information. """ import os import Bcfg2.Server.Plugin from Bcfg2.Compat import b64encode class SEModuleData(Bcfg2.Server.Plugin.SpecificData): """ Representation of a single SELinux module file. Encodes the data using base64 automatically """ def bind_entry(self, entry, _): """ Return a fully-bound entry. The module data is automatically encoded with base64. :param entry: The abstract entry to bind the module for :type entry: lxml.etree._Element :returns: lxml.etree._Element - the fully bound entry """ entry.set('encoding', 'base64') entry.text = b64encode(self.data) return entry class SEModules(Bcfg2.Server.Plugin.GroupSpool): """ Handle SELinux 'module' entries """ __author__ = 'chris.a.st.pierre@gmail.com' #: SEModules is a :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` #: that uses :class:`Bcfg2.Server.Plugins.SEModules.SEModuleData` #: objects as its EntrySet children. es_child_cls = SEModuleData #: SEModules manages ``SEModule`` entries entry_type = 'SEModule' def _get_module_filename(self, entry): """ GroupSpool stores entries as /foo.pp, but we want people to be able to specify module entries as name='foo' or name='foo.pp', so we put this abstraction in between """ if entry.get("name").endswith(".pp"): name = entry.get("name") else: name = entry.get("name") + ".pp" return "/" + name def _get_module_name(self, entry): """ On the client we do most of our logic on just the module name, but we want people to be able to specify module entries as name='foo' or name='foo.pp', so we put this abstraction in between""" if entry.get("name").endswith(".pp"): name = entry.get("name")[:-3] else: name = entry.get("name") return name.lstrip("/") def HandlesEntry(self, entry, metadata): if entry.tag in self.Entries: return self._get_module_filename(entry) in self.Entries[entry.tag] return Bcfg2.Server.Plugin.GroupSpool.HandlesEntry(self, entry, metadata) HandlesEntry.__doc__ = Bcfg2.Server.Plugin.GroupSpool.HandlesEntry.__doc__ def HandleEntry(self, entry, metadata): entry.set("name", self._get_module_name(entry)) bind = self.Entries[entry.tag][self._get_module_filename(entry)] return bind(entry, metadata) HandleEntry.__doc__ = Bcfg2.Server.Plugin.GroupSpool.HandleEntry.__doc__ def add_entry(self, event): self.filename_pattern = \ os.path.basename(os.path.dirname(self.event_path(event))) Bcfg2.Server.Plugin.GroupSpool.add_entry(self, event) add_entry.__doc__ = Bcfg2.Server.Plugin.GroupSpool.add_entry.__doc__ src/lib/Bcfg2/Server/Plugins/SSHbase.py000066400000000000000000000517451303523157100201030ustar00rootroot00000000000000"""This module manages ssh key files for bcfg2""" import re import os import sys import socket import shutil import tempfile import lxml.etree import Bcfg2.Options import Bcfg2.Server.Plugin from itertools import chain from Bcfg2.Utils import Executor from Bcfg2.Server.Plugin import PluginExecutionError from Bcfg2.Compat import any, u_str, b64encode # pylint: disable=W0622 try: from Bcfg2.Server.Encryption import ssl_encrypt, bruteforce_decrypt, \ EVPError HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False class KeyData(Bcfg2.Server.Plugin.SpecificData): """ class to handle key data 
for HostKeyEntrySet """ def __lt__(self, other): return self.name < other.name def bind_entry(self, entry, _): """ Bind the entry with the data of this key :param entry: The abstract entry to bind. This will be modified in place. :type entry: lxml.etree._Element :returns: None """ entry.set('type', 'file') if entry.get('encoding') == 'base64': entry.text = b64encode(self.data) else: try: entry.text = u_str(self.data, Bcfg2.Options.setup.encoding) except UnicodeDecodeError: msg = "Failed to decode %s: %s" % (entry.get('name'), sys.exc_info()[1]) self.logger.error(msg) self.logger.error("Please verify you are using the proper " "encoding") raise Bcfg2.Server.Plugin.PluginExecutionError(msg) except ValueError: msg = "Error in specification for %s: %s" % (entry.get('name'), sys.exc_info()[1]) self.logger.error(msg) self.logger.error("You need to specify base64 encoding for %s" % entry.get('name')) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) if entry.text in ['', None]: entry.set('empty', 'true') def handle_event(self, event): Bcfg2.Server.Plugin.SpecificData.handle_event(self, event) if event.filename.endswith(".crypt"): if self.data is None: return # todo: let the user specify a passphrase by name try: self.data = bruteforce_decrypt(self.data) except EVPError: raise PluginExecutionError("Failed to decrypt %s" % self.name) class HostKeyEntrySet(Bcfg2.Server.Plugin.EntrySet): """ EntrySet to handle all kinds of host keys """ def __init__(self, basename, path): Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, KeyData) self.metadata = {'owner': 'root', 'group': 'root', 'type': 'file'} if basename.startswith("ssh_host_key"): self.metadata['encoding'] = "base64" if basename.endswith('.pub'): self.metadata['mode'] = '0644' else: self.metadata['mode'] = '0600' def specificity_from_filename(self, fname, specific=None): if fname.endswith(".crypt"): fname = fname[0:-6] return Bcfg2.Server.Plugin.EntrySet.specificity_from_filename( self, fname, specific=specific) class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet): """ EntrySet to handle the ssh_known_hosts file """ def __init__(self, path): Bcfg2.Server.Plugin.EntrySet.__init__(self, "ssh_known_hosts", path, KeyData) self.metadata = {'owner': 'root', 'group': 'root', 'type': 'file', 'mode': '0644'} class SSHbase(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector, Bcfg2.Server.Plugin.Generator, Bcfg2.Server.Plugin.PullTarget): """ The sshbase generator manages ssh host keys (both v1 and v2) for hosts. It also manages the ssh_known_hosts file. It can integrate host keys from other management domains and similarly export its keys. The repository contains files in the following formats: ssh_host_key.H_(hostname) -> the v1 host private key for (hostname) ssh_host_key.pub.H_(hostname) -> the v1 host public key for (hostname) ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host private key for (hostname) ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host public key for (hostname) ssh_host_ed25519_key.H_(hostname) -> the v2 ssh host private key for (hostname) ssh_host_ed25519_key.pub.H_(hostname) -> the v2 ssh host public key for (hostname) ssh_known_hosts -> the current known hosts file. this is regenerated each time a new key is generated. 
""" __author__ = 'bcfg-dev@mcs.anl.gov' keypatterns = ["ssh_host_dsa_key", "ssh_host_ecdsa_key", "ssh_host_ed25519_key", "ssh_host_rsa_key", "ssh_host_key", "ssh_host_dsa_key.pub", "ssh_host_ecdsa_key.pub", "ssh_host_ed25519_key.pub", "ssh_host_rsa_key.pub", "ssh_host_key.pub"] options = [ Bcfg2.Options.Option( cf=("sshbase", "passphrase"), dest="sshbase_passphrase", help="Passphrase used to encrypt generated private SSH host keys")] def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.Connector.__init__(self) Bcfg2.Server.Plugin.Generator.__init__(self) Bcfg2.Server.Plugin.PullTarget.__init__(self) self.ipcache = {} self.namecache = {} self.__skn = False # keep track of which bogus keys we've warned about, and only # do so once self.badnames = dict() self.fam = Bcfg2.Server.FileMonitor.get_fam() self.fam.AddMonitor(self.data, self) self.static = dict() self.entries = dict() self.Entries['Path'] = dict() self.entries['/etc/ssh/ssh_known_hosts'] = \ KnownHostsEntrySet(self.data) self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn for keypattern in self.keypatterns: self.entries["/etc/ssh/" + keypattern] = \ HostKeyEntrySet(keypattern, self.data) self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk self.cmd = Executor() @property def passphrase(self): """ The passphrase used to encrypt private keys """ if HAS_CRYPTO and Bcfg2.Options.setup.sshbase_passphrase: return Bcfg2.Options.setup.passphrases[ Bcfg2.Options.setup.sshbase_passphrase] return None def get_skn(self): """Build memory cache of the ssh known hosts file.""" if not self.__skn: # if no metadata is registered yet, defer if len(self.core.metadata.query.all()) == 0: self.__skn = False return self.__skn skn = [s.data.rstrip() for s in list(self.static.values())] mquery = self.core.metadata.query # build hostname cache names = dict() for cmeta in mquery.all(): names[cmeta.hostname] = set([cmeta.hostname]) names[cmeta.hostname].update(cmeta.aliases) newnames = set() newips = set() for name in names[cmeta.hostname]: newnames.add(name.split('.')[0]) try: newips.update(self.get_ipcache_entry(name)[0]) except PluginExecutionError: continue names[cmeta.hostname].update(newnames) names[cmeta.hostname].update(cmeta.addresses) names[cmeta.hostname].update(newips) # TODO: Only perform reverse lookups on IPs if an # option is set. for ip in newips: try: names[cmeta.hostname].update( self.get_namecache_entry(ip)) except socket.herror: continue names[cmeta.hostname] = sorted(names[cmeta.hostname]) pubkeys = [pubk for pubk in list(self.entries.keys()) if pubk.endswith('.pub')] pubkeys.sort() for pubkey in pubkeys: for entry in sorted(self.entries[pubkey].entries.values(), key=lambda e: (e.specific.hostname or e.specific.group)): specific = entry.specific hostnames = [] if specific.hostname and specific.hostname in names: hostnames = names[specific.hostname] elif specific.group: hostnames = list( chain( *[names[cmeta.hostname] for cmeta in mquery.by_groups([specific.group])])) elif specific.all: # a generic key for all hosts? really? hostnames = list(chain(*list(names.values()))) if not hostnames: if specific.hostname: key = specific.hostname ktype = "host" elif specific.group: key = specific.group ktype = "group" else: # user has added a global SSH key, but # have no clients yet. don't warn about # this. 
continue if key not in self.badnames: self.badnames[key] = True self.logger.info("Ignoring key for unknown %s %s" % (ktype, key)) continue skn.append("%s %s" % (','.join(hostnames), entry.data.rstrip())) self.__skn = "\n".join(skn) + "\n" return self.__skn def set_skn(self, value): """Set backing data for skn.""" self.__skn = value skn = property(get_skn, set_skn) def HandleEvent(self, event=None): """Local event handler that does skn regen on pubkey change.""" # skip events we don't care about action = event.code2str() if action == "endExist" or event.filename == self.data: return for entry in list(self.entries.values()): if event.filename.endswith(".crypt"): fname = event.filename[0:-6] else: fname = event.filename if entry.specific.match(fname): entry.handle_event(event) if any(event.filename.startswith(kp) for kp in self.keypatterns if kp.endswith(".pub")): self.debug_log("New public key %s; invalidating " "ssh_known_hosts cache" % event.filename) self.skn = False if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire() return if event.filename == 'info.xml': for entry in list(self.entries.values()): entry.handle_event(event) return if event.filename.endswith('.static'): self.logger.info("Static key %s %s; invalidating ssh_known_hosts " "cache" % (event.filename, action)) if action == "deleted" and event.filename in self.static: del self.static[event.filename] self.skn = False else: self.static[event.filename] = Bcfg2.Server.Plugin.FileBacked( os.path.join(self.data, event.filename)) self.static[event.filename].HandleEvent(event) self.skn = False return self.logger.warn("SSHbase: Got unknown event %s %s" % (event.filename, action)) def get_ipcache_entry(self, client): """Build a cache of dns results.""" if client in self.ipcache: if self.ipcache[client]: return self.ipcache[client] else: raise PluginExecutionError("No cached IP address for %s" % client) else: # need to add entry try: ipaddr = set([info[4][0] for info in socket.getaddrinfo(client, None)]) self.ipcache[client] = (ipaddr, client) return (ipaddr, client) except socket.gaierror: result = self.cmd.run(["getent", "hosts", client]) if result.success: ipaddr = result.stdout.strip().split() if ipaddr: self.ipcache[client] = (ipaddr, client) return (ipaddr, client) self.ipcache[client] = False msg = "Failed to find IP address for %s: %s" % (client, result.error) self.logger.error(msg) raise PluginExecutionError(msg) def get_namecache_entry(self, cip): """Build a cache of name lookups from client IP addresses.""" if cip in self.namecache: # lookup cached name from IP if self.namecache[cip]: return self.namecache[cip] else: raise socket.herror else: # add an entry that has not been cached try: rvlookup = socket.gethostbyaddr(cip) if rvlookup[0]: self.namecache[cip] = [rvlookup[0]] else: self.namecache[cip] = [] self.namecache[cip].extend(rvlookup[1]) return self.namecache[cip] except socket.herror: self.namecache[cip] = False self.logger.error("Failed to find any names associated with " "IP address %s" % cip) raise def build_skn(self, entry, metadata): """This function builds builds a host specific known_hosts file.""" try: self.entries[entry.get('name')].bind_entry(entry, metadata) except Bcfg2.Server.Plugin.PluginExecutionError: entry.text = self.skn hostkeys = [] for key in self.keypatterns: if key.endswith(".pub"): try: hostkeys.append( self.entries["/etc/ssh/" + key].best_matching(metadata)) except Bcfg2.Server.Plugin.PluginExecutionError: pass hostkeys.sort() for hostkey in hostkeys: 
entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" \ % hostkey.data self.entries[entry.get('name')].bind_info_to_entry(entry, metadata) def build_hk(self, entry, metadata): """This binds host key data into entries.""" try: self.entries[entry.get('name')].bind_entry(entry, metadata) except Bcfg2.Server.Plugin.PluginExecutionError: filename = entry.get('name').split('/')[-1] self.GenerateHostKeyPair(metadata.hostname, filename) # Service the FAM events queued up by the key generation # so the data structure entries will be available for # binding. # # NOTE: We wait for up to ten seconds. There is some # potential for race condition, because if the file # monitor doesn't get notified about the new key files in # time, those entries won't be available for binding. In # practice, this seems "good enough". tries = 0 is_bound = False while not is_bound: if tries >= 10: msg = "%s still not registered" % filename self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) self.fam.handle_events_in_interval(1) tries += 1 try: self.entries[entry.get('name')].bind_entry(entry, metadata) is_bound = True except Bcfg2.Server.Plugin.PluginExecutionError: print("Failed to bind %s: %s") % ( lxml.etree.tostring(entry), sys.exc_info()[1]) def GenerateHostKeyPair(self, client, filename): """Generate new host key pair for client.""" match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa|ed25519)_)?key)', filename) if match: hostkey = "%s.H_%s" % (match.group(1), client) if match.group(2): keytype = match.group(2) else: keytype = 'rsa1' else: raise PluginExecutionError("Unknown key filename: %s" % filename) fileloc = os.path.join(self.data, hostkey) publoc = os.path.join(self.data, ".".join([hostkey.split('.')[0], 'pub', "H_%s" % client])) tempdir = tempfile.mkdtemp() temploc = os.path.join(tempdir, hostkey) cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "", "-t", keytype, "-C", "root@%s" % client] self.debug_log("SSHbase: Running: %s" % " ".join(cmd)) result = self.cmd.run(cmd) if not result.success: raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s" % result.error) if self.passphrase: self.debug_log("SSHbase: Encrypting private key for %s" % fileloc) try: data = ssl_encrypt(open(temploc).read(), self.passphrase) except IOError: raise PluginExecutionError("Unable to read temporary SSH key: " "%s" % sys.exc_info()[1]) except EVPError: raise PluginExecutionError("Unable to encrypt SSH key: %s" % sys.exc_info()[1]) try: open("%s.crypt" % fileloc, "wb").write(data) except IOError: raise PluginExecutionError("Unable to write encrypted SSH " "key: %s" % sys.exc_info()[1]) try: if not self.passphrase: shutil.copy(temploc, fileloc) shutil.copy("%s.pub" % temploc, publoc) except IOError: raise PluginExecutionError("Unable to copy temporary SSH key: %s" % sys.exc_info()[1]) try: os.unlink(temploc) os.unlink("%s.pub" % temploc) os.rmdir(tempdir) except OSError: err = sys.exc_info()[1] raise PluginExecutionError("Failed to unlink temporary ssh keys: " "%s" % err) def AcceptChoices(self, _, metadata): return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)] def AcceptPullData(self, specific, entry, log): """Per-plugin bcfg2-admin pull support.""" # specific will always be host specific filename = os.path.join(self.data, "%s.H_%s" % (entry['name'].split('/')[-1], specific.hostname)) try: open(filename, 'w').write(entry['text']) if log: print("Wrote file %s" % filename) except KeyError: self.logger.error("Failed to pull %s. 
This file does not " "currently exist on the client" % entry.get('name')) def get_additional_data(self, metadata): data = dict() for key in self.keypatterns: if key.endswith(".pub"): try: keyfile = "/etc/ssh/" + key entry = self.entries[keyfile].best_matching(metadata) data[key] = entry.data except Bcfg2.Server.Plugin.PluginExecutionError: pass return data src/lib/Bcfg2/Server/Plugins/ServiceCompat.py000066400000000000000000000026311303523157100213450ustar00rootroot00000000000000""" Use old-style service modes for older clients """ import Bcfg2.Server.Plugin class ServiceCompat(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.GoalValidator): """ Use old-style service modes for older clients """ create = False __author__ = 'bcfg-dev@mcs.anl.gov' mode_map = {('true', 'true'): 'default', ('interactive', 'true'): 'interactive_only', ('false', 'false'): 'manual'} def validate_goals(self, metadata, config): """ Apply defaults """ if metadata.version_info and metadata.version_info >= (1, 3, 0, '', 0): # do not care about a client that is _any_ 1.3.0 release # (including prereleases and RCs) return for entry in config.xpath("//BoundService|//Service"): mode_key = (entry.get("restart", "true").lower(), entry.get("install", "true").lower()) try: mode = self.mode_map[mode_key] except KeyError: self.logger.info("Could not map restart and install settings " "of %s:%s to an old-style Service mode for " "%s; using 'manual'" % (entry.tag, entry.get("name"), metadata.hostname)) mode = "manual" entry.set("mode", mode) src/lib/Bcfg2/Server/Plugins/Svn.py000066400000000000000000000175231303523157100173550ustar00rootroot00000000000000""" The Svn plugin provides a revision interface for Bcfg2 repos using Subversion. If PySvn libraries are installed, then it exposes two additional XML-RPC methods for committing data to the repository and updating the repository. 
""" import sys import Bcfg2.Options import Bcfg2.Server.Plugin try: import pysvn HAS_SVN = True except ImportError: from Bcfg2.Utils import Executor HAS_SVN = False class Svn(Bcfg2.Server.Plugin.Version): """Svn is a version plugin for dealing with Bcfg2 repos.""" options = Bcfg2.Server.Plugin.Version.options + [ Bcfg2.Options.Option( cf=("svn", "user"), dest="svn_user", help="SVN username"), Bcfg2.Options.Option( cf=("svn", "password"), dest="svn_password", help="SVN password"), Bcfg2.Options.BooleanOption( cf=("svn", "always_trust"), dest="svn_trust_ssl", help="Always trust SSL certs from SVN server")] if HAS_SVN: options.append( Bcfg2.Options.Option( cf=("svn", "conflict_resolution"), dest="svn_conflict_resolution", type=lambda v: v.replace("-", "_"), # pylint: disable=E1101 choices=dir(pysvn.wc_conflict_choice), default=pysvn.wc_conflict_choice.postpone, # pylint: enable=E1101 help="SVN conflict resolution method")) __author__ = 'bcfg-dev@mcs.anl.gov' __vcs_metadata_path__ = ".svn" if HAS_SVN: __rmi__ = Bcfg2.Server.Plugin.Version.__rmi__ + ['Update', 'Commit'] else: __vcs_metadata_path__ = ".svn" def __init__(self, core): Bcfg2.Server.Plugin.Version.__init__(self, core) self.revision = None self.svn_root = None self.client = None self.cmd = None if not HAS_SVN: self.logger.debug("Svn: PySvn not found, using CLI interface to " "SVN") self.cmd = Executor() else: self.client = pysvn.Client() self.debug_log("Svn: Conflicts will be resolved with %s" % Bcfg2.Options.setup.svn_conflict_resolution) self.client.callback_conflict_resolver = self.conflict_resolver if Bcfg2.Options.setup.svn_trust_ssl: self.client.callback_ssl_server_trust_prompt = \ self.ssl_server_trust_prompt if (Bcfg2.Options.setup.svn_user and Bcfg2.Options.setup.svn_password): self.client.callback_get_login = self.get_login self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" % self.vcs_path) def get_login(self, realm, username, may_save): # pylint: disable=W0613 """ PySvn callback to get credentials for HTTP basic authentication """ self.logger.debug("Svn: Logging in with username: %s" % Bcfg2.Options.setup.svn_user) return (True, Bcfg2.Options.setup.svn_user, Bcfg2.Options.setup.svn_password, False) def ssl_server_trust_prompt(self, trust_dict): """ PySvn callback to always trust SSL certificates from SVN server """ self.logger.debug("Svn: Trusting SSL certificate from %s, " "issued by %s for realm %s" % (trust_dict['hostname'], trust_dict['issuer_dname'], trust_dict['realm'])) return True, trust_dict['failures'], False def conflict_resolver(self, conflict_description): """ PySvn callback function to resolve conflicts """ self.logger.info("Svn: Resolving conflict for %s with %s" % (conflict_description['path'], Bcfg2.Options.setup.svn_conflict_resolution)) return Bcfg2.Options.setup.svn_conflict_resolution, None, False def get_revision(self): """Read svn revision information for the Bcfg2 repository.""" msg = None if HAS_SVN: try: info = self.client.info(Bcfg2.Options.setup.vcs_root) self.revision = info.revision self.svn_root = info.url return str(self.revision.number) except pysvn.ClientError: # pylint: disable=E1101 msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1] else: result = self.cmd.run(["env LC_ALL=C", "svn", "info", Bcfg2.Options.setup.vcs_root], shell=True) if result.success: self.revision = [line.split(': ')[1] for line in result.stdout.splitlines() if line.startswith('Revision:')][-1] return self.revision else: msg = "Failed to read svn info: %s" % result.error self.revision = None 
raise Bcfg2.Server.Plugin.PluginExecutionError(msg) def Update(self): '''Svn.Update() => True|False\nUpdate svn working copy\n''' try: old_revision = self.revision.number self.revision = self.client.update(Bcfg2.Options.setup.vcs_root, recurse=True)[0] except pysvn.ClientError: # pylint: disable=E1101 err = sys.exc_info()[1] # try to be smart about the error we got back details = None if "callback_ssl_server_trust_prompt" in str(err): details = "SVN server certificate is not trusted" elif "callback_get_login" in str(err): details = "SVN credentials not cached" if details is None: self.logger.error("Svn: Failed to update server repository", exc_info=1) else: self.logger.error("Svn: Failed to update server repository: " "%s" % details) return False if old_revision == self.revision.number: self.logger.debug("repository is current") else: self.logger.info("Updated %s from revision %s to %s" % (Bcfg2.Options.setup.vcs_root, old_revision, self.revision.number)) return True def Commit(self): """Svn.Commit() => True|False\nCommit svn repository\n""" # First try to update if not self.Update(): self.logger.error("Failed to update svn repository, refusing to " "commit changes") return False try: self.revision = self.client.checkin([Bcfg2.Options.setup.vcs_root], 'Svn: autocommit', recurse=True) self.revision = self.client.update(Bcfg2.Options.setup.vcs_root, recurse=True)[0] self.logger.info("Svn: Commited changes. At %s" % self.revision.number) return True except pysvn.ClientError: # pylint: disable=E1101 err = sys.exc_info()[1] # try to be smart about the error we got back details = None if "callback_ssl_server_trust_prompt" in str(err): details = "SVN server certificate is not trusted" elif "callback_get_login" in str(err): details = "SVN credentials not cached" if details is None: self.logger.error("Svn: Failed to commit changes", exc_info=1) else: self.logger.error("Svn: Failed to commit changes: %s" % details) return False src/lib/Bcfg2/Server/Plugins/TemplateHelper.py000066400000000000000000000120321303523157100215100ustar00rootroot00000000000000""" A plugin to provide helper classes and functions to templates """ import re import imp import sys import lxml.etree from Bcfg2.Server.Plugin import Plugin, Connector, DirectoryBacked, \ TemplateDataProvider, DefaultTemplateDataProvider from Bcfg2.Logger import Debuggable from Bcfg2.Utils import safe_module_name MODULE_RE = re.compile(r'(?P(?P[^\/]+)\.py)$') class HelperModule(Debuggable): """ Representation of a TemplateHelper module """ def __init__(self, name, core): Debuggable.__init__(self) self.name = name self.core = core #: The name of the module as used by get_additional_data(). #: the name of the file with .py stripped off. self._module_name = MODULE_RE.search(self.name).group('module') #: The attributes exported by this module self._attrs = [] #: The attributes added to the template namespace by this module self.defaults = [] default_prov = DefaultTemplateDataProvider() self.reserved_defaults = default_prov.get_template_data( lxml.etree.Element("Path", name="/dummy"), None, None).keys() + ["path"] def HandleEvent(self, event=None): """ HandleEvent is called whenever the FAM registers an event. 
:param event: The event object :type event: Bcfg2.Server.FileMonitor.Event :returns: None """ if event and event.code2str() not in ['exists', 'changed', 'created']: return # expire the metadata cache, because the module might have changed if self.core.metadata_cache_mode in ['cautious', 'aggressive']: self.core.metadata_cache.expire() try: module = imp.load_source( safe_module_name('TemplateHelper', self._module_name), self.name) except: # pylint: disable=W0702 # this needs to be a blanket except because the # imp.load_source() call can raise literally any error, # since it imports the module and just passes through any # exceptions raised. err = sys.exc_info()[1] self.logger.error("TemplateHelper: Failed to import %s: %s" % (self.name, err)) return if not hasattr(module, "__export__"): self.logger.error("TemplateHelper: %s has no __export__ list" % self.name) return newattrs = [] for sym in module.__export__ + getattr(module, "__default__", []): if sym in newattrs: # already added to attribute list continue if sym not in self._attrs and hasattr(self, sym): self.logger.warning( "TemplateHelper: %s: %s is a reserved keyword, " "skipping export" % (self.name, sym)) continue try: setattr(self, sym, getattr(module, sym)) newattrs.append(sym) except AttributeError: self.logger.warning( "TemplateHelper: %s exports %s, but has no such attribute" % (self.name, sym)) # remove old exports for sym in set(self._attrs) - set(newattrs): delattr(self, sym) self._attrs = newattrs self.defaults = [] for sym in getattr(module, "__default__", []): if sym in self.reserved_defaults: self.logger.warning( "TemplateHelper: %s: %s is a reserved keyword, not adding " "as default" % (self.name, sym)) self.defaults.append(sym) class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider): """ A plugin to provide helper classes and functions to templates """ __author__ = 'chris.a.st.pierre@gmail.com' ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$') patterns = MODULE_RE def __init__(self, core): Plugin.__init__(self, core) Connector.__init__(self) DirectoryBacked.__init__(self, self.data) TemplateDataProvider.__init__(self) # The HelperModule needs access to the core, so we have to construct # it manually and add the custom argument. 
self.__child__ = lambda fname: HelperModule(fname, core) def get_additional_data(self, _): return dict([(h._module_name, h) # pylint: disable=W0212 for h in self.entries.values()]) def get_template_data(self, *_): rv = dict() source = dict() for helper in self.entries.values(): for key in helper.defaults: if key not in rv: rv[key] = getattr(helper, key) source[key] = helper else: self.logger.warning( "TemplateHelper: Duplicate default variable %s " "provided by both %s and %s" % (key, helper.name, source[key].name)) return rv src/lib/Bcfg2/Server/Plugins/Trigger.py000066400000000000000000000035471303523157100202130ustar00rootroot00000000000000""" Trigger is a plugin that calls external scripts (on the server) """ import os import pipes import Bcfg2.Server.Plugin from Bcfg2.Utils import Executor class TriggerFile(Bcfg2.Server.Plugin.FileBacked): """ Representation of a trigger script file """ def HandleEvent(self, event=None): return class Trigger(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.ClientRunHooks, Bcfg2.Server.Plugin.DirectoryBacked): """Trigger is a plugin that calls external scripts (on the server).""" __author__ = 'bcfg-dev@mcs.anl.gov' def __init__(self, core): Bcfg2.Server.Plugin.Plugin.__init__(self, core) Bcfg2.Server.Plugin.ClientRunHooks.__init__(self) Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data) self.cmd = Executor() def async_run(self, args): """ Run the trigger script asynchronously in a forked process """ pid = os.fork() if pid: os.waitpid(pid, 0) else: dpid = os.fork() if not dpid: self.debug_log("Running %s" % " ".join(pipes.quote(a) for a in args)) result = self.cmd.run(args) if not result.success: self.logger.error("Trigger: Error running %s: %s" % (args[0], result.error)) elif result.stderr: self.debug_log("Trigger: Error: %s" % result.stderr) os._exit(0) # pylint: disable=W0212 def end_client_run(self, metadata): args = [metadata.hostname, '-p', metadata.profile, '-g', ':'.join([g for g in metadata.groups])] for notifier in self.entries.keys(): npath = os.path.join(self.data, notifier) self.async_run([npath] + args) src/lib/Bcfg2/Server/Plugins/__init__.py000066400000000000000000000000501303523157100203310ustar00rootroot00000000000000"""Imports for Bcfg2.Server.Plugins.""" src/lib/Bcfg2/Server/Reports/000077500000000000000000000000001303523157100162425ustar00rootroot00000000000000src/lib/Bcfg2/Server/Reports/__init__.py000066400000000000000000000001331303523157100203500ustar00rootroot00000000000000__all__ = ['manage', 'nisauth', 'reports', 'settings', 'backends', 'urls', 'importscript'] src/lib/Bcfg2/Server/Reports/reports/000077500000000000000000000000001303523157100177405ustar00rootroot00000000000000src/lib/Bcfg2/Server/Reports/reports/__init__.py000066400000000000000000000000331303523157100220450ustar00rootroot00000000000000__all__ = ['templatetags'] src/lib/Bcfg2/Server/Reports/reports/models.py000066400000000000000000000336761303523157100216140ustar00rootroot00000000000000"""Django models for Bcfg2 reports.""" import sys from django.core.exceptions import ImproperlyConfigured try: from django.db import models except ImproperlyConfigured: e = sys.exc_info()[1] print("Reports: unable to import django models: %s" % e) sys.exit(1) from django.db import connection from django.db.models import Q from datetime import datetime, timedelta from time import strptime from Bcfg2.Reporting.Compat import transaction KIND_CHOICES = ( #These are the kinds of config elements ('Package', 'Package'), ('Path', 'directory'), ('Path', 'file'), ('Path', 
'permissions'), ('Path', 'symlink'), ('Service', 'Service'), ) TYPE_GOOD = 0 TYPE_BAD = 1 TYPE_MODIFIED = 2 TYPE_EXTRA = 3 TYPE_CHOICES = ( (TYPE_GOOD, 'Good'), (TYPE_BAD, 'Bad'), (TYPE_MODIFIED, 'Modified'), (TYPE_EXTRA, 'Extra'), ) def convert_entry_type_to_id(type_name): """Convert a entry type to its entry id""" for e_id, e_name in TYPE_CHOICES: if e_name.lower() == type_name.lower(): return e_id return -1 class ClientManager(models.Manager): """Extended client manager functions.""" def active(self, timestamp=None): """returns a set of clients that have been created and have not yet been expired as of optional timestmamp argument. Timestamp should be a datetime object.""" if timestamp is None: timestamp = datetime.now() elif not isinstance(timestamp, datetime): raise ValueError('Expected a datetime object') else: try: timestamp = datetime(*strptime(timestamp, "%Y-%m-%d %H:%M:%S")[0:6]) except ValueError: return self.none() return self.filter( Q(expiration__gt=timestamp) | Q(expiration__isnull=True), creation__lt=timestamp) class Client(models.Model): """Object representing every client we have seen stats for.""" creation = models.DateTimeField(auto_now_add=True) name = models.CharField(max_length=128,) current_interaction = models.ForeignKey('Interaction', null=True, blank=True, related_name="parent_client") expiration = models.DateTimeField(blank=True, null=True) def __str__(self): return self.name objects = ClientManager() class Admin: pass class InteractiveManager(models.Manager): """Manages interactions objects.""" def interaction_per_client(self, maxdate=None, active_only=True): """ Returns the most recent interactions for clients as of a date Arguments: maxdate -- datetime object. Most recent date to pull. (dafault None) active_only -- Include only active clients (default True) """ if maxdate and not isinstance(maxdate, datetime): raise ValueError('Expected a datetime object') return self.filter( id__in=self.get_interaction_per_client_ids(maxdate, active_only)) def get_interaction_per_client_ids(self, maxdate=None, active_only=True): """ Returns the ids of most recent interactions for clients as of a date. Arguments: maxdate -- datetime object. Most recent date to pull. 
(dafault None) active_only -- Include only active clients (default True) """ from django.db import connection cursor = connection.cursor() cfilter = "expiration is null" sql = 'select reports_interaction.id, x.client_id ' + \ 'from (select client_id, MAX(timestamp) ' + \ 'as timer from reports_interaction' if maxdate: if not isinstance(maxdate, datetime): raise ValueError('Expected a datetime object') sql = sql + " where timestamp <= '%s' " % maxdate cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate, maxdate) sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \ 'reports_interaction.client_id = x.client_id AND ' + \ 'reports_interaction.timestamp = x.timer' if active_only: sql = sql + " and x.client_id in (select id from reports_client where %s)" % \ cfilter try: cursor.execute(sql) return [item[0] for item in cursor.fetchall()] except: '''FIXME - really need some error handling''' pass return [] class Interaction(models.Model): """Models each reconfiguration operation interaction between client and server.""" client = models.ForeignKey(Client, related_name="interactions") timestamp = models.DateTimeField(db_index=True) # record timestamp state = models.CharField(max_length=32) # good/bad/modified/etc # repository revision at the time of the latest interaction repo_rev_code = models.CharField(max_length=64) goodcount = models.IntegerField() # of good config-items totalcount = models.IntegerField() # of total config-items server = models.CharField(max_length=256) # server used for interaction bad_entries = models.IntegerField(default=-1) modified_entries = models.IntegerField(default=-1) extra_entries = models.IntegerField(default=-1) def __str__(self): return "With " + self.client.name + " @ " + self.timestamp.isoformat() def percentgood(self): if not self.totalcount == 0: return (self.goodcount / float(self.totalcount)) * 100 else: return 0 def percentbad(self): if not self.totalcount == 0: return ((self.totalcount - self.goodcount) / (float(self.totalcount))) * 100 else: return 0 def isclean(self): if (self.bad_entry_count() == 0 and self.goodcount == self.totalcount): return True else: return False def isstale(self): if (self == self.client.current_interaction): # Is Mostrecent if(datetime.now() - self.timestamp > timedelta(hours=25)): return True else: return False else: #Search for subsequent Interaction for this client #Check if it happened more than 25 hrs ago. if (self.client.interactions.filter(timestamp__gt=self.timestamp) .order_by('timestamp')[0].timestamp - self.timestamp > timedelta(hours=25)): return True else: return False def save(self): super(Interaction, self).save() # call the real save... self.client.current_interaction = self.client.interactions.latest() self.client.save() # save again post update def delete(self): '''Override the default delete. Allows us to remove Performance items''' pitems = list(self.performance_items.all()) super(Interaction, self).delete() for perf in pitems: if perf.interaction.count() == 0: perf.delete() def badcount(self): return self.totalcount - self.goodcount def bad(self): return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_BAD) def bad_entry_count(self): """Number of bad entries. 
Store the count in the interaction field to save db queries."""
        if self.bad_entries < 0:
            self.bad_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_BAD).count()
            self.save()
        return self.bad_entries

    def modified(self):
        return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_MODIFIED)

    def modified_entry_count(self):
        """Number of modified entries.
        Store the count in the interaction field to save db queries."""
        if self.modified_entries < 0:
            self.modified_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_MODIFIED).count()
            self.save()
        return self.modified_entries

    def extra(self):
        return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_EXTRA)

    def extra_entry_count(self):
        """Number of extra entries.
        Store the count in the interaction field to save db queries."""
        if self.extra_entries < 0:
            self.extra_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_EXTRA).count()
            self.save()
        return self.extra_entries

    objects = InteractiveManager()

    class Admin:
        list_display = ('client', 'timestamp', 'state')
        list_filter = ['client', 'timestamp']
        pass

    class Meta:
        get_latest_by = 'timestamp'
        ordering = ['-timestamp']
        unique_together = ("client", "timestamp")


class Reason(models.Model):
    """Reason why a modified or bad entry did not verify, or changed."""
    owner = models.CharField(max_length=255, blank=True)
    current_owner = models.CharField(max_length=255, blank=True)
    group = models.CharField(max_length=255, blank=True)
    current_group = models.CharField(max_length=255, blank=True)
    perms = models.CharField(max_length=4, blank=True)
    current_perms = models.CharField(max_length=4, blank=True)
    status = models.CharField(max_length=128, blank=True)
    current_status = models.CharField(max_length=128, blank=True)
    to = models.CharField(max_length=1024, blank=True)
    current_to = models.CharField(max_length=1024, blank=True)
    version = models.CharField(max_length=1024, blank=True)
    current_version = models.CharField(max_length=1024, blank=True)
    current_exists = models.BooleanField(default=True)  # False means it's missing.
    current_diff = models.TextField(max_length=1024*1024, blank=True)
    is_binary = models.BooleanField(default=False)
    is_sensitive = models.BooleanField(default=False)
    unpruned = models.TextField(max_length=4096, blank=True, default='')

    def __str__(self):
        return "Reason"

    def short_list(self):
        rv = []
        if self.current_owner or self.current_group or self.current_perms:
            rv.append("File permissions")
        if self.current_status:
            rv.append("Incorrect status")
        if self.current_to:
            rv.append("Incorrect target")
        if self.current_version or self.version == 'auto':
            rv.append("Wrong version")
        if not self.current_exists:
            rv.append("Missing")
        if self.current_diff or self.is_sensitive:
            rv.append("Incorrect data")
        if self.unpruned:
            rv.append("Directory has extra files")
        if len(rv) == 0:
            rv.append("Exists")
        return rv

    @staticmethod
    @transaction.atomic
    def prune_orphans():
        '''Prune orphaned rows...
no good way to use the ORM''' cursor = connection.cursor() cursor.execute('delete from reports_reason where not exists (select rei.id from reports_entries_interactions rei where rei.reason_id = reports_reason.id)') transaction.set_dirty() class Entries(models.Model): """Contains all the entries feed by the client.""" name = models.CharField(max_length=128, db_index=True) kind = models.CharField(max_length=16, choices=KIND_CHOICES, db_index=True) def __str__(self): return self.name @staticmethod @transaction.atomic def prune_orphans(): '''Prune oprhaned rows... no good way to use the ORM''' cursor = connection.cursor() cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)') transaction.set_dirty() class Meta: unique_together = ("name", "kind") class Entries_interactions(models.Model): """Define the relation between the reason, the interaction and the entry.""" entry = models.ForeignKey(Entries) reason = models.ForeignKey(Reason) interaction = models.ForeignKey(Interaction) type = models.IntegerField(choices=TYPE_CHOICES) class Performance(models.Model): """Object representing performance data for any interaction.""" interaction = models.ManyToManyField(Interaction, related_name="performance_items") metric = models.CharField(max_length=128) value = models.DecimalField(max_digits=32, decimal_places=16) def __str__(self): return self.metric @staticmethod @transaction.atomic def prune_orphans(): '''Prune oprhaned rows... no good way to use the ORM''' cursor = connection.cursor() cursor.execute('delete from reports_performance where not exists (select ri.id from reports_performance_interaction ri where ri.performance_id = reports_performance.id)') transaction.set_dirty() class Group(models.Model): """ Groups extracted from interactions name - The group name TODO - Most of this is for future use TODO - set a default group """ name = models.CharField(max_length=255, unique=True) profile = models.BooleanField(default=False) public = models.BooleanField(default=False) category = models.CharField(max_length=1024, blank=True) comment = models.TextField(blank=True) groups = models.ManyToManyField("self", symmetrical=False) bundles = models.ManyToManyField("Bundle") def __unicode__(self): return self.name class Bundle(models.Model): """ Bundles extracted from interactions name - The bundle name """ name = models.CharField(max_length=255, unique=True) def __unicode__(self): return self.name class InteractionMetadata(models.Model): """ InteractionMetadata Hold extra data associated with the client and interaction """ interaction = models.OneToOneField(Interaction, primary_key=True, related_name='metadata') profile = models.ForeignKey(Group, related_name="+") groups = models.ManyToManyField(Group) bundles = models.ManyToManyField(Bundle) src/lib/Bcfg2/Server/Reports/updatefix.py000066400000000000000000000156701303523157100206160ustar00rootroot00000000000000import Bcfg2.DBSettings from django.db import connection import django.core.management import sys import logging import traceback from Bcfg2.Server.models import internal_database_version logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix') # all update function should go here def _merge_database_table_entries(): cursor = connection.cursor() insert_cursor = connection.cursor() find_cursor = connection.cursor() cursor.execute(""" Select name, kind from reports_bad union select name, kind from reports_modified union select name, kind from 
reports_extra """) # this fetch could be better done entries_map = {} for row in cursor.fetchall(): insert_cursor.execute("insert into reports_entries (name, kind) \ values (%s, %s)", (row[0], row[1])) entries_map[(row[0], row[1])] = insert_cursor.lastrowid cursor.execute(""" Select name, kind, reason_id, interaction_id, 1 from reports_bad inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id union Select name, kind, reason_id, interaction_id, 2 from reports_modified inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id union Select name, kind, reason_id, interaction_id, 3 from reports_extra inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id """) for row in cursor.fetchall(): key = (row[0], row[1]) if entries_map.get(key, None): entry_id = entries_map[key] else: find_cursor.execute("Select id from reports_entries where " "name=%s and kind=%s", key) rowe = find_cursor.fetchone() entry_id = rowe[0] insert_cursor.execute("insert into reports_entries_interactions " "(entry_id, interaction_id, reason_id, type) " "values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4])) def _interactions_constraint_or_idx(): '''sqlite doesn't support alter tables.. or constraints''' cursor = connection.cursor() try: cursor.execute('alter table reports_interaction ' 'add constraint reports_interaction_20100601 ' 'unique (client_id,timestamp)') except: cursor.execute('create unique index reports_interaction_20100601 ' 'on reports_interaction (client_id,timestamp)') def _populate_interaction_entry_counts(): '''Populate up the type totals for the interaction table''' cursor = connection.cursor() count_field = {1: 'bad_entries', 2: 'modified_entries', 3: 'extra_entries'} for type in list(count_field.keys()): cursor.execute("select count(type), interaction_id " "from reports_entries_interactions " "where type = %s group by interaction_id" % type) updates = [] for row in cursor.fetchall(): updates.append(row) try: cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates) except Exception: e = sys.exc_info()[1] print(e) cursor.close() def update_noop(): return True # be sure to test your upgrade query before reflecting the change in the models # the list of function and sql command to do should go here _fixes = [_merge_database_table_entries, # this will remove unused tables "drop table reports_bad;", "drop table reports_bad_interactions;", "drop table reports_extra;", "drop table reports_extra_interactions;", "drop table reports_modified;", "drop table reports_modified_interactions;", "drop table reports_repository;", "drop table reports_metadata;", "alter table reports_interaction add server varchar(256) not null default 'N/A';", # fix revision data type to support $VCS hashes "alter table reports_interaction add repo_rev_code varchar(64) default '';", # Performance enhancements for large sites 'alter table reports_interaction add column bad_entries integer not null default -1;', 'alter table reports_interaction add column modified_entries integer not null default -1;', 'alter table reports_interaction add column extra_entries integer not null default -1;', _populate_interaction_entry_counts, _interactions_constraint_or_idx, 'alter table reports_reason add is_binary bool NOT NULL default False;', 'alter table reports_reason add is_sensitive bool NOT NULL default False;', update_noop, # _remove_table_column('reports_interaction', 
'client_version'), "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';"] # this will calculate the last possible version of the database lastversion = len(_fixes) def rollupdate(current_version): """ function responsible to coordinates all the updates need current_version as integer """ ret = None if current_version < lastversion: for i in range(current_version, lastversion): try: if type(_fixes[i]) == str: connection.cursor().execute(_fixes[i]) else: _fixes[i]() except: logger.error("Failed to perform db update %s" % (_fixes[i]), exc_info=1) # since array start at 0 but version start at 1 # we add 1 to the normal count ret = internal_database_version().create(version=i + 1) return ret else: return None def update_database(): ''' methode to search where we are in the revision of the database models and update them ''' try: logger.debug("Running upgrade of models to the new one") django.core.management.call_command("syncdb", interactive=False, verbosity=0) know_version = internal_database_version().order_by('-version') if not know_version: logger.debug("No version, creating initial version") know_version = internal_database_version().create(version=lastversion) else: know_version = know_version[0] logger.debug("Presently at %s" % know_version) if know_version.version > 13000: # SchemaUpdater stuff return elif know_version.version < lastversion: new_version = rollupdate(know_version.version) if new_version: logger.debug("upgraded to %s" % new_version) except: logger.error("Error while updating the database") for x in traceback.format_exc().splitlines(): logger.error(x) src/lib/Bcfg2/Server/SSLServer.py000066400000000000000000000434751303523157100170230ustar00rootroot00000000000000""" Bcfg2 SSL server used by the builtin server core (:mod:`Bcfg2.Server.BuiltinCore`). This needs to be documented better. """ import os import sys import socket import signal import logging import ssl import threading import time from Bcfg2.Compat import xmlrpclib, SimpleXMLRPCServer, SocketServer, \ b64decode class XMLRPCACLCheckException(Exception): """ Raised when ACL checks fail on an RPC request """ class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher): """ An XML-RPC dispatcher. """ def __init__(self, allow_none, encoding): try: SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding) except: # Python 2.4? SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self) self.logger = logging.getLogger(self.__class__.__name__) self.allow_none = allow_none self.encoding = encoding def _marshaled_dispatch(self, address, data): params, method = xmlrpclib.loads(data) if not self.instance.check_acls(address, method): raise XMLRPCACLCheckException try: if '.' 
not in method: params = (address, ) + params response = self.instance._dispatch(method, params, self.funcs) # py3k compatibility if type(response) not in [bool, str, list, dict, set, type(None)]: response = (response.decode('utf-8'), ) elif type(response) == set: response = (list(response), ) else: response = (response, ) raw_response = xmlrpclib.dumps(response, methodresponse=True, allow_none=self.allow_none, encoding=self.encoding) except xmlrpclib.Fault: fault = sys.exc_info()[1] raw_response = xmlrpclib.dumps(fault, methodresponse=True, allow_none=self.allow_none, encoding=self.encoding) except: err = sys.exc_info() self.logger.error("Unexpected handler error", exc_info=1) # report exception back to server raw_response = xmlrpclib.dumps( xmlrpclib.Fault(1, "%s:%s" % (err[0].__name__, err[1])), methodresponse=True, allow_none=self.allow_none, encoding=self.encoding) return raw_response class SSLServer(SocketServer.TCPServer, object): """ TCP server supporting SSL encryption. """ allow_reuse_address = True def __init__(self, listen_all, server_address, RequestHandlerClass, keyfile=None, certfile=None, reqCert=False, ca=None, timeout=None, protocol='xmlrpc/tlsv1'): """ :param listen_all: Listen on all interfaces :type listen_all: bool :param server_address: Address to bind to the server :param RequestHandlerClass: Request handler used by TCP server :param keyfile: Full path to SSL encryption key file :type keyfile: string :param certfile: Full path to SSL certificate file :type certfile: string :param reqCert: Require client to present certificate :type reqCert: bool :param ca: Full path to SSL CA that signed the key and cert :type ca: string :param timeout: Timeout for non-blocking request handling :param protocol: The protocol to serve. Supported values are ``xmlrpc/ssl`` and ``xmlrpc/tlsv1``. 
:type protocol: string """ # check whether or not we should listen on all interfaces if listen_all: listen_address = ('', server_address[1]) else: listen_address = (server_address[0], server_address[1]) # check for IPv6 address if ':' in server_address[0]: self.address_family = socket.AF_INET6 self.logger = logging.getLogger(self.__class__.__name__) try: SocketServer.TCPServer.__init__(self, listen_address, RequestHandlerClass) except socket.gaierror: e = sys.exc_info()[1] self.logger.error("Failed to bind to socket: %s" % e) raise except socket.error: self.logger.error("Failed to bind to socket") raise self.timeout = timeout self.socket.settimeout(timeout) self.keyfile = keyfile if (keyfile is not None and (keyfile is False or not os.path.exists(keyfile) or not os.access(keyfile, os.R_OK))): msg = "Keyfile %s does not exist or is not readable" % keyfile self.logger.error(msg) raise Exception(msg) self.certfile = certfile if (certfile is not None and (certfile is False or not os.path.exists(certfile) or not os.access(certfile, os.R_OK))): msg = "Certfile %s does not exist or is not readable" % certfile self.logger.error(msg) raise Exception(msg) self.ca = ca if (ca is not None and (ca is False or not os.path.exists(ca) or not os.access(ca, os.R_OK))): msg = "CA %s does not exist or is not readable" % ca self.logger.error(msg) raise Exception(msg) self.reqCert = reqCert if ca and certfile: self.mode = ssl.CERT_OPTIONAL else: self.mode = ssl.CERT_NONE if protocol == 'xmlrpc/ssl': self.ssl_protocol = ssl.PROTOCOL_SSLv23 elif protocol == 'xmlrpc/tlsv1': self.ssl_protocol = ssl.PROTOCOL_TLSv1 else: self.logger.error("Unknown protocol %s" % (protocol)) raise Exception("unknown protocol %s" % protocol) def get_request(self): (sock, sockinfo) = self.socket.accept() sock.settimeout(self.timeout) # pylint: disable=E1101 sslsock = ssl.wrap_socket(sock, server_side=True, certfile=self.certfile, keyfile=self.keyfile, cert_reqs=self.mode, ca_certs=self.ca, ssl_version=self.ssl_protocol) return sslsock, sockinfo def close_request(self, request): try: request.unwrap() except: pass try: request.close() except: pass def _get_url(self): port = self.socket.getsockname()[1] hostname = socket.gethostname() protocol = "https" return "%s://%s:%i" % (protocol, hostname, port) url = property(_get_url) class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): """ XML-RPC request handler. Adds support for HTTP authentication. """ def __init__(self, *args, **kwargs): self.logger = logging.getLogger(self.__class__.__name__) SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.__init__(self, *args, **kwargs) def authenticate(self): try: header = self.headers['Authorization'] except KeyError: self.logger.error("No authentication data presented") return False auth_content = b64decode(header.split()[1]) try: # py3k compatibility try: username, password = auth_content.split(":") except TypeError: # pylint: disable=E0602 username, pw = auth_content.split(bytes(":", encoding='utf-8')) password = pw.decode('utf-8') # pylint: enable=E0602 except ValueError: username = auth_content password = "" cert = self.request.getpeercert() client_address = self.request.getpeername() return self.server.instance.authenticate(cert, username, password, client_address) def parse_request(self): """Extends parse_request. Optionally check HTTP authentication when parsing. 
""" if not SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.parse_request(self): return False try: if not self.authenticate(): self.logger.error("Authentication Failure") self.send_error(401, self.responses[401][0]) return False except: # pylint: disable=W0702 self.logger.error("Unexpected Authentication Failure", exc_info=1) self.send_error(401, self.responses[401][0]) return False return True def do_POST(self): try: max_chunk_size = 10 * 1024 * 1024 size_remaining = int(self.headers["content-length"]) L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) chunk = self.rfile.read(chunk_size).decode('utf-8') if not chunk: break L.append(chunk) size_remaining -= len(L[-1]) data = ''.join(L) if data is None: return # response has been sent response = self.server._marshaled_dispatch(self.client_address, data) if sys.hexversion >= 0x03000000: response = response.encode('utf-8') except XMLRPCACLCheckException: self.send_error(401, self.responses[401][0]) self.end_headers() except: # pylint: disable=W0702 self.logger.error("Unexpected dispatch error for %s: %s" % (self.client_address, sys.exc_info()[1])) try: self.send_response(500) self.send_header("Content-length", "0") self.end_headers() except: (etype, msg) = sys.exc_info()[:2] self.logger.error("Error sending 500 response (%s): %s" % (etype.__name__, msg)) raise else: # got a valid XML RPC response client_address = self.request.getpeername() try: self.send_response(200) self.send_header("Content-type", "text/xml") self.send_header("Content-length", str(len(response))) self.end_headers() failcount = 0 while True: try: # If we hit SSL3_WRITE_PENDING here try to resend. self.wfile.write(response) break except ssl.SSLError: e = sys.exc_info()[1] if str(e).find("SSL3_WRITE_PENDING") < 0: raise self.logger.error("SSL3_WRITE_PENDING") failcount += 1 if failcount < 5: continue raise except socket.error: err = sys.exc_info()[1] if isinstance(err, socket.timeout): self.logger.warning("Connection timed out for %s" % self.client_address[0]) elif err[0] == 32: self.logger.warning("Connection dropped from %s" % self.client_address[0]) elif err[0] == 104: self.logger.warning("Connection reset by peer: %s" % self.client_address[0]) else: self.logger.warning("Socket error sending response to %s: " "%s" % (self.client_address[0], err)) except ssl.SSLError: err = sys.exc_info()[1] self.logger.warning("SSLError handling client %s: %s" % (self.client_address[0], err)) except: etype, err = sys.exc_info()[:2] self.logger.error("Unknown error sending response to %s: " "%s (%s)" % (self.client_address[0], err, etype.__name__)) def finish(self): # shut down the connection try: SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.finish(self) except socket.error: err = sys.exc_info()[1] self.logger.warning("Error closing connection: %s" % err) class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer, XMLRPCDispatcher, object): """ Component XMLRPCServer. 
""" def __init__(self, listen_all, server_address, RequestHandlerClass=None, keyfile=None, certfile=None, ca=None, protocol='xmlrpc/tlsv1', timeout=10, logRequests=False, register=True, allow_none=True, encoding=None): """ :param listen_all: Listen on all interfaces :type listen_all: bool :param server_address: Address to bind to the server :param RequestHandlerClass: request handler used by TCP server :param keyfile: Full path to SSL encryption key file :type keyfile: string :param certfile: Full path to SSL certificate file :type certfile: string :param ca: Full path to SSL CA that signed the key and cert :type ca: string :param logRequests: Log all requests :type logRequests: bool :param register: Presence should be reported to service-location :type register: bool :param allow_none: Allow None values in XML-RPC :type allow_none: bool :param encoding: Encoding to use for XML-RPC """ XMLRPCDispatcher.__init__(self, allow_none, encoding) if not RequestHandlerClass: # pylint: disable=E0102 class RequestHandlerClass(XMLRPCRequestHandler): """A subclassed request handler to prevent class-attribute conflicts.""" # pylint: enable=E0102 SSLServer.__init__(self, listen_all, server_address, RequestHandlerClass, ca=ca, timeout=timeout, keyfile=keyfile, certfile=certfile, protocol=protocol) self.logRequests = logRequests self.serve = False self.register = register self.register_introspection_functions() self.register_function(self.ping) self.logger.info("service available at %s" % self.url) self.timeout = timeout def _tasks_thread(self): try: while self.serve: try: if self.instance and hasattr(self.instance, 'do_tasks'): self.instance.do_tasks() except: self.logger.error("Unexpected task failure", exc_info=1) time.sleep(self.timeout) except: self.logger.error("tasks_thread failed", exc_info=1) def server_close(self): SSLServer.server_close(self) self.logger.info("server_close()") def _get_require_auth(self): return getattr(self.RequestHandlerClass, "require_auth", False) def _set_require_auth(self, value): self.RequestHandlerClass.require_auth = value require_auth = property(_get_require_auth, _set_require_auth) def _get_credentials(self): try: return self.RequestHandlerClass.credentials except AttributeError: return dict() def _set_credentials(self, value): self.RequestHandlerClass.credentials = value credentials = property(_get_credentials, _set_credentials) def register_instance(self, instance, *args, **kwargs): XMLRPCDispatcher.register_instance(self, instance, *args, **kwargs) try: name = instance.name except AttributeError: name = "unknown" if hasattr(instance, '_get_rmi'): for fname, func in instance._get_rmi().items(): self.register_function(func, name=fname) self.logger.info("serving %s at %s" % (name, self.url)) def serve_forever(self): """Serve single requests until (self.serve == False).""" self.serve = True self.task_thread = \ threading.Thread(name="%sThread" % self.__class__.__name__, target=self._tasks_thread) self.task_thread.start() self.logger.info("serve_forever() [start]") signal.signal(signal.SIGINT, self._handle_shutdown_signal) signal.signal(signal.SIGTERM, self._handle_shutdown_signal) try: while self.serve: try: self.handle_request() except socket.timeout: pass except: self.logger.error("Got unexpected error in handle_request", exc_info=1) finally: self.logger.info("serve_forever() [stop]") def shutdown(self): """Signal that automatic service should stop.""" self.serve = False def _handle_shutdown_signal(self, *_): self.shutdown() def ping(self, *args): """Echo 
response.""" self.logger.info("ping(%s)" % (", ".join([repr(arg) for arg in args]))) return args src/lib/Bcfg2/Server/Statistics.py000066400000000000000000000100651303523157100173120ustar00rootroot00000000000000""" Module for tracking execution time statistics from the Bcfg2 server core. This data is exposed by :func:`Bcfg2.Server.Core.BaseCore.get_statistics`.""" import time from Bcfg2.Compat import wraps class Statistic(object): """ A single named statistic, tracking minimum, maximum, and average execution time, and number of invocations. """ def __init__(self, name, initial_value): """ :param name: The name of this statistic :type name: string :param initial_value: The initial value to be added to this statistic :type initial_value: int or float """ self.name = name self.min = float(initial_value) self.max = float(initial_value) self.ave = float(initial_value) self.count = 1 def add_value(self, value): """ Add a value to the statistic, recalculating the various metrics. :param value: The value to add to this statistic :type value: int or float """ self.min = min(self.min, float(value)) self.max = max(self.max, float(value)) self.count += 1 self.ave = (((self.ave * (self.count - 1)) + value) / self.count) def get_value(self): """ Get a tuple of all the stats tracked on this named item. The tuple is in the format:: (, (min, max, average, number of values)) This makes it very easy to cast to a dict in :func:`Statistics.display`. :returns: tuple """ return (self.name, (self.min, self.max, self.ave, self.count)) def __repr__(self): return "%s(%s, (min=%s, avg=%s, max=%s, count=%s))" % ( self.__class__.__name__, self.name, self.min, self.ave, self.max, self.count) class Statistics(object): """ A collection of named :class:`Statistic` objects. """ def __init__(self): self.data = dict() def add_value(self, name, value): """ Add a value to the named :class:`Statistic`. This just proxies to :func:`Statistic.add_value` or the :class:`Statistic` constructor as appropriate. :param name: The name of the :class:`Statistic` to add the value to :type name: string :param value: The value to add to the Statistic :type value: int or float """ if name not in self.data: self.data[name] = Statistic(name, value) else: self.data[name].add_value(value) def display(self): """ Return a dict of all :class:`Statistic` object values. Keys are the statistic names, and values are tuples of the statistic metrics as returned by :func:`Statistic.get_value`. """ return dict([value.get_value() for value in list(self.data.values())]) #: A module-level :class:`Statistics` objects used to track all #: execution time metrics for the server. stats = Statistics() # pylint: disable=C0103 class track_statistics(object): # pylint: disable=C0103 """ Decorator that tracks execution time for the given method with :mod:`Bcfg2.Server.Statistics` for reporting via ``bcfg2-admin perf`` """ def __init__(self, name=None): """ :param name: The name under which statistics for this function will be tracked. By default, the name will be the name of the function concatenated with the name of the class the function is a member of. 
:type name: string """ # if this is None, it will be set later during __call_ self.name = name def __call__(self, func): if self.name is None: self.name = func.__name__ @wraps(func) def inner(obj, *args, **kwargs): """ The decorated function """ name = "%s:%s" % (obj.__class__.__name__, self.name) start = time.time() try: return func(obj, *args, **kwargs) finally: stats.add_value(name, time.time() - start) return inner src/lib/Bcfg2/Server/Test.py000066400000000000000000000237241303523157100161050ustar00rootroot00000000000000""" bcfg2-test libraries and CLI """ import os import sys import shlex import signal import fnmatch import logging import Bcfg2.Logger import Bcfg2.Server.Core from math import ceil from nose.core import TestProgram from nose.suite import LazySuite from unittest import TestCase try: from multiprocessing import Process, Queue, active_children HAS_MULTIPROC = True except ImportError: HAS_MULTIPROC = False def active_children(): """active_children() when multiprocessing lib is missing.""" return [] def get_sigint_handler(core): """ Get a function that handles SIGINT/Ctrl-C by shutting down the core and exiting properly.""" def hdlr(sig, frame): # pylint: disable=W0613 """ Handle SIGINT/Ctrl-C by shutting down the core and exiting properly. """ core.shutdown() os._exit(1) # pylint: disable=W0212 return hdlr class CapturingLogger(object): """ Fake logger that captures logging output so that errors are only displayed for clients that fail tests """ def __init__(self, *args, **kwargs): # pylint: disable=W0613 self.output = [] def error(self, msg): """ discard error messages """ self.output.append(msg) def warning(self, msg): """ discard error messages """ self.output.append(msg) def info(self, msg): """ discard error messages """ self.output.append(msg) def debug(self, msg): """ discard error messages """ if Bcfg2.Options.setup.debug: self.output.append(msg) def reset_output(self): """ Reset the captured output """ self.output = [] class ClientTestFromQueue(TestCase): """ A test case that tests a value that has been enqueued by a child test process. ``client`` is the name of the client that has been tested; ``result`` is the result from the :class:`ClientTest` test. ``None`` indicates a successful test; a string value indicates a failed test; and an exception indicates an error while running the test. """ __test__ = False # Do not collect def __init__(self, client, result): TestCase.__init__(self) self.client = client self.result = result def shortDescription(self): return "Building configuration for %s" % self.client def runTest(self): """ parse the result from this test """ if isinstance(self.result, Exception): raise self.result assert self.result is None, self.result class ClientTest(TestCase): """ A test case representing the build of all of the configuration for a single host. Checks that none of the build config entities has had a failure when it is building. 
Optionally ignores some config files that we know will cause errors (because they are private files we don't have access to, for instance) """ __test__ = False # Do not collect divider = "-" * 70 def __init__(self, core, client, ignore=None): TestCase.__init__(self) self.core = core self.core.logger = CapturingLogger() self.client = client if ignore is None: self.ignore = dict() else: self.ignore = ignore def ignore_entry(self, tag, name): """ return True if an error on a given entry should be ignored """ if tag in self.ignore: if name in self.ignore[tag]: return True else: # try wildcard matching for pattern in self.ignore[tag]: if fnmatch.fnmatch(name, pattern): return True return False def shortDescription(self): return "Building configuration for %s" % self.client def runTest(self): """ run this individual test """ config = self.core.BuildConfiguration(self.client) output = self.core.logger.output[:] if output: output.append(self.divider) self.core.logger.reset_output() # check for empty client configuration assert len(config.findall("Bundle")) > 0, \ "\n".join(output + ["%s has no content" % self.client]) # check for missing bundles metadata = self.core.build_metadata(self.client) sbundles = [el.get('name') for el in config.findall("Bundle")] missing = [b for b in metadata.bundles if b not in sbundles] assert len(missing) == 0, \ "\n".join(output + ["Configuration is missing bundle(s): %s" % ':'.join(missing)]) # check for unknown packages unknown_pkgs = [el.get("name") for el in config.xpath('//Package[@type="unknown"]') if not self.ignore_entry(el.tag, el.get("name"))] assert len(unknown_pkgs) == 0, \ "Configuration contains unknown packages: %s" % \ ", ".join(unknown_pkgs) failures = [] msg = output + ["Failures:"] for failure in config.xpath('//*[@failure]'): if not self.ignore_entry(failure.tag, failure.get('name')): failures.append(failure) msg.append("%s:%s: %s" % (failure.tag, failure.get("name"), failure.get("failure"))) assert len(failures) == 0, "\n".join(msg) def __str__(self): return "ClientTest(%s)" % self.client id = __str__ class CLI(object): """ The bcfg2-test CLI """ options = [ Bcfg2.Options.PositionalArgument( "clients", help="Specific clients to build", nargs="*"), Bcfg2.Options.Option( "--nose-options", cf=("bcfg2_test", "nose_options"), type=shlex.split, default=[], help='Options to pass to nosetests. 
Only honored with ' '--children 0'), Bcfg2.Options.Option( "--ignore", cf=('bcfg2_test', 'ignore_entries'), default=[], dest="test_ignore", type=Bcfg2.Options.Types.comma_list, help='Ignore these entries if they fail to build'), Bcfg2.Options.Option( "--children", cf=('bcfg2_test', 'children'), default=0, type=int, help='Spawn this number of children for bcfg2-test (python 2.6+)')] def __init__(self): parser = Bcfg2.Options.get_parser( description="Verify that all clients build without failures", components=[Bcfg2.Server.Core.Core, self]) parser.parse() self.logger = logging.getLogger(parser.prog) if Bcfg2.Options.setup.children and not HAS_MULTIPROC: self.logger.warning("Python multiprocessing library not found, " "running with no children") Bcfg2.Options.setup.children = 0 def get_core(self): """ Get a server core, with events handled """ core = Bcfg2.Server.Core.Core() core.load_plugins() core.block_for_fam_events(handle_events=True) signal.signal(signal.SIGINT, get_sigint_handler(core)) return core def get_ignore(self): """ Get a dict of entry tags and names to ignore errors from """ ignore = dict() for entry in Bcfg2.Options.setup.test_ignore: tag, name = entry.split(":") try: ignore[tag].append(name) except KeyError: ignore[tag] = [name] return ignore def run_child(self, clients, queue): """ Run tests for the given clients in a child process, returning results via the given Queue """ core = self.get_core() ignore = self.get_ignore() for client in clients: try: ClientTest(core, client, ignore).runTest() queue.put((client, None)) except AssertionError: queue.put((client, str(sys.exc_info()[1]))) except: queue.put((client, sys.exc_info()[1])) core.shutdown() def run(self): """ Run bcfg2-test """ core = self.get_core() clients = Bcfg2.Options.setup.clients or core.metadata.clients ignore = self.get_ignore() if Bcfg2.Options.setup.children: if Bcfg2.Options.setup.children > len(clients): self.logger.info("Refusing to spawn more children than " "clients to test, setting children=%s" % len(clients)) Bcfg2.Options.setup.children = len(clients) perchild = int(ceil(len(clients) / float(Bcfg2.Options.setup.children + 1))) queue = Queue() for child in range(Bcfg2.Options.setup.children): start = child * perchild end = (child + 1) * perchild child = Process(target=self.run_child, args=(clients[start:end], queue)) child.start() def generate_tests(): """ Read test results for the clients """ start = Bcfg2.Options.setup.children * perchild for client in clients[start:]: yield ClientTest(core, client, ignore) for i in range(start): # pylint: disable=W0612 yield ClientTestFromQueue(*queue.get()) else: def generate_tests(): """ Run tests for the clients """ for client in clients: yield ClientTest(core, client, ignore) result = TestProgram( argv=sys.argv[:1] + Bcfg2.Options.setup.nose_options, suite=LazySuite(generate_tests), exit=False) # block until all children have completed -- should be # immediate since we've already gotten all the results we # expect for child in active_children(): child.join() core.shutdown() if result.success: os._exit(0) # pylint: disable=W0212 else: os._exit(1) # pylint: disable=W0212 src/lib/Bcfg2/Server/__init__.py000066400000000000000000000003501303523157100167130ustar00rootroot00000000000000"""This is the set of modules for Bcfg2.Server.""" import lxml.etree XI = 'http://www.w3.org/2001/XInclude' XI_NAMESPACE = '{%s}' % XI # pylint: disable=C0103 XMLParser = lxml.etree.XMLParser(remove_blank_text=True) core = None 
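The child-process fan-out in Test.py's CLI.run() above is easier to see in isolation. The following is a minimal, self-contained sketch (not Bcfg2 code) of the same pattern under the same assumptions Test.py makes: split the client list into per-child slices, have each child push (client, result) tuples onto a shared Queue, and let the parent test the remainder and drain the queue. The check_client() helper is a hypothetical stand-in for building one client's configuration.

# Minimal sketch of the fan-out/collect pattern used by Test.py's CLI.run();
# check_client() is a hypothetical stand-in, not part of Bcfg2.
from math import ceil
from multiprocessing import Process, Queue


def check_client(client):
    """Pretend to build/verify one client's configuration."""
    return (client, None)  # None mimics a passing ClientTest


def run_child(clients, queue):
    """Child worker: test a slice of clients, report results via the queue."""
    for client in clients:
        queue.put(check_client(client))


def run_all(clients, children=2):
    """Split clients among child processes; the parent keeps the tail."""
    perchild = int(ceil(len(clients) / float(children + 1)))
    queue = Queue()
    procs = []
    handed_out = 0
    for child in range(children):
        chunk = clients[child * perchild:(child + 1) * perchild]
        handed_out += len(chunk)
        proc = Process(target=run_child, args=(chunk, queue))
        proc.start()
        procs.append(proc)
    # the parent tests whatever was not handed to a child
    results = [check_client(c) for c in clients[children * perchild:]]
    # one queue.get() per client that a child was given
    for _ in range(handed_out):
        results.append(queue.get())
    for proc in procs:
        proc.join()
    return dict(results)


if __name__ == '__main__':
    print(run_all(['host%d.example.com' % i for i in range(7)], children=2))

In the real CLI the parent's share is wrapped in ClientTest instances and the queued results in ClientTestFromQueue, so nose sees one test per client either way.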
src/lib/Bcfg2/Server/migrations/000077500000000000000000000000001303523157100167605ustar00rootroot00000000000000src/lib/Bcfg2/Server/migrations/0001_initial.py000066400000000000000000000032171303523157100214260ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by Django 1.9.9 on 2016-08-17 18:52 from __future__ import unicode_literals import Bcfg2.Server.Plugin.helpers from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='MetadataClientModel', fields=[ ('hostname', models.CharField(max_length=255, primary_key=True, serialize=False)), ('version', models.CharField(max_length=31, null=True)), ], bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel), ), migrations.CreateModel( name='ProbesDataModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hostname', models.CharField(max_length=255)), ('probe', models.CharField(max_length=255)), ('timestamp', models.DateTimeField(auto_now=True)), ('data', models.TextField(null=True)), ], bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel), ), migrations.CreateModel( name='ProbesGroupsModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hostname', models.CharField(max_length=255)), ('group', models.CharField(max_length=255)), ], bases=(models.Model, Bcfg2.Server.Plugin.helpers.PluginDatabaseModel), ), ] src/lib/Bcfg2/Server/migrations/__init__.py000066400000000000000000000000001303523157100210570ustar00rootroot00000000000000src/lib/Bcfg2/Server/models.py000066400000000000000000000052501303523157100164430ustar00rootroot00000000000000""" Django database models for all plugins """ import sys import logging import Bcfg2.Options import Bcfg2.Server.Plugins LOGGER = logging.getLogger(__name__) MODELS = [] INTERNAL_DATABASE_VERSION = None class _OptionContainer(object): """Options for Bcfg2 database models.""" # we want to provide a different default plugin list -- # namely, _all_ plugins, so that the database is guaranteed to # work, even if /etc/bcfg2.conf isn't set up properly options = [Bcfg2.Options.Common.plugins] @staticmethod def options_parsed_hook(): # basic invocation to ensure that a default set of models is # loaded, and thus that this module will always work. load_models() Bcfg2.Options.get_parser().add_component(_OptionContainer) def load_models(plugins=None): """ load models from plugins specified in the config """ # this has to be imported after options are parsed, because Django # finalizes its settings as soon as it's loaded, which means that # if we import this before Bcfg2.DBSettings has been populated, # Django gets a null configuration, and subsequent updates to # Bcfg2.DBSettings won't help. 
from django.db import models global MODELS if not plugins: plugins = Bcfg2.Options.setup.plugins if MODELS: # load_models() has been called once, so first unload all of # the models; otherwise we might call load_models() with no # arguments, end up with _all_ models loaded, and then in a # subsequent call only load a subset of models for model in MODELS: delattr(sys.modules[__name__], model) MODELS = [] for mod in plugins: for sym in dir(mod): obj = getattr(mod, sym) if isinstance(obj, type) and issubclass(obj, models.Model): setattr(sys.modules[__name__], sym, obj) MODELS.append(sym) def internal_database_version(): global INTERNAL_DATABASE_VERSION if INTERNAL_DATABASE_VERSION is None: from django.db import models class InternalDatabaseVersion(models.Model): """ Object that tell us to which version the database is """ version = models.IntegerField() updated = models.DateTimeField(auto_now_add=True) def __str__(self): return "version %d updated %s" % (self.version, self.updated.isoformat()) class Meta: # pylint: disable=C0111,W0232 app_label = "reports" get_latest_by = "version" INTERNAL_DATABASE_VERSION = InternalDatabaseVersion return INTERNAL_DATABASE_VERSION.objects src/lib/Bcfg2/Server/south_migrations/000077500000000000000000000000001303523157100202025ustar00rootroot00000000000000src/lib/Bcfg2/Server/south_migrations/0001_initial.py000066400000000000000000000063271303523157100226550ustar00rootroot00000000000000# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MetadataClientModel' db.create_table(u'Server_metadataclientmodel', ( ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255, primary_key=True)), ('version', self.gf('django.db.models.fields.CharField')(max_length=31, null=True)), )) db.send_create_signal('Server', ['MetadataClientModel']) # Adding model 'ProbesDataModel' db.create_table(u'Server_probesdatamodel', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255)), ('probe', self.gf('django.db.models.fields.CharField')(max_length=255)), ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('data', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('Server', ['ProbesDataModel']) # Adding model 'ProbesGroupsModel' db.create_table(u'Server_probesgroupsmodel', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255)), ('group', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal('Server', ['ProbesGroupsModel']) def backwards(self, orm): # Deleting model 'MetadataClientModel' db.delete_table(u'Server_metadataclientmodel') # Deleting model 'ProbesDataModel' db.delete_table(u'Server_probesdatamodel') # Deleting model 'ProbesGroupsModel' db.delete_table(u'Server_probesgroupsmodel') models = { 'Server.metadataclientmodel': { 'Meta': {'object_name': 'MetadataClientModel'}, 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '31', 'null': 'True'}) }, 'Server.probesdatamodel': { 'Meta': {'object_name': 'ProbesDataModel'}, 'data': ('django.db.models.fields.TextField', [], {'null': 
'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'probe': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'Server.probesgroupsmodel': { 'Meta': {'object_name': 'ProbesGroupsModel'}, 'group': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) } } complete_apps = ['Server']src/lib/Bcfg2/Server/south_migrations/__init__.py000066400000000000000000000000001303523157100223010ustar00rootroot00000000000000src/lib/Bcfg2/Utils.py000066400000000000000000000274101303523157100150140ustar00rootroot00000000000000""" Miscellaneous useful utility functions, classes, etc., that are used by both client and server. Stuff that doesn't fit anywhere else. """ import fcntl import logging import os import re import select import shlex import sys import subprocess import threading from Bcfg2.Compat import input, any # pylint: disable=W0622 class ClassName(object): """ This very simple descriptor class exists only to get the name of the owner class. This is used because, for historical reasons, we expect every server plugin and every client tool to have a ``name`` attribute that is in almost all cases the same as the ``__class__.__name__`` attribute of the plugin object. This makes that more dynamic so that each plugin and tool isn't repeating its own name.""" def __get__(self, inst, owner): return owner.__name__ class PackedDigitRange(object): # pylint: disable=E0012,R0924 """ Representation of a set of integer ranges. A range is described by a comma-delimited string of integers and ranges, e.g.:: 1,10-12,15-20 Ranges are inclusive on both bounds, and may include 0. Negative numbers are not supported.""" def __init__(self, *ranges): """ May be instantiated in one of two ways:: PackedDigitRange() Or:: PackedDigitRange([, [, ...]]) E.g., both of the following are valid:: PackedDigitRange("1-5,7, 10-12") PackedDigitRange("1-5", 7, "10-12") """ self.ranges = [] self.ints = [] self.str = ",".join(str(r) for r in ranges) if len(ranges) == 1 and "," in ranges[0]: ranges = ranges[0].split(",") for item in ranges: item = str(item).strip() if item.endswith("-"): self.ranges.append((int(item[:-1]), None)) elif '-' in str(item): self.ranges.append(tuple(int(x) for x in item.split('-'))) else: self.ints.append(int(item)) def includes(self, other): """ Return True if ``other`` is included in this range. Functionally equivalent to ``other in range``, which should be used instead. """ return other in self def __contains__(self, other): other = int(other) if other in self.ints: return True return any((end is None and other >= start) or (end is not None and other >= start and other <= end) for start, end in self.ranges) def __repr__(self): return "%s:%s" % (self.__class__.__name__, str(self)) def __str__(self): return "[%s]" % self.str def locked(fd): """ Acquire a lock on a file. :param fd: The file descriptor to lock :type fd: int :returns: bool - True if the file is already locked, False otherwise """ try: fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: return True return False class ExecutorResult(object): """ Returned as the result of a call to :func:`Bcfg2.Utils.Executor.run`. 
    The result can be accessed via the instance variables, documented below,
    as a boolean (which is equivalent to
    :attr:`Bcfg2.Utils.ExecutorResult.success`), or as a tuple, which, for
    backwards compatibility, is equivalent to
    ``(result.retval, result.stdout.splitlines())``."""

    def __init__(self, stdout, stderr, retval):
        #: The output of the command
        if isinstance(stdout, str):
            self.stdout = stdout
        else:
            self.stdout = stdout.decode('utf-8')

        #: The error produced by the command
        if isinstance(stderr, str):
            self.stderr = stderr
        else:
            self.stderr = stderr.decode('utf-8')

        #: The return value of the command.
        self.retval = retval

        #: Whether or not the command was successful.  If the
        #: ExecutorResult is used as a boolean, ``success`` is
        #: returned.
        self.success = retval == 0

        #: A friendly error message
        self.error = None
        if self.retval:
            if self.stderr:
                self.error = "%s (rv: %s)" % (self.stderr, self.retval)
            elif self.stdout:
                self.error = "%s (rv: %s)" % (self.stdout, self.retval)
            else:
                self.error = "No output or error; return value %s" % \
                    self.retval

    def __repr__(self):
        if self.error:
            return "Errored command result: %s" % self.error
        elif self.stdout:
            return "Successful command result: %s" % self.stdout
        else:
            return "Successful command result: No output"

    def __getitem__(self, idx):
        """ This provides compatibility with the old Executor, which
        returned a tuple of (return value, stdout split by lines). """
        return (self.retval, self.stdout.splitlines())[idx]

    def __len__(self):
        """ This provides compatibility with the old Executor, which
        returned a tuple of (return value, stdout split by lines). """
        return 2

    def __delitem__(self, _):
        raise TypeError("'%s' object doesn't support item deletion" %
                        self.__class__.__name__)

    def __setitem__(self, idx, val):
        raise TypeError("'%s' object does not support item assignment" %
                        self.__class__.__name__)

    def __nonzero__(self):
        return self.__bool__()

    def __bool__(self):
        return self.success


class Executor(object):
    """ A convenient way to run external commands with
    :class:`subprocess.Popen` """

    def __init__(self, timeout=None):
        """
        :param timeout: Set a default timeout for all commands run by
                        this Executor object
        :type timeout: float
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.timeout = timeout

    def _timeout(self, proc):
        """ A function suitable for passing to
        :class:`threading.Timer` that kills the given process.

        :param proc: The process to kill upon timeout.
        :type proc: subprocess.Popen
        :returns: None """
        if proc.poll() is None:
            try:
                proc.kill()
                self.logger.warning("Process exceeded timeout, killing")
            except OSError:
                pass

    def run(self, command, inputdata=None, timeout=None, **kwargs):
        """ Run a command, given as a list, optionally giving it the
        specified input data.  All additional keyword arguments are
        passed through to :class:`subprocess.Popen`.

        :param command: The command to run, as a list (preferred) or
                        as a string.  See :class:`subprocess.Popen` for
                        details.
        :type command: list or string
        :param inputdata: Data to pass to the command on stdin
        :type inputdata: string
        :param timeout: Kill the command if it runs longer than this
                        many seconds.  Set to 0 or -1 to explicitly
                        override a default timeout.
:type timeout: float :returns: :class:`Bcfg2.Utils.ExecutorResult` """ shell = False if 'shell' in kwargs: shell = kwargs['shell'] if isinstance(command, str): cmdstr = command if not shell: command = shlex.split(cmdstr) else: cmdstr = " ".join(command) self.logger.debug("Running: %s" % cmdstr) args = dict(shell=shell, bufsize=16384, close_fds=True) args.update(kwargs) args.update(stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc = subprocess.Popen(command, **args) if timeout is None: timeout = self.timeout if timeout is not None: timer = threading.Timer(float(timeout), self._timeout, [proc]) timer.start() try: if inputdata: for line in inputdata.splitlines(): self.logger.debug('> %s' % line) (stdout, stderr) = proc.communicate(input=inputdata) # py3k fixes if not isinstance(stdout, str): stdout = stdout.decode('utf-8') # pylint: disable=E1103 if not isinstance(stderr, str): stderr = stderr.decode('utf-8') # pylint: disable=E1103 for line in stdout.splitlines(): # pylint: disable=E1103 self.logger.debug('< %s' % line) for line in stderr.splitlines(): # pylint: disable=E1103 self.logger.info(line) return ExecutorResult(stdout, stderr, proc.wait()) # pylint: disable=E1101 finally: if timeout is not None: timer.cancel() def list2range(lst): ''' convert a list of integers to a set of human-readable ranges. e.g.: [1, 2, 3, 6, 9, 10, 11] -> "[1-3,6,9-11]" ''' ilst = sorted(int(i) for i in lst) ranges = [] start = None last = None for i in ilst: if not last or i != last + 1: if start: if start == last: ranges.append(str(start)) else: ranges.append("%d-%d" % (start, last)) start = i last = i if start: if start == last: ranges.append(str(start)) else: ranges.append("%d-%d" % (start, last)) if not ranges: return "" elif len(ranges) > 1 or "-" in ranges[0]: return "[%s]" % ",".join(ranges) else: # only one range consisting of only a single number return ranges[0] def hostnames2ranges(hostnames): ''' convert a list of hostnames to a set of human-readable ranges. e.g.: ["foo1.example.com", "foo2.example.com", "foo3.example.com", "foo6.example.com"] -> ["foo[1-3,6].example.com"]''' hosts = {} hostre = re.compile(r'(\w+?)(\d+)(\..*)$') for host in hostnames: match = hostre.match(host) if match: key = (match.group(1), match.group(3)) try: hosts[key].append(match.group(2)) except KeyError: hosts[key] = [match.group(2)] ranges = [] for name, nums in hosts.items(): ranges.append(name[0] + list2range(nums) + name[1]) return ranges def safe_input(msg): """ input() that flushes the input buffer before accepting input """ # flush input buffer while len(select.select([sys.stdin.fileno()], [], [], 0.0)[0]) > 0: os.read(sys.stdin.fileno(), 4096) return input(msg) def safe_module_name(prefix, module): """ Munge the name of a module with prefix to avoid collisions with other Python modules. E.g., if you want to import user defined helper modules and someone has a helper named 'ldap.py', it should not be added to ``sys.modules`` as ``ldap``, but rather as something more obscure. """ return '__%s_%s' % (prefix, module) class classproperty(object): # pylint: disable=C0103 """ Decorator that can be used to create read-only class properties. """ def __init__(self, getter): self.getter = getter def __get__(self, instance, owner): return self.getter(owner) def is_string(strng, encoding): """ Returns true if the string contains no ASCII control characters and can be decoded from the specified encoding. 
""" for char in strng: if ord(char) < 9 or ord(char) > 13 and ord(char) < 32: return False if not hasattr(strng, "decode"): # py3k return True try: strng.decode(encoding) return True except: # pylint: disable=W0702 return False src/lib/Bcfg2/__init__.py000066400000000000000000000000371303523157100154470ustar00rootroot00000000000000"""Base modules definition.""" src/lib/Bcfg2/manage.py000077500000000000000000000012301303523157100151370ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import django import Bcfg2.Options import Bcfg2.DBSettings try: import Bcfg2.Server.models except ImportError: pass parser = Bcfg2.Options.get_parser() parser.add_options([Bcfg2.Options.PositionalArgument('django_command', nargs='*')]) parser.parse() if __name__ == "__main__": if django.VERSION[0] == 1 and django.VERSION[1] >= 6: from django.core.management import execute_from_command_line execute_from_command_line(sys.argv[:1] + Bcfg2.Options.setup.django_command) else: from django.core.management import execute_manager execute_manager(Bcfg2.DBSettings.settings) src/lib/Bcfg2/version.py000066400000000000000000000067501303523157100154050ustar00rootroot00000000000000""" bcfg2 version declaration and handling """ import re __version__ = "1.4.0pre2" class Bcfg2VersionInfo(tuple): # pylint: disable=E0012,R0924 """ object to make granular version operations (particularly comparisons) easier """ v_re = re.compile(r'(\d+)(\w+)(\d+)') def __new__(cls, vstr): (major, minor, rest) = vstr.split(".") match = cls.v_re.match(rest) if match: micro, releaselevel, serial = match.groups() else: micro = rest releaselevel = 'final' serial = 0 return tuple.__new__(cls, [int(major), int(minor), int(micro), releaselevel, int(serial)]) def __init__(self, vstr): # pylint: disable=W0613 tuple.__init__(self) self.major, self.minor, self.micro, self.releaselevel, self.serial = \ tuple(self) def __repr__(self): return "%s(major=%s, minor=%s, micro=%s, releaselevel=%s, serial=%s)" \ % ((self.__class__.__name__,) + tuple(self)) def _release_cmp(self, rel1, rel2): # pylint: disable=R0911 """ compare two release numbers """ if rel1 == rel2: return 0 elif rel1 == "final": return -1 elif rel2 == "final": return 1 elif rel1 == "rc": return -1 elif rel2 == "rc": return 1 # should never get to anything past this point elif rel1 == "pre": return -1 elif rel2 == "pre": return 1 else: # wtf? 
return 0 def __gt__(self, version): if version is None: # older bcfg2 clients didn't report their version, so we # handle this case specially and assume that any reported # version is newer than any indeterminate version return True try: for i in range(3): if self[i] != version[i]: return self[i] > version[i] rel = self._release_cmp(self[3], version[3]) if rel != 0: return rel < 0 return self[4] > version[4] except TypeError: return self > Bcfg2VersionInfo(version) def __lt__(self, version): if version is None: # older bcfg2 clients didn't report their version, so we # handle this case specially and assume that any reported # version is newer than any indeterminate version return False try: for i in range(3): if self[i] != version[i]: return self[i] < version[i] rel = self._release_cmp(self[3], version[3]) if rel != 0: return rel > 0 return self[4] < version[4] except TypeError: return self < Bcfg2VersionInfo(version) def __eq__(self, version): if version is None: # older bcfg2 clients didn't report their version, so we # handle this case specially and assume that any reported # version is newer than any indeterminate version return False try: rv = True for i in range(len(self)): rv &= self[i] == version[i] return rv except TypeError: return self == Bcfg2VersionInfo(version) def __ge__(self, version): return not self < version def __le__(self, version): return not self > version src/sbin/000077500000000000000000000000001303523157100126005ustar00rootroot00000000000000src/sbin/bcfg2000077500000000000000000000003571303523157100135160ustar00rootroot00000000000000#!/usr/bin/env python """Bcfg2 Client""" import sys from Bcfg2.Options import get_parser from Bcfg2.Client import Client if __name__ == '__main__': get_parser("Bcfg2 client", components=[Client]).parse() sys.exit(Client().run()) src/sbin/bcfg2-admin000077500000000000000000000004171303523157100146010ustar00rootroot00000000000000#!/usr/bin/env python """ bcfg2-admin is a script that helps to administer a Bcfg2 deployment. 
""" import sys from Bcfg2.Server.Admin import CLI if __name__ == '__main__': try: sys.exit(CLI().run()) except KeyboardInterrupt: raise SystemExit(1) src/sbin/bcfg2-crypt000077500000000000000000000003021303523157100146430ustar00rootroot00000000000000#!/usr/bin/env python """ helper for encrypting/decrypting Cfg and Properties files """ import sys from Bcfg2.Server.Encryption import CLI if __name__ == '__main__': sys.exit(CLI().run()) src/sbin/bcfg2-info000077500000000000000000000002761303523157100144470ustar00rootroot00000000000000#!/usr/bin/env python """This tool loads the Bcfg2 core into an interactive debugger.""" import sys from Bcfg2.Server.Info import CLI if __name__ == '__main__': sys.exit(CLI().run()) src/sbin/bcfg2-lint000077500000000000000000000002711303523157100144550ustar00rootroot00000000000000#!/usr/bin/env python """This tool examines your Bcfg2 specifications for errors.""" import sys from Bcfg2.Server.Lint import CLI if __name__ == '__main__': sys.exit(CLI().run()) src/sbin/bcfg2-report-collector000077500000000000000000000015241303523157100170100ustar00rootroot00000000000000#!/usr/bin/env python """ Daemon that runs to collect logs from the LocalFilesystem Reporting transport object and add them to the Reporting storage backend """ import sys import logging import Bcfg2.Logger import Bcfg2.Options import Bcfg2.DBSettings from Bcfg2.Reporting.Collector import ReportingCollector, ReportingError def main(): parser = Bcfg2.Options.get_parser(description="Collect Bcfg2 report data", components=[ReportingCollector]) parser.parse() logger = logging.getLogger('bcfg2-report-collector') # run collector try: ReportingCollector().run() except ReportingError: msg = sys.exc_info()[1] logger.error(msg) raise SystemExit(1) except KeyboardInterrupt: raise SystemExit(1) if __name__ == '__main__': sys.exit(main()) src/sbin/bcfg2-reports000077500000000000000000000002601303523157100152030ustar00rootroot00000000000000#!/usr/bin/env python """Query reporting system for client status.""" import sys from Bcfg2.Reporting.Reports import CLI if __name__ == '__main__': sys.exit(CLI().run()) src/sbin/bcfg2-server000077500000000000000000000025031303523157100150150ustar00rootroot00000000000000#!/usr/bin/env python """The XML-RPC Bcfg2 server.""" import sys import logging import Bcfg2.Options from Bcfg2.Server.Core import CoreInitError class BackendAction(Bcfg2.Options.ComponentAction): """ Action to load Bcfg2 backends """ islist = False bases = ['Bcfg2.Server'] class CLI(object): """ bcfg2-server CLI class """ parse_first = True options = [ Bcfg2.Options.Option( cf=('server', 'backend'), help='Server Backend', default='BuiltinCore', type=lambda b: b.title() + "Core", action=BackendAction)] def __init__(self): parser = Bcfg2.Options.get_parser("Bcfg2 server", components=[self]) parser.parse() self.logger = logging.getLogger(parser.prog) def run(self): """ Run the bcfg2 server """ try: core = Bcfg2.Options.setup.backend() core.run() except CoreInitError: self.logger.error(sys.exc_info()[1]) return 1 except TypeError: self.logger.error("Failed to load %s server backend: %s" % (Bcfg2.Options.setup.backend.__name__, sys.exc_info()[1])) raise except KeyboardInterrupt: return 1 if __name__ == '__main__': sys.exit(CLI().run()) src/sbin/bcfg2-test000077500000000000000000000003211303523157100144620ustar00rootroot00000000000000#!/usr/bin/env python """ This tool verifies that all clients known to the server build without failures """ import sys from Bcfg2.Server.Test import CLI if __name__ == "__main__": 
    sys.exit(CLI().run())

src/sbin/bcfg2-yum-helper

#!/usr/bin/env python
""" Helper script for the Packages plugin, used if yum library support
is enabled.  The yum libs have horrific memory leaks, so apparently the
right way to get around that in long-running processes is to have a
short-lived helper.  No, seriously -- check out the yum-updatesd code.
It's pure madness. """

import sys
from Bcfg2.Server.Plugins.Packages.YumHelper import CLI

if __name__ == '__main__':
    sys.exit(CLI().run())

testsuite/Testschema/test_schema.py

import os
import sys
import glob
import lxml.etree
from subprocess import Popen, PIPE, STDOUT

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
_path = os.path.dirname(__file__)
while _path != '/':
    if os.path.basename(_path).lower().startswith("test"):
        sys.path.append(_path)
    if os.path.basename(_path) == "testsuite":
        break
    _path = os.path.dirname(_path)
from common import *

# path to Bcfg2 schema directory
srcpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..",
                                       "schemas"))

# test for xmllint existence
try:
    Popen(['xmllint'], stdout=PIPE, stderr=STDOUT).wait()
    HAS_XMLLINT = True
except OSError:
    HAS_XMLLINT = False


XS = "http://www.w3.org/2001/XMLSchema"
XS_NS = "{%s}" % XS
NSMAP = dict(xs=XS)


class TestSchemas(Bcfg2TestCase):
    schema_url = "http://www.w3.org/2001/XMLSchema.xsd"

    @skipUnless(HAS_XMLLINT, "xmllint not installed")
    def test_valid(self):
        schemas = [s for s in glob.glob(os.path.join(srcpath, '*.xsd'))]
        xmllint = Popen(['xmllint', '--xinclude', '--noout', '--schema',
                         self.schema_url] + schemas,
                        stdout=PIPE, stderr=STDOUT)
        print(xmllint.communicate()[0].decode())
        self.assertEqual(xmllint.wait(), 0)

    def test_duplicates(self):
        entities = dict()
        for root, _, files in os.walk(srcpath):
            for fname in files:
                if not fname.endswith(".xsd"):
                    continue
                path = os.path.join(root, fname)
                relpath = path[len(srcpath):].strip("/")
                schema = lxml.etree.parse(path).getroot()
                ns = schema.get("targetNamespace")
                if ns not in entities:
                    entities[ns] = dict(group=dict(),
                                        attributeGroup=dict(),
                                        simpleType=dict(),
                                        complexType=dict())
                for entity in schema.xpath("//xs:*[@name]", namespaces=NSMAP):
                    tag = entity.tag[len(XS_NS):]
                    if tag not in entities[ns]:
                        continue
                    name = entity.get("name")
                    if name in entities[ns][tag]:
                        self.assertNotIn(
                            name, entities[ns][tag],
                            "Duplicate %s %s (in %s and %s)" %
                            (tag, name, fname, entities[ns][tag][name]))
                    entities[ns][tag][name] = fname
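The schema test above shells out to xmllint; as a hedged convenience sketch (paths and working directory assumed, not part of the test suite), the same validation can be run against a checkout's schemas/ directory outside the unit-test harness:

# Standalone sketch mirroring TestSchemas.test_valid; assumes the
# current directory is a Bcfg2 checkout and xmllint is on PATH.
import glob
from subprocess import Popen, PIPE, STDOUT

schemas = glob.glob('schemas/*.xsd')
proc = Popen(['xmllint', '--xinclude', '--noout', '--schema',
              'http://www.w3.org/2001/XMLSchema.xsd'] + schemas,
             stdout=PIPE, stderr=STDOUT)
print(proc.communicate()[0].decode())
print("exit status:", proc.wait())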
testsuite/Testsrc/000077500000000000000000000000001303523157100145365ustar00rootroot00000000000000testsuite/Testsrc/Testlib/000077500000000000000000000000001303523157100161445ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/000077500000000000000000000000001303523157100202225ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/TestTools/000077500000000000000000000000001303523157100221625ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/000077500000000000000000000000001303523157100237245ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestAugeas.py000066400000000000000000000211151303523157100263430ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os import sys import copy import lxml.etree import tempfile from mock import Mock, MagicMock, patch try: from Bcfg2.Client.Tools.POSIX.Augeas import * HAS_AUGEAS = True except ImportError: POSIXAugeas = None HAS_AUGEAS = False # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from TestPOSIX.Testbase import TestPOSIXTool from common import * test_data = """ content with spaces one two same same same same """ test_xdata = lxml.etree.XML(test_data) class TestPOSIXAugeas(TestPOSIXTool): test_obj = POSIXAugeas applied_commands = dict( insert=lxml.etree.Element( "Insert", label="Thing", path='Test/Children[#attribute/identical = "true"]/Thing'), set=lxml.etree.Element("Set", path="Test/Text/#text", value="content with spaces"), move=lxml.etree.Element( "Move", source="Test/Foo", destination='Test/Children[#attribute/identical = "false"]/Foo'), remove=lxml.etree.Element("Remove", path="Test/Bar"), clear=lxml.etree.Element("Clear", path="Test/Empty/#text"), setm=lxml.etree.Element( "SetMulti", sub="#text", value="same", base='Test/Children[#attribute/multi = "true"]/Thing')) @skipUnless(HAS_AUGEAS, "Python Augeas libraries not found") def setUp(self): TestPOSIXTool.setUp(self) fd, self.tmpfile = tempfile.mkstemp() os.fdopen(fd, 'w').write(test_data) def tearDown(self): tmpfile = getattr(self, "tmpfile", None) if tmpfile and os.path.exists(tmpfile): os.unlink(tmpfile) def test_fully_specified(self): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/test", type="augeas") self.assertFalse(ptool.fully_specified(entry)) lxml.etree.SubElement(entry, "Set", path="/test", value="test") self.assertTrue(ptool.fully_specified(entry)) def test_install(self): # this is tested adequately by the other tests pass def test_verify(self): # this is tested adequately by the other tests pass @patch("Bcfg2.Client.Tools.POSIX.Augeas.POSIXTool.verify") def _verify(self, commands, mock_verify): ptool = self.get_obj() mock_verify.return_value = True entry = lxml.etree.Element("Path", name=self.tmpfile, type="augeas", lens="Xml") entry.extend(commands) modlist = [] self.assertTrue(ptool.verify(entry, modlist)) mock_verify.assert_called_with(ptool, entry, modlist) self.assertXMLEqual(lxml.etree.parse(self.tmpfile).getroot(), test_xdata) def test_verify_insert(self): """ Test successfully verifying an Insert command """ self._verify([self.applied_commands['insert']]) def test_verify_set(self): """ Test successfully verifying a Set command """ self._verify([self.applied_commands['set']]) def 
test_verify_move(self): """ Test successfully verifying a Move command """ self._verify([self.applied_commands['move']]) def test_verify_remove(self): """ Test successfully verifying a Remove command """ self._verify([self.applied_commands['remove']]) def test_verify_clear(self): """ Test successfully verifying a Clear command """ self._verify([self.applied_commands['clear']]) def test_verify_set_multi(self): """ Test successfully verifying a SetMulti command """ self._verify([self.applied_commands['setm']]) def test_verify_all(self): """ Test successfully verifying multiple commands """ self._verify(self.applied_commands.values()) @patch("Bcfg2.Client.Tools.POSIX.Augeas.POSIXTool.install") def _install(self, commands, expected, mock_install, **attrs): ptool = self.get_obj() mock_install.return_value = True entry = lxml.etree.Element("Path", name=self.tmpfile, type="augeas", lens="Xml") for key, val in attrs.items(): entry.set(key, val) entry.extend(commands) self.assertTrue(ptool.install(entry)) mock_install.assert_called_with(ptool, entry) self.assertXMLEqual(lxml.etree.parse(self.tmpfile).getroot(), expected) def test_install_set_existing(self): """ Test setting the value of an existing node """ expected = copy.deepcopy(test_xdata) expected.find("Text").text = "Changed content" self._install([lxml.etree.Element("Set", path="Test/Text/#text", value="Changed content")], expected) def test_install_set_new(self): """ Test setting the value of an new node """ expected = copy.deepcopy(test_xdata) newtext = lxml.etree.SubElement(expected, "NewText") newtext.text = "new content" self._install([lxml.etree.Element("Set", path="Test/NewText/#text", value="new content")], expected) def test_install_remove(self): """ Test removing a node """ expected = copy.deepcopy(test_xdata) expected.remove(expected.find("Attrs")) self._install( [lxml.etree.Element("Remove", path='Test/*[#attribute/foo = "foo"]')], expected) def test_install_move(self): """ Test moving a node """ expected = copy.deepcopy(test_xdata) foo = expected.xpath("//Foo")[0] expected.append(foo) self._install( [lxml.etree.Element("Move", source='Test/Children/Foo', destination='Test/Foo')], expected) def test_install_clear(self): """ Test clearing a node """ # TODO: clearing a node doesn't seem to work with the XML lens # # % augtool -b # augtool> set /augeas/load/Xml/incl[3] "/tmp/test.xml" # augtool> load # augtool> clear '/files/tmp/test.xml/Test/Text/#text' # augtool> save # error: Failed to execute command # saving failed (run 'print /augeas//error' for details) # augtool> print /augeas//error # # The error isn't useful. 
pass def test_install_set_multi(self): """ Test setting multiple nodes at once """ expected = copy.deepcopy(test_xdata) for thing in expected.xpath("Children[@identical='true']/Thing"): thing.text = "same" self._install( [lxml.etree.Element( "SetMulti", value="same", base='Test/Children[#attribute/identical = "true"]', sub="Thing/#text")], expected) def test_install_insert(self): """ Test inserting a node """ expected = copy.deepcopy(test_xdata) children = expected.xpath("Children[@identical='true']")[0] thing = lxml.etree.Element("Thing") thing.text = "three" children.append(thing) self._install( [lxml.etree.Element( "Insert", path='Test/Children[#attribute/identical = "true"]/Thing[2]', label="Thing", where="after"), lxml.etree.Element( "Set", path='Test/Children[#attribute/identical = "true"]/Thing[3]/#text', value="three")], expected) def test_install_initial(self): """ Test creating initial content and then modifying it """ os.unlink(self.tmpfile) expected = copy.deepcopy(test_xdata) expected.find("Text").text = "Changed content" initial = lxml.etree.Element("Initial") initial.text = test_data modify = lxml.etree.Element("Set", path="Test/Text/#text", value="Changed content") self._install([initial, modify], expected, current_exists="false") testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestDevice.py000066400000000000000000000131701303523157100263370ustar00rootroot00000000000000import os import sys import copy import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.Device import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from Testbase import TestPOSIXTool from common import * class TestPOSIXDevice(TestPOSIXTool): test_obj = POSIXDevice def test_fully_specified(self): ptool = self.get_obj() orig_entry = lxml.etree.Element("Path", name="/test", type="device", dev_type="fifo") self.assertTrue(ptool.fully_specified(orig_entry)) for dtype in ["block", "char"]: for attr in ["major", "minor"]: entry = copy.deepcopy(orig_entry) entry.set("dev_type", dtype) entry.set(attr, "0") self.assertFalse(ptool.fully_specified(entry)) entry = copy.deepcopy(orig_entry) entry.set("dev_type", dtype) entry.set("major", "0") entry.set("minor", "0") self.assertTrue(ptool.fully_specified(entry)) @patch("os.major") @patch("os.minor") @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify") def test_verify(self, mock_verify, mock_minor, mock_major): entry = lxml.etree.Element("Path", name="/test", type="device", mode='0644', owner='root', group='root', dev_type="block", major="0", minor="10") ptool = self.get_obj() ptool._exists = Mock() def reset(): ptool._exists.reset_mock() mock_verify.reset_mock() mock_minor.reset_mock() mock_major.reset_mock() ptool._exists.return_value = False self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) reset() ptool._exists.return_value = MagicMock() mock_major.return_value = 0 mock_minor.return_value = 10 mock_verify.return_value = True self.assertTrue(ptool.verify(entry, [])) mock_verify.assert_called_with(ptool, entry, []) ptool._exists.assert_called_with(entry) mock_major.assert_called_with(ptool._exists.return_value.st_rdev) mock_minor.assert_called_with(ptool._exists.return_value.st_rdev) reset() ptool._exists.return_value = MagicMock() 
mock_major.return_value = 0 mock_minor.return_value = 10 mock_verify.return_value = False self.assertFalse(ptool.verify(entry, [])) mock_verify.assert_called_with(ptool, entry, []) ptool._exists.assert_called_with(entry) mock_major.assert_called_with(ptool._exists.return_value.st_rdev) mock_minor.assert_called_with(ptool._exists.return_value.st_rdev) reset() mock_verify.return_value = True entry = lxml.etree.Element("Path", name="/test", type="device", mode='0644', owner='root', group='root', dev_type="fifo") self.assertTrue(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) self.assertFalse(mock_major.called) self.assertFalse(mock_minor.called) @patch("os.makedev") @patch("os.mknod") @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install") def test_install(self, mock_install, mock_mknod, mock_makedev): entry = lxml.etree.Element("Path", name="/test", type="device", mode='0644', owner='root', group='root', dev_type="block", major="0", minor="10") ptool = self.get_obj() ptool._exists = Mock() ptool._exists.return_value = False mock_makedev.return_value = Mock() mock_install.return_value = True self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry, remove=True) mock_makedev.assert_called_with(0, 10) mock_mknod.assert_called_with(entry.get("name"), # 0o644 device_map[entry.get("dev_type")] | 420, mock_makedev.return_value) mock_install.assert_called_with(ptool, entry) mock_makedev.reset_mock() mock_mknod.reset_mock() ptool._exists.reset_mock() mock_install.reset_mock() mock_makedev.side_effect = OSError self.assertFalse(ptool.install(entry)) mock_makedev.reset_mock() mock_mknod.reset_mock() ptool._exists.reset_mock() mock_install.reset_mock() mock_mknod.side_effect = OSError self.assertFalse(ptool.install(entry)) mock_makedev.reset_mock() mock_mknod.reset_mock() ptool._exists.reset_mock() mock_install.reset_mock() mock_mknod.side_effect = None entry = lxml.etree.Element("Path", name="/test", type="device", mode='0644', owner='root', group='root', dev_type="fifo") self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry, remove=True) mock_mknod.assert_called_with(entry.get("name"), # 0o644 device_map[entry.get("dev_type")] | 420) mock_install.assert_called_with(ptool, entry) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestDirectory.py000066400000000000000000000131101303523157100270760ustar00rootroot00000000000000import os import sys import stat import copy import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.Directory import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from Testbase import TestPOSIXTool from common import * class TestPOSIXDirectory(TestPOSIXTool): test_obj = POSIXDirectory @patch("os.listdir") @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify") def test_verify(self, mock_verify, mock_listdir): ptool = self.get_obj() ptool._exists = Mock() entry = lxml.etree.Element("Path", name="/test", type="directory", mode='0644', owner='root', group='root') ptool._exists.return_value = False self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) ptool._exists.reset_mock() exists_rv = MagicMock() exists_rv.__getitem__.return_value = stat.S_IFREG | 
420 # 0o644 ptool._exists.return_value = exists_rv self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) ptool._exists.reset_mock() mock_verify.return_value = False exists_rv.__getitem__.return_value = stat.S_IFDIR | 420 # 0o644 self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._exists.reset_mock() mock_verify.reset_mock() mock_verify.return_value = True self.assertTrue(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._exists.reset_mock() mock_verify.reset_mock() entry.set("prune", "true") orig_entry = copy.deepcopy(entry) entries = ["foo", "bar", "bar/baz"] mock_listdir.return_value = entries modlist = [os.path.join(entry.get("name"), entries[0])] self.assertFalse(ptool.verify(entry, modlist)) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, modlist) mock_listdir.assert_called_with(entry.get("name")) expected = [os.path.join(entry.get("name"), e) for e in entries if os.path.join(entry.get("name"), e) not in modlist] actual = [e.get("name") for e in entry.findall("Prune")] self.assertItemsEqual(expected, actual) mock_verify.reset_mock() ptool._exists.reset_mock() mock_listdir.reset_mock() entry = copy.deepcopy(orig_entry) modlist = [os.path.join(entry.get("name"), e) for e in entries] self.assertTrue(ptool.verify(entry, modlist)) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, modlist) mock_listdir.assert_called_with(entry.get("name")) self.assertEqual(len(entry.findall("Prune")), 0) @patch("os.unlink") @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install") def test_install(self, mock_install, mock_unlink): entry = lxml.etree.Element("Path", name="/test/foo/bar", type="directory", mode='0644', owner='root', group='root') ptool = self.get_obj() ptool._exists = Mock() ptool._makedirs = Mock() ptool._remove = Mock() def reset(): ptool._exists.reset_mock() mock_install.reset_mock() mock_unlink.reset_mock() ptool._makedirs.reset_mock() ptool._remove.reset_mock() ptool._makedirs.return_value = True ptool._exists.return_value = False mock_install.return_value = True self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) ptool._makedirs.assert_called_with(entry) reset() exists_rv = MagicMock() exists_rv.__getitem__.return_value = stat.S_IFREG | 420 # 0o644 ptool._exists.return_value = exists_rv self.assertTrue(ptool.install(entry)) mock_unlink.assert_called_with(entry.get("name")) ptool._exists.assert_called_with(entry) ptool._makedirs.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) reset() exists_rv.__getitem__.return_value = stat.S_IFDIR | 420 # 0o644 mock_install.return_value = True self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) reset() mock_install.return_value = False self.assertFalse(ptool.install(entry)) mock_install.assert_called_with(ptool, entry) entry.set("prune", "true") prune = ["/test/foo/bar/prune1", "/test/foo/bar/prune2"] for path in prune: lxml.etree.SubElement(entry, "Prune", name=path) reset() mock_install.return_value = True self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) self.assertItemsEqual([c[0][0].get("name") for c in ptool._remove.call_args_list], prune) 
testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py000066400000000000000000000410531303523157100260200ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os import sys import copy import difflib import lxml.etree from Bcfg2.Compat import b64encode, u_str from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.File import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from TestPOSIX.Testbase import TestPOSIXTool from common import * class TestPOSIXFile(TestPOSIXTool): test_obj = POSIXFile def test_fully_specified(self): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/test", type="file") self.assertFalse(ptool.fully_specified(entry)) entry.set("empty", "true") self.assertTrue(ptool.fully_specified(entry)) entry.set("empty", "false") entry.text = "text" self.assertTrue(ptool.fully_specified(entry)) def test_get_data(self): orig_entry = lxml.etree.Element("Path", name="/test", type="file") Bcfg2.Options.setup.encoding = "ascii" ptool = self.get_obj() entry = copy.deepcopy(orig_entry) entry.text = b64encode("test") entry.set("encoding", "base64") self.assertEqual(ptool._get_data(entry), ("test", True)) entry = copy.deepcopy(orig_entry) entry.set("encoding", "base64") entry.set("empty", "true") self.assertEqual(ptool._get_data(entry), ("", True)) entry = copy.deepcopy(orig_entry) entry.set("empty", "true") self.assertEqual(ptool._get_data(entry), ("", False)) entry = copy.deepcopy(orig_entry) self.assertEqual(ptool._get_data(entry), ("", False)) entry = copy.deepcopy(orig_entry) entry.text = "test" self.assertEqual(ptool._get_data(entry), ("test", False)) if inPy3k: ustr = 'é' else: ustr = u_str('é', 'UTF-8') entry = copy.deepcopy(orig_entry) entry.text = ustr self.assertEqual(ptool._get_data(entry), (ustr, False)) @patch("%s.open" % builtins) @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify") def test_verify(self, mock_verify, mock_open): entry = lxml.etree.Element("Path", name="/test", type="file") ptool = self.get_obj() ptool._exists = Mock() ptool._get_data = Mock() ptool._get_diffs = Mock() def reset(): ptool._get_diffs.reset_mock() ptool._get_data.reset_mock() ptool._exists.reset_mock() mock_verify.reset_mock() mock_open.reset_mock() ptool._get_data.return_value = ("test", False) ptool._exists.return_value = False mock_verify.return_value = True self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._get_diffs.assert_called_with(entry, interactive=False, sensitive=False, is_binary=False, content="") reset() exists_rv = MagicMock() exists_rv.__getitem__.return_value = 5 ptool._exists.return_value = exists_rv ptool._get_data.return_value = ("test", True) self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._get_diffs.assert_called_with(entry, interactive=False, sensitive=False, is_binary=True, content=None) reset() ptool._get_data.return_value = ("test", False) exists_rv.__getitem__.return_value = 4 entry.set("sensitive", "true") mock_open.return_value.read.return_value = "tart" self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) 
mock_open.assert_called_with(entry.get("name")) mock_open.return_value.read.assert_called_with() ptool._get_diffs.assert_called_with(entry, interactive=False, sensitive=True, is_binary=False, content="tart") reset() mock_open.return_value.read.return_value = "test" self.assertTrue(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) mock_open.assert_called_with(entry.get("name")) mock_open.return_value.read.assert_called_with() self.assertFalse(ptool._get_diffs.called) reset() mock_open.side_effect = IOError self.assertFalse(ptool.verify(entry, [])) ptool._exists.assert_called_with(entry) mock_open.assert_called_with(entry.get("name")) @patch("os.fdopen") @patch("tempfile.mkstemp") def test_write_tmpfile(self, mock_mkstemp, mock_fdopen): ptool = self.get_obj() ptool._get_data = Mock() entry = lxml.etree.Element("Path", name="/test", type="file", mode='0644', owner='root', group='root') newfile = "/foo/bar" def reset(): ptool._get_data.reset_mock() mock_mkstemp.reset_mock() mock_fdopen.reset_mock() ptool._get_data.return_value = ("test", False) mock_mkstemp.return_value = (5, newfile) self.assertEqual(ptool._write_tmpfile(entry), newfile) ptool._get_data.assert_called_with(entry) mock_mkstemp.assert_called_with(prefix='test', dir='/') mock_fdopen.assert_called_with(5, 'w') mock_fdopen.return_value.write.assert_called_with("test") reset() mock_mkstemp.side_effect = OSError self.assertFalse(ptool._write_tmpfile(entry)) mock_mkstemp.assert_called_with(prefix='test', dir='/') reset() mock_mkstemp.side_effect = None mock_fdopen.side_effect = OSError self.assertFalse(ptool._write_tmpfile(entry)) mock_mkstemp.assert_called_with(prefix='test', dir='/') ptool._get_data.assert_called_with(entry) mock_fdopen.assert_called_with(5, 'w') @patch("os.rename") @patch("os.unlink") def test_rename_tmpfile(self, mock_unlink, mock_rename): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/test", type="file", mode='0644', owner='root', group='root') newfile = "/foo/bar" self.assertTrue(ptool._rename_tmpfile(newfile, entry)) mock_rename.assert_called_with(newfile, entry.get("name")) mock_rename.reset_mock() mock_unlink.reset_mock() mock_rename.side_effect = OSError self.assertFalse(ptool._rename_tmpfile(newfile, entry)) mock_rename.assert_called_with(newfile, entry.get("name")) mock_unlink.assert_called_with(newfile) # even if the unlink fails, return false gracefully mock_rename.reset_mock() mock_unlink.reset_mock() mock_unlink.side_effect = OSError self.assertFalse(ptool._rename_tmpfile(newfile, entry)) mock_rename.assert_called_with(newfile, entry.get("name")) mock_unlink.assert_called_with(newfile) @patch("%s.open" % builtins) @patch("Bcfg2.Utils") def test__get_diffs(self, mock_utils, mock_open): orig_entry = lxml.etree.Element("Path", name="/test", type="file", mode='0644', owner='root', group='root') orig_entry.text = "test" ondisk = "test2" Bcfg2.Options.setup.encoding = "utf-8" ptool = self.get_obj() ptool._get_data = Mock() ptool._diff = Mock() def reset(): mock_utils.is_string.reset_mock() ptool._get_data.reset_mock() ptool._diff.reset_mock() mock_open.reset_mock() return copy.deepcopy(orig_entry) mock_utils.is_string.return_value = True ptool._get_data.return_value = (orig_entry.text, False) mock_open.return_value.read.return_value = ondisk ptool._diff.return_value = ["-test2", "+test"] # binary data in the entry entry = reset() ptool._get_diffs(entry, is_binary=True) mock_open.assert_called_with(entry.get("name")) 
mock_open.return_value.read.assert_any_call() self.assertFalse(ptool._diff.called) self.assertEqual(entry.get("current_bfile"), b64encode(ondisk)) # binary data on disk entry = reset() mock_utils.is_string.return_value = False ptool._get_diffs(entry, content=ondisk) self.assertFalse(mock_open.called) self.assertFalse(ptool._diff.called) self.assertEqual(entry.get("current_bfile"), b64encode(ondisk)) # sensitive, non-interactive -- do nothing entry = reset() mock_utils.is_string.return_value = True ptool._get_diffs(entry, sensitive=True, interactive=False) self.assertFalse(mock_open.called) self.assertFalse(ptool._diff.called) self.assertXMLEqual(entry, orig_entry) # sensitive, interactive entry = reset() ptool._get_diffs(entry, sensitive=True, interactive=True) mock_open.assert_called_with(entry.get("name")) mock_open.return_value.read.assert_any_call() ptool._diff.assert_called_with(ondisk, entry.text, filename=entry.get("name")) self.assertIsNotNone(entry.get("qtext")) del entry.attrib['qtext'] self.assertItemsEqual(orig_entry.attrib, entry.attrib) # non-sensitive, non-interactive entry = reset() ptool._get_diffs(entry, content=ondisk) self.assertFalse(mock_open.called) ptool._diff.assert_called_with(ondisk, entry.text, filename=entry.get("name")) self.assertIsNone(entry.get("qtext")) self.assertEqual(entry.get("current_bdiff"), b64encode("\n".join(ptool._diff.return_value))) del entry.attrib["current_bdiff"] self.assertItemsEqual(orig_entry.attrib, entry.attrib) # non-sensitive, interactive -- do everything. also test # appending to qtext entry = reset() entry.set("qtext", "test") ptool._get_diffs(entry, interactive=True) mock_open.assert_called_with(entry.get("name")) mock_open.return_value.read.assert_any_call() self.assertItemsEqual(ptool._diff.call_args_list, [call(ondisk, entry.text, filename=entry.get("name"))]) self.assertIsNotNone(entry.get("qtext")) self.assertTrue(entry.get("qtext").startswith("test\n")) self.assertEqual(entry.get("current_bdiff"), b64encode("\n".join(ptool._diff.return_value))) del entry.attrib['qtext'] del entry.attrib["current_bdiff"] self.assertItemsEqual(orig_entry.attrib, entry.attrib) # non-sensitive, interactive with unicode data entry = reset() entry.text = u("tëst") encoded = entry.text.encode(Bcfg2.Options.setup.encoding) ptool._diff.return_value = ["-test2", "+tëst"] ptool._get_data.return_value = (encoded, False) ptool._get_diffs(entry, interactive=True) mock_open.assert_called_with(entry.get("name")) mock_open.return_value.read.assert_any_call() self.assertItemsEqual(ptool._diff.call_args_list, [call(ondisk, encoded, filename=entry.get("name"))]) self.assertIsNotNone(entry.get("qtext")) self.assertEqual(entry.get("current_bdiff"), b64encode("\n".join(ptool._diff.return_value))) del entry.attrib['qtext'] del entry.attrib["current_bdiff"] self.assertItemsEqual(orig_entry.attrib, entry.attrib) @patch("os.path.exists") @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install") def test_install(self, mock_install, mock_exists): ptool = self.get_obj() ptool._makedirs = Mock() ptool._set_perms = Mock() ptool._write_tmpfile = Mock() ptool._rename_tmpfile = Mock() entry = lxml.etree.Element("Path", name="/test", type="file", mode='0644', owner='root', group='root') def reset(): ptool._rename_tmpfile.reset_mock() ptool._write_tmpfile.reset_mock() ptool._set_perms.reset_mock() ptool._makedirs.reset_mock() mock_install.reset_mock() mock_exists.reset_mock() mock_exists.return_value = False ptool._makedirs.return_value = False 
self.assertFalse(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") reset() ptool._makedirs.return_value = True ptool._write_tmpfile.return_value = False self.assertFalse(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") ptool._write_tmpfile.assert_called_with(entry) reset() newfile = '/test.X987yS' ptool._write_tmpfile.return_value = newfile ptool._set_perms.return_value = False ptool._rename_tmpfile.return_value = False self.assertFalse(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") ptool._write_tmpfile.assert_called_with(entry) ptool._set_perms.assert_called_with(entry, path=newfile) ptool._rename_tmpfile.assert_called_with(newfile, entry) reset() ptool._rename_tmpfile.return_value = True mock_install.return_value = False self.assertFalse(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") ptool._write_tmpfile.assert_called_with(entry) ptool._set_perms.assert_called_with(entry, path=newfile) ptool._rename_tmpfile.assert_called_with(newfile, entry) mock_install.assert_called_with(ptool, entry) reset() mock_install.return_value = True self.assertFalse(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") ptool._write_tmpfile.assert_called_with(entry) ptool._set_perms.assert_called_with(entry, path=newfile) ptool._rename_tmpfile.assert_called_with(newfile, entry) mock_install.assert_called_with(ptool, entry) reset() ptool._set_perms.return_value = True self.assertTrue(ptool.install(entry)) mock_exists.assert_called_with("/") ptool._makedirs.assert_called_with(entry, path="/") ptool._write_tmpfile.assert_called_with(entry) ptool._set_perms.assert_called_with(entry, path=newfile) ptool._rename_tmpfile.assert_called_with(newfile, entry) mock_install.assert_called_with(ptool, entry) reset() mock_exists.return_value = True self.assertTrue(ptool.install(entry)) mock_exists.assert_called_with("/") self.assertFalse(ptool._makedirs.called) ptool._write_tmpfile.assert_called_with(entry) ptool._set_perms.assert_called_with(entry, path=newfile) ptool._rename_tmpfile.assert_called_with(newfile, entry) mock_install.assert_called_with(ptool, entry) @patch("difflib.unified_diff") def test_diff(self, mock_diff): ptool = self.get_obj() filename = "/test" content1 = "line1\nline2" content2 = "line3" rv = ["line1", "line2", "line3"] mock_diff.return_value = rv self.assertItemsEqual(ptool._diff(content1, content2), rv) mock_diff.assert_called_with(["line1", "line2"], ["line3"], fromfile='', tofile='') mock_diff.reset_mock() self.assertItemsEqual(ptool._diff(content1, content2, filename=filename), rv) mock_diff.assert_called_with(["line1", "line2"], ["line3"], fromfile='/test (on disk)', tofile='/test (from bcfg2)') testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestHardlink.py000066400000000000000000000025561303523157100267020ustar00rootroot00000000000000import os import sys import copy import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.Hardlink import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from Testbase import 
TestPOSIXLinkTool from common import * class TestPOSIXHardlink(TestPOSIXLinkTool): test_obj = POSIXHardlink @patch("os.path.samefile") def test__verify(self, mock_samefile): entry = lxml.etree.Element("Path", name="/test", type="hardlink", to="/dest") ptool = self.get_obj() self.assertEqual(ptool._verify(entry), mock_samefile.return_value) self.assertItemsEqual(mock_samefile.call_args[0], [entry.get("name"), entry.get("to")]) @patch("os.link") def test__link(self, mock_link): entry = lxml.etree.Element("Path", name="/test", type="hardlink", to="/dest") ptool = self.get_obj() self.assertEqual(ptool._link(entry), mock_link.return_value) mock_link.assert_called_with(entry.get("to"), entry.get("name")) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestNonexistent.py000066400000000000000000000046361303523157100274650ustar00rootroot00000000000000import os import sys import copy import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.Nonexistent import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from Test__init import get_config from Testbase import TestPOSIXTool from common import * class TestPOSIXNonexistent(TestPOSIXTool): test_obj = POSIXNonexistent @patch("os.path.lexists") def test_verify(self, mock_lexists): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/test", type="nonexistent") for val in [True, False]: mock_lexists.reset_mock() mock_lexists.return_value = val self.assertEqual(ptool.verify(entry, []), not val) mock_lexists.assert_called_with(entry.get("name")) def test_install(self): entry = lxml.etree.Element("Path", name="/test", type="nonexistent") ptool = self.get_obj() ptool._remove = Mock() def reset(): ptool._remove.reset_mock() self.assertTrue(ptool.install(entry)) ptool._remove.assert_called_with(entry, recursive=False) reset() entry.set("recursive", "true") self.assertTrue(ptool.install(entry)) ptool._remove.assert_called_with(entry, recursive=True) reset() child_entry = lxml.etree.Element("Path", name="/test/foo", type="nonexistent") ptool = self.get_obj(config=get_config([child_entry])) ptool._remove = Mock() self.assertTrue(ptool.install(entry)) ptool._remove.assert_called_with(entry, recursive=True) reset() child_entry = lxml.etree.Element("Path", name="/test/foo", type="file") ptool = self.get_obj(config=get_config([child_entry])) ptool._remove = Mock() self.assertFalse(ptool.install(entry)) self.assertFalse(ptool._remove.called) reset() entry.set("recursive", "false") ptool._remove.side_effect = OSError self.assertFalse(ptool.install(entry)) ptool._remove.assert_called_with(entry, recursive=False) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestPermissions.py000066400000000000000000000002431303523157100274500ustar00rootroot00000000000000from Bcfg2.Client.Tools.POSIX.Permissions import * from Testbase import TestPOSIXTool class TestPOSIXPermissions(TestPOSIXTool): test_obj = POSIXPermissions testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestSymlink.py000066400000000000000000000030211303523157100265600ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Client.Tools.POSIX.Symlink import * # add all parent testsuite directories to sys.path to allow (most) # relative 
imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from Testbase import TestPOSIXLinkTool from common import * class TestPOSIXSymlink(TestPOSIXLinkTool): test_obj = POSIXSymlink @patch("os.readlink") def test__verify(self, mock_readlink): entry = lxml.etree.Element("Path", name="/test", type="symlink", to="/dest") ptool = self.get_obj() mock_readlink.return_value = entry.get("to") self.assertTrue(ptool._verify(entry)) mock_readlink.assert_called_with(entry.get("name")) mock_readlink.reset_mock() mock_readlink.return_value = "/bogus" self.assertFalse(ptool._verify(entry)) mock_readlink.assert_called_with(entry.get("name")) @patch("os.symlink") def test__link(self, mock_symlink): entry = lxml.etree.Element("Path", name="/test", type="symlink", to="/dest") ptool = self.get_obj() self.assertEqual(ptool._link(entry), mock_symlink.return_value) mock_symlink.assert_called_with(entry.get("to"), entry.get("name")) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Test__init.py000066400000000000000000000220501303523157100263760ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch import Bcfg2.Client.Tools from Bcfg2.Client.Tools.POSIX import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestTools.Test_init import TestTool def get_config(entries): config = lxml.etree.Element("Configuration") bundle = lxml.etree.SubElement(config, "Bundle", name="test") bundle.extend(entries) return config class TestPOSIX(TestTool): test_obj = POSIX def test__init(self): entries = [lxml.etree.Element("Path", name="test", type="file")] posix = self.get_obj(config=get_config(entries)) self.assertIsInstance(posix, Bcfg2.Client.Tools.Tool) self.assertIsInstance(posix, POSIX) self.assertIn('Path', posix.__req__) self.assertGreater(len(posix.__req__['Path']), 0) self.assertGreater(len(posix.__handles__), 0) self.assertItemsEqual(posix.handled, entries) @patch("Bcfg2.Client.Tools.Tool.canVerify") def test_canVerify(self, mock_canVerify): posix = self.get_obj() entry = lxml.etree.Element("Path", name="test", type="file") # first, test superclass canVerify failure mock_canVerify.return_value = False self.assertFalse(posix.canVerify(entry)) mock_canVerify.assert_called_with(posix, entry) # next, test fully_specified failure mock_canVerify.reset_mock() mock_canVerify.return_value = True mock_fully_spec = Mock() mock_fully_spec.return_value = False posix._handlers[entry.get("type")].fully_specified = \ mock_fully_spec self.assertFalse(posix.canVerify(entry)) mock_canVerify.assert_called_with(posix, entry) mock_fully_spec.assert_called_with(entry) # finally, test success mock_canVerify.reset_mock() mock_fully_spec.reset_mock() mock_fully_spec.return_value = True self.assertTrue(posix.canVerify(entry)) mock_canVerify.assert_called_with(posix, entry) mock_fully_spec.assert_called_with(entry) @patch("Bcfg2.Client.Tools.Tool.canInstall") def test_canInstall(self, mock_canInstall): posix = self.get_obj() entry = lxml.etree.Element("Path", name="test", type="file") # first, test superclass canInstall failure 
mock_canInstall.return_value = False self.assertFalse(posix.canInstall(entry)) mock_canInstall.assert_called_with(posix, entry) # next, test fully_specified failure mock_canInstall.reset_mock() mock_canInstall.return_value = True mock_fully_spec = Mock() mock_fully_spec.return_value = False posix._handlers[entry.get("type")].fully_specified = \ mock_fully_spec self.assertFalse(posix.canInstall(entry)) mock_canInstall.assert_called_with(posix, entry) mock_fully_spec.assert_called_with(entry) # finally, test success mock_canInstall.reset_mock() mock_fully_spec.reset_mock() mock_fully_spec.return_value = True self.assertTrue(posix.canInstall(entry)) mock_canInstall.assert_called_with(posix, entry) mock_fully_spec.assert_called_with(entry) def test_InstallPath(self): posix = self.get_obj() entry = lxml.etree.Element("Path", name="test", type="file") mock_install = Mock() mock_install.return_value = True posix._handlers[entry.get("type")].install = mock_install self.assertTrue(posix.InstallPath(entry)) mock_install.assert_called_with(entry) def test_VerifyPath(self): posix = self.get_obj() entry = lxml.etree.Element("Path", name="test", type="file") modlist = [] mock_verify = Mock() mock_verify.return_value = True posix._handlers[entry.get("type")].verify = mock_verify self.assertTrue(posix.VerifyPath(entry, modlist)) mock_verify.assert_called_with(entry, modlist) mock_verify.reset_mock() mock_verify.return_value = False Bcfg2.Options.setup.interactive = True self.assertFalse(posix.VerifyPath(entry, modlist)) self.assertIsNotNone(entry.get('qtext')) @patch('os.remove') def test_prune_old_backups(self, mock_remove): entry = lxml.etree.Element("Path", name="/etc/foo", type="file") Bcfg2.Options.setup.paranoid_path = '/' Bcfg2.Options.setup.paranoid_copies = 5 Bcfg2.Options.setup.paranoid = True posix = self.get_obj() remove = ["_etc_foo_2012-07-20T04:13:22.364989", "_etc_foo_2012-07-31T04:13:23.894958", "_etc_foo_2012-07-17T04:13:22.493316",] keep = ["_etc_foo_bar_2011-08-07T04:13:22.519978", "_etc_foo_2012-08-04T04:13:22.519978", "_etc_Foo_2011-08-07T04:13:22.519978", "_etc_foo_2012-08-06T04:13:22.519978", "_etc_foo_2012-08-03T04:13:22.191895", "_etc_test_2011-08-07T04:13:22.519978", "_etc_foo_2012-08-07T04:13:22.519978",] @patch('os.listdir') def inner(mock_listdir): mock_listdir.side_effect = OSError posix._prune_old_backups(entry) self.assertFalse(mock_remove.called) mock_listdir.assert_called_with(Bcfg2.Options.setup.paranoid_path) mock_listdir.reset_mock() mock_remove.reset_mock() mock_listdir.side_effect = None mock_listdir.return_value = keep + remove posix._prune_old_backups(entry) mock_listdir.assert_called_with(Bcfg2.Options.setup.paranoid_path) self.assertItemsEqual(mock_remove.call_args_list, [call(os.path.join(Bcfg2.Options.setup.paranoid_path, p)) for p in remove]) mock_listdir.reset_mock() mock_remove.reset_mock() mock_remove.side_effect = OSError # test to ensure that we call os.remove() for all files that # need to be removed even if we get an error posix._prune_old_backups(entry) mock_listdir.assert_called_with(Bcfg2.Options.setup.paranoid_path) self.assertItemsEqual(mock_remove.call_args_list, [call(os.path.join(Bcfg2.Options.setup.paranoid_path, p)) for p in remove]) inner() @patch("shutil.copy") @patch("os.path.isdir") def test_paranoid_backup(self, mock_isdir, mock_copy): entry = lxml.etree.Element("Path", name="/etc/foo", type="file") Bcfg2.Options.setup.paranoid_path = '/' Bcfg2.Options.setup.paranoid_copies = 5 Bcfg2.Options.setup.paranoid = False posix = self.get_obj() 
posix._prune_old_backups = Mock() # paranoid false globally posix._paranoid_backup(entry) self.assertFalse(posix._prune_old_backups.called) self.assertFalse(mock_copy.called) # paranoid false on the entry Bcfg2.Options.setup.paranoid = True def reset(): mock_isdir.reset_mock() mock_copy.reset_mock() posix._prune_old_backups.reset_mock() reset() posix._paranoid_backup(entry) self.assertFalse(posix._prune_old_backups.called) self.assertFalse(mock_copy.called) # entry does not exist on filesystem reset() entry.set("paranoid", "true") entry.set("current_exists", "false") posix._paranoid_backup(entry) self.assertFalse(posix._prune_old_backups.called) self.assertFalse(mock_copy.called) # entry is a directory on the filesystem reset() entry.set("current_exists", "true") mock_isdir.return_value = True posix._paranoid_backup(entry) self.assertFalse(posix._prune_old_backups.called) self.assertFalse(mock_copy.called) mock_isdir.assert_called_with(entry.get("name")) # test the actual backup now reset() mock_isdir.return_value = False posix._paranoid_backup(entry) mock_isdir.assert_called_with(entry.get("name")) posix._prune_old_backups.assert_called_with(entry) # it's basically impossible to test the shutil.copy() call # exactly because the destination includes microseconds, so we # just test it good enough self.assertEqual(mock_copy.call_args[0][0], entry.get("name")) bkupnam = os.path.join(Bcfg2.Options.setup.paranoid_path, entry.get('name').replace('/', '_')) + '_' self.assertEqual(bkupnam, mock_copy.call_args[0][1][:len(bkupnam)]) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py000066400000000000000000001400221303523157100260470ustar00rootroot00000000000000import os import sys import copy import stat import lxml.etree from mock import Mock, MagicMock, patch import Bcfg2.Client.Tools from Bcfg2.Client.Tools.POSIX.base import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from TestTools.Test_init import TestTool from common import * try: import selinux HAS_SELINUX = True except ImportError: HAS_SELINUX = False try: import posix1e HAS_ACLS = True except ImportError: HAS_ACLS = False class TestPOSIXTool(TestTool): test_obj = POSIXTool def test_fully_specified(self): # fully_specified should do no checking on the abstract # POSIXTool object ptool = self.get_obj() self.assertTrue(ptool.fully_specified(Mock())) @patch('os.stat') @patch('os.walk') def test_verify(self, mock_walk, mock_stat): ptool = self.get_obj() ptool._verify_metadata = Mock() entry = lxml.etree.Element("Path", name="/test", type="file") mock_stat.return_value = MagicMock() ptool._verify_metadata.return_value = False self.assertFalse(ptool.verify(entry, [])) ptool._verify_metadata.assert_called_with(entry) ptool._verify_metadata.reset_mock() ptool._verify_metadata.return_value = True self.assertTrue(ptool.verify(entry, [])) ptool._verify_metadata.assert_called_with(entry) ptool._verify_metadata.reset_mock() entry.set("recursive", "true") walk_rv = [("/", ["dir1", "dir2"], ["file1", "file2"]), ("/dir1", ["dir3"], []), ("/dir2", [], ["file3", "file4"])] mock_walk.return_value = walk_rv self.assertTrue(ptool.verify(entry, [])) mock_walk.assert_called_with(entry.get("name")) all_verifies = [call(entry)] for root, dirs, files in walk_rv: 
all_verifies.extend([call(entry, path=os.path.join(root, p)) for p in dirs + files]) self.assertItemsEqual(ptool._verify_metadata.call_args_list, all_verifies) @patch('os.walk') def test_install(self, mock_walk): ptool = self.get_obj() ptool._set_perms = Mock() entry = lxml.etree.Element("Path", name="/test", type="file") ptool._set_perms.return_value = True self.assertTrue(ptool.install(entry)) ptool._set_perms.assert_called_with(entry) ptool._set_perms.reset_mock() entry.set("recursive", "true") walk_rv = [("/", ["dir1", "dir2"], ["file1", "file2"]), ("/dir1", ["dir3"], []), ("/dir2", [], ["file3", "file4"])] mock_walk.return_value = walk_rv ptool._set_perms.return_value = True self.assertTrue(ptool.install(entry)) mock_walk.assert_called_with(entry.get("name")) all_set_perms = [call(entry)] for root, dirs, files in walk_rv: all_set_perms.extend([call(entry, path=os.path.join(root, p)) for p in dirs + files]) self.assertItemsEqual(ptool._set_perms.call_args_list, all_set_perms) mock_walk.reset_mock() ptool._set_perms.reset_mock() def set_perms_rv(entry, path=None): if path == '/dir2/file3': return False else: return True ptool._set_perms.side_effect = set_perms_rv self.assertFalse(ptool.install(entry)) mock_walk.assert_called_with(entry.get("name")) self.assertItemsEqual(ptool._set_perms.call_args_list, all_set_perms) @patch('os.rmdir') @patch('os.unlink') @patch('shutil.rmtree') @patch('os.path.isdir') @patch('os.path.islink') def test_remove(self, mock_islink, mock_isdir, mock_rmtree, mock_unlink, mock_rmdir): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/etc/foo") def reset(): mock_islink.reset_mock() mock_isdir.reset_mock() mock_rmtree.reset_mock() mock_unlink.reset_mock() mock_rmdir.reset_mock() mock_islink.return_value = True mock_isdir.return_value = False ptool._remove(entry) mock_unlink.assert_called_with(entry.get('name')) self.assertFalse(mock_rmtree.called) self.assertFalse(mock_rmdir.called) reset() mock_islink.return_value = False mock_isdir.return_value = True ptool._remove(entry) mock_rmtree.assert_called_with(entry.get('name')) self.assertFalse(mock_unlink.called) self.assertFalse(mock_rmdir.called) reset() ptool._remove(entry, recursive=False) mock_rmdir.assert_called_with(entry.get('name')) self.assertFalse(mock_unlink.called) self.assertFalse(mock_rmtree.called) reset() mock_islink.return_value = False mock_isdir.return_value = False ptool._remove(entry, recursive=False) mock_unlink.assert_called_with(entry.get('name')) self.assertFalse(mock_rmtree.called) self.assertFalse(mock_rmdir.called) @patch('os.lstat') def test_exists(self, mock_lstat): entry = lxml.etree.Element("Path", name="/etc/foo", type="file") ptool = self.get_obj() ptool._remove = Mock() def reset(): mock_lstat.reset_mock() ptool._remove.reset_mock() mock_lstat.side_effect = OSError self.assertFalse(ptool._exists(entry)) mock_lstat.assert_called_with(entry.get('name')) self.assertFalse(ptool._remove.called) reset() rv = MagicMock() mock_lstat.return_value = rv mock_lstat.side_effect = None self.assertEqual(ptool._exists(entry), rv) mock_lstat.assert_called_with(entry.get('name')) self.assertFalse(ptool._remove.called) reset() self.assertEqual(ptool._exists(entry, remove=True), None) mock_lstat.assert_called_with(entry.get('name')) ptool._remove.assert_called_with(entry) reset() ptool._remove.side_effect = OSError self.assertEqual(ptool._exists(entry, remove=True), rv) mock_lstat.assert_called_with(entry.get('name')) ptool._remove.assert_called_with(entry) @patch("os.chown") 
@patch("os.chmod") @patch("os.utime") @patch("os.geteuid") def test_set_perms(self, mock_geteuid, mock_utime, mock_chmod, mock_chown): ptool = self.get_obj() ptool._norm_entry_uid = Mock() ptool._norm_entry_gid = Mock() ptool._set_acls = Mock() ptool._set_secontext = Mock() def reset(): ptool._set_secontext.reset_mock() ptool._set_acls.reset_mock() ptool._norm_entry_gid.reset_mock() ptool._norm_entry_uid.reset_mock() mock_chmod.reset_mock() mock_chown.reset_mock() mock_utime.reset_mock() mock_geteuid.reset_mock() # pretend to run as root mock_geteuid.return_value = 0 # test symlink -- no owner, group, permissions entry = lxml.etree.Element("Path", name="/etc/foo", to="/etc/bar", type="symlink") ptool._set_acls.return_value = True ptool._set_secontext.return_value = True self.assertTrue(ptool._set_perms(entry)) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) # test file with owner, group, permissions reset() entry = lxml.etree.Element("Path", name="/etc/foo", owner="owner", group="group", mode="644", type="file") ptool._norm_entry_uid.return_value = 10 ptool._norm_entry_gid.return_value = 100 self.assertTrue(ptool._set_perms(entry)) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with(entry.get("name"), 10, 100) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8)) self.assertFalse(mock_utime.called) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) # test file with owner, group, permissions, run as non-root mock_geteuid.return_value = 1000 reset() entry = lxml.etree.Element("Path", name="/etc/foo", owner="owner", group="group", mode="644", type="file") self.assertTrue(ptool._set_perms(entry)) self.assertFalse(ptool._norm_entry_uid.called) self.assertFalse(ptool._norm_entry_gid.called) self.assertFalse(mock_chown.called) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8)) self.assertFalse(mock_utime.called) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) mock_geteuid.return_value = 0 # test with mtime reset() mtime = 1344459042 entry.set("mtime", str(mtime)) self.assertTrue(ptool._set_perms(entry)) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with(entry.get("name"), 10, 100) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8)) mock_utime.assert_called_with(entry.get("name"), (mtime, mtime)) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) reset() self.assertTrue(ptool._set_perms(entry, path='/etc/bar')) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with('/etc/bar', 10, 100) mock_chmod.assert_called_with('/etc/bar', int(entry.get("mode"), 8)) mock_utime.assert_called_with(entry.get("name"), (mtime, mtime)) ptool._set_secontext.assert_called_with(entry, path='/etc/bar') ptool._set_acls.assert_called_with(entry, path='/etc/bar') # test dev_type modification of perms, failure of chown reset() def chown_rv(path, owner, group): if owner == 0 and group == 0: return True else: raise KeyError os.chown.side_effect = chown_rv entry.set("type", "device") 
entry.set("dev_type", list(device_map.keys())[0]) self.assertFalse(ptool._set_perms(entry)) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with(entry.get("name"), 0, 0) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8) | list(device_map.values())[0]) mock_utime.assert_called_with(entry.get("name"), (mtime, mtime)) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) # test failure of chmod reset() os.chown.side_effect = None os.chmod.side_effect = OSError entry.set("type", "file") del entry.attrib["dev_type"] self.assertFalse(ptool._set_perms(entry)) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with(entry.get("name"), 10, 100) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8)) mock_utime.assert_called_with(entry.get("name"), (mtime, mtime)) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) # test that even when everything fails, we try to do it all. # e.g., when chmod fails, we still try to apply acls, set # selinux context, etc. reset() os.chown.side_effect = OSError os.utime.side_effect = OSError ptool._set_acls.return_value = False ptool._set_secontext.return_value = False self.assertFalse(ptool._set_perms(entry)) ptool._norm_entry_uid.assert_called_with(entry) ptool._norm_entry_gid.assert_called_with(entry) mock_chown.assert_called_with(entry.get("name"), 10, 100) mock_chmod.assert_called_with(entry.get("name"), int(entry.get("mode"), 8)) mock_utime.assert_called_with(entry.get("name"), (mtime, mtime)) ptool._set_secontext.assert_called_with(entry, path=entry.get("name")) ptool._set_acls.assert_called_with(entry, path=entry.get("name")) @skipUnless(HAS_ACLS, "ACLS not found, skipping") @patchIf(HAS_ACLS, "posix1e.ACL") @patchIf(HAS_ACLS, "posix1e.Entry") @patch("os.path.isdir") def test_set_acls(self, mock_isdir, mock_Entry, mock_ACL): ptool = self.get_obj() ptool._list_entry_acls = Mock() ptool._norm_uid = Mock() ptool._norm_gid = Mock() entry = lxml.etree.Element("Path", name="/etc/foo", type="file") # disable acls for the initial test Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = False self.assertTrue(ptool._set_acls(entry)) Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = True # build a set of file ACLs to return from posix1e.ACL(file=...) 
file_acls = [] acl = Mock() acl.tag_type = posix1e.ACL_USER acl.name = "remove" file_acls.append(acl) acl = Mock() acl.tag_type = posix1e.ACL_GROUP acl.name = "remove" file_acls.append(acl) acl = Mock() acl.tag_type = posix1e.ACL_MASK acl.name = "keep" file_acls.append(acl) remove_acls = [a for a in file_acls if a.name == "remove"] # build a set of ACLs listed on the entry as returned by # _list_entry_acls() entry_acls = {("default", posix1e.ACL_USER, "user"): 7, ("access", posix1e.ACL_GROUP, "group"): 5} ptool._list_entry_acls.return_value = entry_acls ptool._norm_uid.return_value = 10 ptool._norm_gid.return_value = 100 # set up the unreasonably complex return value for # posix1e.ACL(), which has three separate uses fileacl_rv = MagicMock() fileacl_rv.valid.return_value = True fileacl_rv.__iter__.return_value = iter(file_acls) filedef_rv = MagicMock() filedef_rv.valid.return_value = True filedef_rv.__iter__.return_value = iter(file_acls) acl_rv = MagicMock() def mock_acl_rv(file=None, filedef=None, acl=None): if file: return fileacl_rv elif filedef: return filedef_rv elif acl: return acl_rv # set up the equally unreasonably complex return value for # posix1e.Entry, which returns a new entry and adds it to # an ACL, so we have to track the Mock objects it returns. # why can't they just have an acl.add_entry() method?!? acl_entries = [] def mock_entry_rv(acl): rv = MagicMock() rv.acl = acl rv.permset = set() acl_entries.append(rv) return rv mock_Entry.side_effect = mock_entry_rv def reset(): mock_isdir.reset_mock() mock_ACL.reset_mock() mock_Entry.reset_mock() fileacl_rv.reset_mock() # test fs mounted noacl mock_ACL.side_effect = IOError(95, "Operation not permitted") self.assertFalse(ptool._set_acls(entry)) # test other error reset() mock_ACL.side_effect = IOError self.assertFalse(ptool._set_acls(entry)) reset() mock_ACL.side_effect = mock_acl_rv mock_isdir.return_value = True self.assertTrue(ptool._set_acls(entry)) self.assertItemsEqual(mock_ACL.call_args_list, [call(file=entry.get("name")), call(filedef=entry.get("name"))]) self.assertItemsEqual(fileacl_rv.delete_entry.call_args_list, [call(a) for a in remove_acls]) self.assertItemsEqual(filedef_rv.delete_entry.call_args_list, [call(a) for a in remove_acls]) ptool._list_entry_acls.assert_called_with(entry) ptool._norm_uid.assert_called_with("user") ptool._norm_gid.assert_called_with("group") fileacl_rv.calc_mask.assert_any_call() fileacl_rv.applyto.assert_called_with(entry.get("name"), posix1e.ACL_TYPE_ACCESS) filedef_rv.calc_mask.assert_any_call() filedef_rv.applyto.assert_called_with(entry.get("name"), posix1e.ACL_TYPE_DEFAULT) # build tuples of the Entry objects that were added to acl # and defacl so they're easier to compare for equality added_acls = [] for acl in acl_entries: added_acls.append((acl.acl, acl.tag_type, acl.qualifier, sum(acl.permset))) self.assertItemsEqual(added_acls, [(filedef_rv, posix1e.ACL_USER, 10, 7), (fileacl_rv, posix1e.ACL_GROUP, 100, 5)]) reset() # have to reassign these because they're iterators, and # they've already been iterated over once fileacl_rv.__iter__.return_value = iter(file_acls) filedef_rv.__iter__.return_value = iter(file_acls) ptool._list_entry_acls.reset_mock() ptool._norm_uid.reset_mock() ptool._norm_gid.reset_mock() mock_isdir.return_value = False acl_entries = [] self.assertTrue(ptool._set_acls(entry, path="/bin/bar")) mock_ACL.assert_called_with(file="/bin/bar") self.assertItemsEqual(fileacl_rv.delete_entry.call_args_list, [call(a) for a in remove_acls]) 
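# with isdir returning False only the access ACL is handled here -- unlike
# the directory case above, no call(filedef=...) or default-ACL entries
# are expected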
ptool._list_entry_acls.assert_called_with(entry) ptool._norm_gid.assert_called_with("group") fileacl_rv.calc_mask.assert_any_call() fileacl_rv.applyto.assert_called_with("/bin/bar", posix1e.ACL_TYPE_ACCESS) added_acls = [] for acl in acl_entries: added_acls.append((acl.acl, acl.tag_type, acl.qualifier, sum(acl.permset))) self.assertItemsEqual(added_acls, [(fileacl_rv, posix1e.ACL_GROUP, 100, 5)]) @skipUnless(HAS_SELINUX, "SELinux not found, skipping") @patchIf(HAS_SELINUX, "selinux.restorecon") @patchIf(HAS_SELINUX, "selinux.lgetfilecon") @patchIf(HAS_SELINUX, "selinux.lsetfilecon") def test_set_secontext(self, mock_lsetfilecon, mock_lgetfilecon, mock_restorecon): Bcfg2.Options.setup.secontext_ignore = ['dosfs_t'] ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/etc/foo", type="file") mock_lgetfilecon.return_value = (0, "system_u:object_r:foo_t") # disable selinux for the initial test Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = False self.assertTrue(ptool._set_secontext(entry)) Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = True # no context given self.assertTrue(ptool._set_secontext(entry)) self.assertFalse(mock_restorecon.called) self.assertFalse(mock_lsetfilecon.called) self.assertFalse(mock_lgetfilecon.called) mock_restorecon.reset_mock() mock_lsetfilecon.reset_mock() mock_lgetfilecon.reset_mock() entry.set("secontext", "__default__") self.assertTrue(ptool._set_secontext(entry)) mock_restorecon.assert_called_with(entry.get("name")) mock_lgetfilecon.assert_called_once_with(entry.get("name")) self.assertFalse(mock_lsetfilecon.called) mock_restorecon.reset_mock() mock_lsetfilecon.reset_mock() mock_lgetfilecon.reset_mock() mock_lsetfilecon.return_value = 0 entry.set("secontext", "foo_t") self.assertTrue(ptool._set_secontext(entry)) self.assertFalse(mock_restorecon.called) mock_lgetfilecon.assert_called_once_with(entry.get("name")) mock_lsetfilecon.assert_called_with(entry.get("name"), "foo_t") mock_restorecon.reset_mock() mock_lsetfilecon.reset_mock() mock_lgetfilecon.reset_mock() mock_lsetfilecon.return_value = 1 self.assertFalse(ptool._set_secontext(entry)) self.assertFalse(mock_restorecon.called) mock_lgetfilecon.assert_called_once_with(entry.get("name")) mock_lsetfilecon.assert_called_with(entry.get("name"), "foo_t") # ignored filesystem mock_restorecon.reset_mock() mock_lsetfilecon.reset_mock() mock_lgetfilecon.reset_mock() mock_lgetfilecon.return_value = (0, "system_u:object_r:dosfs_t") self.assertTrue(ptool._set_secontext(entry)) self.assertFalse(mock_restorecon.called) self.assertFalse(mock_lsetfilecon.called) mock_lgetfilecon.assert_called_once_with(entry.get("name")) @patch("grp.getgrnam") def test_norm_gid(self, mock_getgrnam): ptool = self.get_obj() self.assertEqual(5, ptool._norm_gid("5")) self.assertFalse(mock_getgrnam.called) mock_getgrnam.reset_mock() mock_getgrnam.return_value = ("group", "x", 5, []) self.assertEqual(5, ptool._norm_gid("group")) mock_getgrnam.assert_called_with("group") def test_norm_entry_gid(self): ptool = self.get_obj() ptool._norm_gid = Mock() entry = lxml.etree.Element("Path", name="/test", type="file", group="group", owner="user") self.assertEqual(ptool._norm_entry_gid(entry), ptool._norm_gid.return_value) ptool._norm_gid.assert_called_with(entry.get("group")) ptool._norm_gid.reset_mock() ptool._norm_gid.side_effect = KeyError self.assertEqual(ptool._norm_entry_gid(entry), 0) ptool._norm_gid.assert_called_with(entry.get("group")) @patch("pwd.getpwnam") def test_norm_uid(self, mock_getpwnam): ptool = self.get_obj() self.assertEqual(5, 
ptool._norm_uid("5")) self.assertFalse(mock_getpwnam.called) mock_getpwnam.reset_mock() mock_getpwnam.return_value = ("user", "x", 5, 5, "User", "/home/user", "/bin/zsh") self.assertEqual(5, ptool._norm_uid("user")) mock_getpwnam.assert_called_with("user") def test_norm_entry_uid(self): ptool = self.get_obj() ptool._norm_uid = Mock() entry = lxml.etree.Element("Path", name="/test", type="file", group="group", owner="user") self.assertEqual(ptool._norm_entry_uid(entry), ptool._norm_uid.return_value) ptool._norm_uid.assert_called_with(entry.get("owner")) ptool._norm_uid.reset_mock() ptool._norm_uid.side_effect = KeyError self.assertEqual(ptool._norm_entry_uid(entry), 0) ptool._norm_uid.assert_called_with(entry.get("owner")) def test_norm_acl_perms(self): # there's basically no reasonable way to test the Permset # object parsing feature without writing our own Mock object # that re-implements Permset.test(). silly pylibacl won't let # us create standalone Entry or Permset objects. ptool = self.get_obj() self.assertEqual(5, ptool._norm_acl_perms("5")) self.assertEqual(0, ptool._norm_acl_perms("55")) self.assertEqual(5, ptool._norm_acl_perms("rx")) self.assertEqual(5, ptool._norm_acl_perms("r-x")) self.assertEqual(6, ptool._norm_acl_perms("wr-")) self.assertEqual(0, ptool._norm_acl_perms("rwrw")) self.assertEqual(0, ptool._norm_acl_perms("-")) self.assertEqual(0, ptool._norm_acl_perms("a")) self.assertEqual(6, ptool._norm_acl_perms("rwa")) self.assertEqual(4, ptool._norm_acl_perms("rr")) @patch('os.lstat') def test__gather_data(self, mock_lstat): ptool = self.get_obj() path = '/test' mock_lstat.side_effect = OSError self.assertFalse(ptool._gather_data(path)[0]) mock_lstat.assert_called_with(path) mock_lstat.reset_mock() mock_lstat.side_effect = None # create a return value stat_rv = MagicMock() def stat_getitem(key): if int(key) == stat.ST_UID: return 0 elif int(key) == stat.ST_GID: return 10 elif int(key) == stat.ST_MODE: # return extra bits in the mode to emulate a device # and ensure that they're stripped return int('060660', 8) stat_rv.__getitem__ = Mock(side_effect=stat_getitem) mock_lstat.return_value = stat_rv # disable selinux and acls for this call -- we test them # separately so that we can skip those tests as appropriate states = (Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX, Bcfg2.Client.Tools.POSIX.base.HAS_ACLS) Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = False Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = False self.assertEqual(ptool._gather_data(path), (stat_rv, '0', '10', '0660', None, None)) Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX, \ Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = states mock_lstat.assert_called_with(path) @skipUnless(HAS_SELINUX, "SELinux not found, skipping") def test__gather_data_selinux(self): ptool = self.get_obj() context = 'system_u:object_r:root_t:s0' path = '/test' @patch('os.lstat') @patchIf(HAS_SELINUX, "selinux.lgetfilecon") def inner(mock_lgetfilecon, mock_lstat): mock_lgetfilecon.return_value = [len(context) + 1, context] mock_lstat.return_value = MagicMock() mock_lstat.return_value.__getitem__.return_value = MagicMock() # disable acls for this call and test them separately state = (Bcfg2.Client.Tools.POSIX.base.HAS_ACLS, Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX) Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = False Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = True self.assertEqual(ptool._gather_data(path)[4], 'root_t') Bcfg2.Client.Tools.POSIX.base.HAS_ACLS, \ Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = state mock_lgetfilecon.assert_called_with(path) inner() 
@skipUnless(HAS_ACLS, "ACLS not found, skipping") @patch('os.lstat') @patch('stat.S_ISLNK') def test__gather_data_acls(self, mock_S_ISLNK, mock_lstat): ptool = self.get_obj() ptool._list_file_acls = Mock() acls = {("default", posix1e.ACL_USER, "testuser"): "rwx", ("access", posix1e.ACL_GROUP, "testgroup"): "rx"} ptool._list_file_acls.return_value = acls path = '/test' mock_lstat.return_value = MagicMock() mock_lstat.return_value.__getitem__.return_value = MagicMock() mock_S_ISLNK.return_value = False # disable selinux for this call and test it separately state = (Bcfg2.Client.Tools.POSIX.base.HAS_ACLS, Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX) Bcfg2.Client.Tools.POSIX.base.HAS_ACLS = True Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = False self.assertItemsEqual(ptool._gather_data(path)[5], acls) ptool._list_file_acls.assert_called_with(path) # symlinks can't have their own ACLs, so ensure that the # _list_file_acls call is skipped and no ACLs are returned mock_S_ISLNK.return_value = True ptool._list_file_acls.reset_mock() self.assertEqual(ptool._gather_data(path)[5], None) self.assertFalse(ptool._list_file_acls.called) Bcfg2.Client.Tools.POSIX.base.HAS_ACLS, \ Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = state @patchIf(HAS_SELINUX, "selinux.matchpathcon") def test_verify_metadata(self, mock_matchpathcon): ptool = self.get_obj() ptool._norm_entry_uid = Mock() ptool._norm_entry_gid = Mock() ptool._verify_acls = Mock() ptool._gather_data = Mock() entry = lxml.etree.Element("Path", name="/test", type="file", group="group", owner="user", mode="664", secontext='unconfined_u:object_r:etc_t:s0') # _verify_metadata() mutates the entry, so we keep a backup so we # can start fresh every time orig_entry = copy.deepcopy(entry) def reset(): ptool._gather_data.reset_mock() ptool._verify_acls.reset_mock() ptool._norm_entry_uid.reset_mock() ptool._norm_entry_gid.reset_mock() return copy.deepcopy(orig_entry) # test nonexistent file ptool._gather_data.return_value = (False, None, None, None, None, None) self.assertFalse(ptool._verify_metadata(entry)) self.assertEqual(entry.get("current_exists", "").lower(), "false") ptool._gather_data.assert_called_with(entry.get("name")) # expected data. 
tuple of attr, return value index, value expected = [('current_owner', 1, '0'), ('current_group', 2, '10'), ('current_mode', 3, '0664'), ('current_secontext', 4, 'etc_t')] ptool._norm_entry_uid.return_value = 0 ptool._norm_entry_gid.return_value = 10 gather_data_rv = [MagicMock(), None, None, None, None, []] for attr, idx, val in expected: gather_data_rv[idx] = val entry = reset() ptool._gather_data.return_value = tuple(gather_data_rv) self.assertTrue(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: self.assertEqual(entry.get(attr), val) # test when secontext is None entry = reset() gather_data_rv[4] = None sestate = Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = False ptool._gather_data.return_value = tuple(gather_data_rv) self.assertTrue(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: if attr != 'current_secontext': self.assertEqual(entry.get(attr), val) Bcfg2.Client.Tools.POSIX.base.HAS_SELINUX = sestate gather_data_rv = [MagicMock(), None, None, None, None, []] for attr, idx, val in expected: gather_data_rv[idx] = val ptool._gather_data.return_value = tuple(gather_data_rv) stat_mode = 17407 mtime = 1344430414 stat_rv = (stat_mode, Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), mtime, Mock()) gather_data_rv[0] = stat_rv entry = reset() entry.set("mtime", str(mtime)) ptool._gather_data.return_value = tuple(gather_data_rv) self.assertTrue(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: self.assertEqual(entry.get(attr), val) self.assertEqual(entry.get("current_mtime"), str(mtime)) # failure modes for each checked datum. 
tuple of changed attr, # return value index, new (failing) value failures = [('current_owner', 1, '10'), ('current_group', 2, '100'), ('current_mode', 3, '0660')] if HAS_SELINUX: failures.append(('current_secontext', 4, 'root_t')) for fail_attr, fail_idx, fail_val in failures: entry = reset() entry.set("mtime", str(mtime)) gather_data_rv = [stat_rv, None, None, None, None, []] for attr, idx, val in expected: gather_data_rv[idx] = val gather_data_rv[fail_idx] = fail_val ptool._gather_data.return_value = tuple(gather_data_rv) self.assertFalse(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) self.assertEqual(entry.get("current_exists", 'true'), 'true') self.assertEqual(entry.get(fail_attr), fail_val) for attr, idx, val in expected: if attr != fail_attr: self.assertEqual(entry.get(attr), val) self.assertEqual(entry.get("current_mtime"), str(mtime)) # failure mode for mtime fail_mtime = 1344431162 entry = reset() entry.set("mtime", str(mtime)) fail_stat_rv = MagicMock() fail_stat_rv.__getitem__.return_value = fail_mtime gather_data_rv = [fail_stat_rv, None, None, None, None, []] for attr, idx, val in expected: gather_data_rv[idx] = val ptool._gather_data.return_value = tuple(gather_data_rv) self.assertFalse(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: self.assertEqual(entry.get(attr), val) self.assertEqual(entry.get("current_mtime"), str(fail_mtime)) if HAS_SELINUX: # test success and failure for __default__ secontext entry = reset() entry.set("mtime", str(mtime)) entry.set("secontext", "__default__") context1 = "system_u:object_r:etc_t:s0" context2 = "system_u:object_r:root_t:s0" mock_matchpathcon.return_value = [1 + len(context1), context1] gather_data_rv = [stat_rv, None, None, None, None, []] for attr, idx, val in expected: gather_data_rv[idx] = val ptool._gather_data.return_value = tuple(gather_data_rv) self.assertTrue(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: self.assertEqual(entry.get(attr), val) self.assertEqual(entry.get("current_mtime"), str(mtime)) entry = reset() entry.set("mtime", str(mtime)) entry.set("secontext", "__default__") mock_matchpathcon.return_value = [1 + len(context2), context2] self.assertFalse(ptool._verify_metadata(entry)) ptool._gather_data.assert_called_with(entry.get("name")) ptool._verify_acls.assert_called_with(entry, path=entry.get("name")) mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode) self.assertEqual(entry.get("current_exists", 'true'), 'true') for attr, idx, val in expected: self.assertEqual(entry.get(attr), val) self.assertEqual(entry.get("current_mtime"), str(mtime)) @skipUnless(HAS_ACLS, "ACLS not found, skipping") def test_list_entry_acls(self): ptool = self.get_obj() entry = lxml.etree.Element("Path", name="/test", type="file") lxml.etree.SubElement(entry, "ACL", scope="user", type="default", user="user", perms="rwx") lxml.etree.SubElement(entry, "ACL", scope="group", type="access", group="group", perms="5") 
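# keys are (type, posix1e tag derived from scope, user/group name); perms
# are normalised, so "rwx" comes back as 7 and the numeric "5" passes through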
self.assertItemsEqual(ptool._list_entry_acls(entry), {("default", posix1e.ACL_USER, "user"): 7, ("access", posix1e.ACL_GROUP, "group"): 5}) @skipUnless(HAS_ACLS, "ACLS not found, skipping") @patchIf(HAS_ACLS, "posix1e.ACL") @patch("pwd.getpwuid") @patch("grp.getgrgid") @patch("os.path.isdir") def test_list_file_acls(self, mock_isdir, mock_getgrgid, mock_getpwuid, mock_ACL): ptool = self.get_obj() path = '/test' # build a set of file ACLs to return from posix1e.ACL(file=...) file_acls = [] acl = Mock() acl.tag_type = posix1e.ACL_USER acl.qualifier = 10 # yes, this is a bogus permset. thanks to _norm_acl_perms # it works and is easier than many of the alternatives. acl.permset = 'rwx' file_acls.append(acl) acl = Mock() acl.tag_type = posix1e.ACL_GROUP acl.qualifier = 100 acl.permset = 'rx' file_acls.append(acl) acl = Mock() acl.tag_type = posix1e.ACL_MASK file_acls.append(acl) acls = {("access", posix1e.ACL_USER, "user"): 7, ("access", posix1e.ACL_GROUP, "group"): 5} # set up the unreasonably complex return value for # posix1e.ACL(), which has two separate uses fileacl_rv = MagicMock() fileacl_rv.valid.return_value = True fileacl_rv.__iter__.return_value = iter(file_acls) filedef_rv = MagicMock() filedef_rv.valid.return_value = True filedef_rv.__iter__.return_value = iter(file_acls) def mock_acl_rv(file=None, filedef=None): if file: return fileacl_rv elif filedef: return filedef_rv # other return values mock_isdir.return_value = False mock_getgrgid.return_value = ("group", "x", 5, []) mock_getpwuid.return_value = ("user", "x", 5, 5, "User", "/home/user", "/bin/zsh") def reset(): mock_isdir.reset_mock() mock_getgrgid.reset_mock() mock_getpwuid.reset_mock() mock_ACL.reset_mock() mock_ACL.side_effect = IOError(95, "Operation not supported") self.assertItemsEqual(ptool._list_file_acls(path), dict()) reset() mock_ACL.side_effect = IOError self.assertItemsEqual(ptool._list_file_acls(path), dict()) reset() mock_ACL.side_effect = mock_acl_rv self.assertItemsEqual(ptool._list_file_acls(path), acls) mock_isdir.assert_called_with(path) mock_getgrgid.assert_called_with(100) mock_getpwuid.assert_called_with(10) mock_ACL.assert_called_with(file=path) reset() mock_isdir.return_value = True fileacl_rv.__iter__.return_value = iter(file_acls) filedef_rv.__iter__.return_value = iter(file_acls) defacls = acls for akey, perms in list(acls.items()): defacls[('default', akey[1], akey[2])] = perms self.assertItemsEqual(ptool._list_file_acls(path), defacls) mock_isdir.assert_called_with(path) self.assertItemsEqual(mock_getgrgid.call_args_list, [call(100), call(100)]) self.assertItemsEqual(mock_getpwuid.call_args_list, [call(10), call(10)]) self.assertItemsEqual(mock_ACL.call_args_list, [call(file=path), call(filedef=path)]) @skipUnless(HAS_ACLS, "ACLS not found, skipping") def test_verify_acls(self): ptool = self.get_obj() ptool._list_file_acls = Mock() ptool._list_entry_acls = Mock() entry = lxml.etree.Element("Path", name="/test", type="file") # we can't test to make sure that errors get properly sorted # into (missing, extra, wrong) without refactoring the # _verify_acls code, and I don't feel like doing that, so eff # it. let's just test to make sure that failures are # identified at all for now. 
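# baseline case: the entry ACLs and the on-disk ACLs match exactly, so
# _verify_acls() should succeed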
acls = {("access", posix1e.ACL_USER, "user"): 7, ("default", posix1e.ACL_GROUP, "group"): 5} extra_acls = copy.deepcopy(acls) extra_acls[("access", posix1e.ACL_USER, "user2")] = 4 ptool._list_entry_acls.return_value = acls ptool._list_file_acls.return_value = acls self.assertTrue(ptool._verify_acls(entry)) ptool._list_entry_acls.assert_called_with(entry) ptool._list_file_acls.assert_called_with(entry.get("name")) # test missing ptool._list_entry_acls.reset_mock() ptool._list_file_acls.reset_mock() ptool._list_file_acls.return_value = extra_acls self.assertFalse(ptool._verify_acls(entry)) ptool._list_entry_acls.assert_called_with(entry) ptool._list_file_acls.assert_called_with(entry.get("name")) # test extra ptool._list_entry_acls.reset_mock() ptool._list_file_acls.reset_mock() ptool._list_entry_acls.return_value = extra_acls ptool._list_file_acls.return_value = acls self.assertFalse(ptool._verify_acls(entry)) ptool._list_entry_acls.assert_called_with(entry) ptool._list_file_acls.assert_called_with(entry.get("name")) # test wrong wrong_acls = copy.deepcopy(extra_acls) wrong_acls[("access", posix1e.ACL_USER, "user2")] = 5 ptool._list_entry_acls.reset_mock() ptool._list_file_acls.reset_mock() ptool._list_entry_acls.return_value = extra_acls ptool._list_file_acls.return_value = wrong_acls self.assertFalse(ptool._verify_acls(entry)) ptool._list_entry_acls.assert_called_with(entry) ptool._list_file_acls.assert_called_with(entry.get("name")) @patch("os.makedirs") @patch("os.path.exists") def test_makedirs(self, mock_exists, mock_makedirs): ptool = self.get_obj() ptool._set_perms = Mock() entry = lxml.etree.Element("Path", name="/test/foo/bar", type="directory", mode="0644") parent_entry = lxml.etree.Element("Path", name="/test/foo/bar", type="directory", mode="0755") def reset(): mock_exists.reset_mock() ptool._set_perms.reset_mock() mock_makedirs.reset_mock() ptool._set_perms.return_value = True def path_exists_rv(path): if path == "/test": return True else: return False mock_exists.side_effect = path_exists_rv self.assertTrue(ptool._makedirs(entry)) self.assertItemsEqual(mock_exists.call_args_list, [call("/test"), call("/test/foo"), call("/test/foo/bar")]) for args in ptool._set_perms.call_args_list: self.assertXMLEqual(args[0][0], parent_entry) self.assertItemsEqual([a[1] for a in ptool._set_perms.call_args_list], [dict(path="/test/foo"), dict(path="/test/foo/bar")]) mock_makedirs.assert_called_with(entry.get("name")) reset() mock_makedirs.side_effect = OSError self.assertFalse(ptool._makedirs(entry)) for args in ptool._set_perms.call_args_list: self.assertXMLEqual(args[0][0], parent_entry) self.assertItemsEqual([a[1] for a in ptool._set_perms.call_args_list], [dict(path="/test/foo"), dict(path="/test/foo/bar")]) reset() mock_makedirs.side_effect = None def set_perms_rv(entry, path=None): if path == '/test/foo': return False else: return True ptool._set_perms.side_effect = set_perms_rv self.assertTrue(ptool._makedirs(entry)) self.assertItemsEqual(mock_exists.call_args_list, [call("/test"), call("/test/foo"), call("/test/foo/bar")]) for args in ptool._set_perms.call_args_list: self.assertXMLEqual(args[0][0], parent_entry) self.assertItemsEqual([a[1] for a in ptool._set_perms.call_args_list], [dict(path="/test/foo"), dict(path="/test/foo/bar")]) mock_makedirs.assert_called_with(entry.get("name")) class TestPOSIXLinkTool(TestPOSIXTool): test_obj = POSIXLinkTool @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify") def test_verify(self, mock_verify): entry = lxml.etree.Element("Path", 
name="/test", type="testlink", to="/dest") ptool = self.get_obj() linktype = ptool.__linktype__ ptool.__linktype__ = "test" ptool._verify = Mock() ptool._verify.return_value = True mock_verify.return_value = False self.assertFalse(ptool.verify(entry, [])) ptool._verify.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._verify.reset_mock() mock_verify.reset_mock() mock_verify.return_value = True self.assertTrue(ptool.verify(entry, [])) ptool._verify.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._verify.reset_mock() mock_verify.reset_mock() ptool._verify.return_value = False self.assertFalse(ptool.verify(entry, [])) ptool._verify.assert_called_with(entry) mock_verify.assert_called_with(ptool, entry, []) ptool._verify.reset_mock() mock_verify.reset_mock() ptool._verify.side_effect = OSError self.assertFalse(ptool.verify(entry, [])) ptool._verify.assert_called_with(entry) ptool.__linktype__ = linktype def test__verify(self): ptool = self.get_obj() self.assertRaises(NotImplementedError, ptool._verify, Mock()) @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install") def test_install(self, mock_install): entry = lxml.etree.Element("Path", name="/test", type="symlink", to="/dest") ptool = self.get_obj() linktype = ptool.__linktype__ ptool.__linktype__ = "test" ptool._exists = Mock() ptool._link = Mock() ptool._exists.return_value = False mock_install.return_value = True self.assertTrue(ptool.install(entry)) ptool._exists.assert_called_with(entry, remove=True) ptool._link.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) ptool._link.reset_mock() ptool._exists.reset_mock() mock_install.reset_mock() ptool._link.side_effect = OSError self.assertFalse(ptool.install(entry)) ptool._exists.assert_called_with(entry, remove=True) ptool._link.assert_called_with(entry) mock_install.assert_called_with(ptool, entry) ptool.__linktype__ = linktype def test__link(self): ptool = self.get_obj() self.assertRaises(NotImplementedError, ptool._link, Mock()) testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/__init__.py000066400000000000000000000000001303523157100260230ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py000066400000000000000000000477141303523157100253350ustar00rootroot00000000000000import os import sys import copy import lxml.etree import subprocess from mock import Mock, MagicMock, patch import Bcfg2.Client.Tools from Bcfg2.Client.Tools.POSIXUsers import * from Bcfg2.Utils import PackedDigitRange # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestTools.Test_init import TestTool class TestPOSIXUsers(TestTool): test_obj = POSIXUsers def setUp(self): TestTool.setUp(self) set_setup_default('uid_whitelist', []) set_setup_default('uid_blacklist', []) set_setup_default('gid_whitelist', []) set_setup_default('gid_blacklist', []) set_setup_default('encoding', 'UTF-8') def get_obj(self, config=None): return TestTool.get_obj(self, config) @patch("pwd.getpwall") @patch("grp.getgrall") def test_existing(self, mock_getgrall, mock_getpwall): users = self.get_obj() mock_getgrall.return_value = MagicMock() mock_getpwall.return_value = MagicMock() def reset(): mock_getgrall.reset_mock() 
mock_getpwall.reset_mock() # make sure we start clean self.assertIsNone(users._existing) self.assertIsInstance(users.existing, dict) self.assertIn("POSIXUser", users.existing) self.assertIn("POSIXGroup", users.existing) mock_getgrall.assert_called_with() mock_getpwall.assert_called_with() reset() self.assertIsInstance(users._existing, dict) self.assertIsInstance(users.existing, dict) self.assertEqual(users.existing, users._existing) self.assertIn("POSIXUser", users.existing) self.assertIn("POSIXGroup", users.existing) self.assertFalse(mock_getgrall.called) self.assertFalse(mock_getpwall.called) reset() users._existing = None self.assertIsInstance(users.existing, dict) self.assertIn("POSIXUser", users.existing) self.assertIn("POSIXGroup", users.existing) mock_getgrall.assert_called_with() mock_getpwall.assert_called_with() def test__in_managed_range(self): users = self.get_obj() users._whitelist = dict(POSIXGroup=PackedDigitRange("1-10")) users._blacklist = dict(POSIXGroup=PackedDigitRange("8-100")) self.assertTrue(users._in_managed_range("POSIXGroup", "9")) users._whitelist = dict(POSIXGroup=None) users._blacklist = dict(POSIXGroup=PackedDigitRange("8-100")) self.assertFalse(users._in_managed_range("POSIXGroup", "9")) users._whitelist = dict(POSIXGroup=None) users._blacklist = dict(POSIXGroup=PackedDigitRange("100-")) self.assertTrue(users._in_managed_range("POSIXGroup", "9")) users._whitelist = dict(POSIXGroup=PackedDigitRange("1-10")) users._blacklist = dict(POSIXGroup=None) self.assertFalse(users._in_managed_range("POSIXGroup", "25")) @patch("Bcfg2.Client.Tools.Tool.canInstall") def test_canInstall(self, mock_canInstall): users = self.get_obj() users._in_managed_range = Mock() users._in_managed_range.return_value = False mock_canInstall.return_value = False def reset(): users._in_managed_range.reset() mock_canInstall.reset() # test failure of inherited method entry = lxml.etree.Element("POSIXUser", name="test") self.assertFalse(users.canInstall(entry)) mock_canInstall.assert_called_with(users, entry) # test with no uid specified reset() mock_canInstall.return_value = True self.assertTrue(users.canInstall(entry)) mock_canInstall.assert_called_with(users, entry) # test with uid specified, not in managed range reset() entry.set("uid", "1000") self.assertFalse(users.canInstall(entry)) mock_canInstall.assert_called_with(users, entry) users._in_managed_range.assert_called_with(entry.tag, "1000") # test with uid specified, in managed range reset() users._in_managed_range.return_value = True self.assertTrue(users.canInstall(entry)) mock_canInstall.assert_called_with(users, entry) users._in_managed_range.assert_called_with(entry.tag, "1000") @patch("Bcfg2.Client.Tools.Tool.Inventory") def test_Inventory(self, mock_Inventory): config = lxml.etree.Element("Configuration") bundle = lxml.etree.SubElement(config, "Bundle", name="test") lxml.etree.SubElement(bundle, "POSIXUser", name="test", group="test") lxml.etree.SubElement(bundle, "POSIXUser", name="test2", group="test2") lxml.etree.SubElement(bundle, "POSIXGroup", name="test2") orig_bundle = copy.deepcopy(bundle) users = self.get_obj(config=config) users.set_defaults['POSIXUser'] = Mock() users.set_defaults['POSIXUser'].side_effect = lambda e: e self.assertEqual(users.Inventory(), mock_Inventory.return_value) mock_Inventory.assert_called_with(users, config.getchildren()) lxml.etree.SubElement(orig_bundle, "POSIXGroup", name="test") self.assertXMLEqual(orig_bundle, bundle) def test_FindExtra(self): users = self.get_obj() users._in_managed_range = 
Mock() users._in_managed_range.side_effect = lambda t, i: i < 100 def getSupportedEntries(): return [lxml.etree.Element("POSIXUser", name="test1"), lxml.etree.Element("POSIXGroup", name="test1")] users.getSupportedEntries = Mock() users.getSupportedEntries.side_effect = getSupportedEntries users._existing = dict(POSIXUser=dict(test1=("test1", "x", 15), test2=("test2", "x", 25), test3=("test3", "x", 115)), POSIXGroup=dict(test2=("test2", "x", 25))) extra = users.FindExtra() self.assertEqual(len(extra), 2) self.assertItemsEqual([e.tag for e in extra], ["POSIXUser", "POSIXGroup"]) self.assertItemsEqual([e.get("name") for e in extra], ["test2", "test2"]) self.assertItemsEqual(users._in_managed_range.call_args_list, [call("POSIXUser", 25), call("POSIXUser", 115), call("POSIXGroup", 25)]) def test_populate_user_entry(self): users = self.get_obj() users._existing = dict(POSIXUser=dict(), POSIXGroup=dict(root=('root', 'x', 0, []))) cases = [(lxml.etree.Element("POSIXUser", name="test"), lxml.etree.Element("POSIXUser", name="test", group="test", gecos="test", shell="/bin/bash", home="/home/test")), (lxml.etree.Element("POSIXUser", name="root", gecos="Root", shell="/bin/zsh"), lxml.etree.Element("POSIXUser", name="root", group='root', gid='0', gecos="Root", shell="/bin/zsh", home='/root')), (lxml.etree.Element("POSIXUser", name="test2", gecos="", shell="/bin/zsh"), lxml.etree.Element("POSIXUser", name="test2", group='test2', gecos="", shell="/bin/zsh", home='/home/test2'))] for initial, expected in cases: actual = users.populate_user_entry(initial) self.assertXMLEqual(actual, expected) def test_user_supplementary_groups(self): users = self.get_obj() users._existing = \ dict(POSIXUser=dict(), POSIXGroup=dict(root=('root', 'x', 0, []), wheel=('wheel', 'x', 10, ['test']), users=('users', 'x', 100, ['test']))) entry = lxml.etree.Element("POSIXUser", name="test") self.assertItemsEqual(users.user_supplementary_groups(entry), [users.existing['POSIXGroup']['wheel'], users.existing['POSIXGroup']['users']]) entry.set('name', 'test2') self.assertItemsEqual(users.user_supplementary_groups(entry), []) def test_VerifyPOSIXUser(self): users = self.get_obj() users._verify = Mock() users._verify.return_value = True users.populate_user_entry = Mock() users.user_supplementary_groups = Mock() users.user_supplementary_groups.return_value = \ [('wheel', 'x', 10, ['test']), ('users', 'x', 100, ['test'])] def reset(): users._verify.reset_mock() users.populate_user_entry.reset_mock() users.user_supplementary_groups.reset_mock() entry = lxml.etree.Element("POSIXUser", name="test") self.assertFalse(users.VerifyPOSIXUser(entry, [])) users.populate_user_entry.assert_called_with(entry) users._verify.assert_called_with(users.populate_user_entry.return_value) users.user_supplementary_groups.assert_called_with(entry) reset() m1 = lxml.etree.SubElement(entry, "MemberOf", group="wheel") m2 = lxml.etree.SubElement(entry, "MemberOf") m2.text = "users" self.assertTrue(users.VerifyPOSIXUser(entry, [])) users.populate_user_entry.assert_called_with(entry) users._verify.assert_called_with(users.populate_user_entry.return_value) users.user_supplementary_groups.assert_called_with(entry) reset() m3 = lxml.etree.SubElement(entry, "MemberOf", group="extra") self.assertFalse(users.VerifyPOSIXUser(entry, [])) users.populate_user_entry.assert_called_with(entry) users._verify.assert_called_with(users.populate_user_entry.return_value) users.user_supplementary_groups.assert_called_with(entry) reset() def _verify(entry): entry.set("current_exists", 
"false") return False users._verify.side_effect = _verify self.assertFalse(users.VerifyPOSIXUser(entry, [])) users.populate_user_entry.assert_called_with(entry) users._verify.assert_called_with(users.populate_user_entry.return_value) def test_VerifyPOSIXGroup(self): users = self.get_obj() users._verify = Mock() entry = lxml.etree.Element("POSIXGroup", name="test") self.assertEqual(users._verify.return_value, users.VerifyPOSIXGroup(entry, [])) def test__verify(self): users = self.get_obj() users._existing = \ dict(POSIXUser=dict(test=('test', 'x', 1000, 1000, 'Test McTest', '/home/test', '/bin/zsh')), POSIXGroup=dict(test=('test', 'x', 1000, []))) entry = lxml.etree.Element("POSIXUser", name="nonexistent") self.assertFalse(users._verify(entry)) self.assertEqual(entry.get("current_exists"), "false") entry = lxml.etree.Element("POSIXUser", name="test", group="test", gecos="Bogus", shell="/bin/bash", home="/home/test") self.assertFalse(users._verify(entry)) entry = lxml.etree.Element("POSIXUser", name="test", group="test", gecos="Test McTest", shell="/bin/zsh", home="/home/test") self.assertTrue(users._verify(entry)) entry = lxml.etree.Element("POSIXUser", name="test", group="test", gecos="Test McTest", shell="/bin/zsh", home="/home/test", uid="1000", gid="1000") self.assertTrue(users._verify(entry)) entry = lxml.etree.Element("POSIXUser", name="test", group="test", gecos="Test McTest", shell="/bin/zsh", home="/home/test", uid="1001") self.assertFalse(users._verify(entry)) def test_Install(self): users = self.get_obj() users._install = Mock() users._existing = MagicMock() entries = [lxml.etree.Element("POSIXUser", name="test"), lxml.etree.Element("POSIXGroup", name="test"), lxml.etree.Element("POSIXUser", name="test2")] states = users.Install(entries) self.assertItemsEqual(entries, states.keys()) for state in states.values(): self.assertEqual(state, users._install.return_value) # need to verify two things about _install calls: # 1) _install was called for each entry; # 2) _install was called for all groups before any users self.assertItemsEqual(users._install.call_args_list, [call(e) for e in entries]) users_started = False for args in users._install.call_args_list: if args[0][0].tag == "POSIXUser": users_started = True elif users_started: assert False, "_install() called on POSIXGroup after installing one or more POSIXUsers" def test__install(self): users = self.get_obj() users._get_cmd = Mock() users.cmd = Mock() users.set_defaults = dict(POSIXUser=Mock(), POSIXGroup=Mock()) users._existing = \ dict(POSIXUser=dict(test=('test', 'x', 1000, 1000, 'Test McTest', '/home/test', '/bin/zsh')), POSIXGroup=dict(test=('test', 'x', 1000, []))) def reset(): users._get_cmd.reset_mock() users.cmd.reset_mock() for setter in users.set_defaults.values(): setter.reset_mock() users.modified = [] cmd_rv = Mock() cmd_rv.success = True users.cmd.run.return_value = cmd_rv reset() entry = lxml.etree.Element("POSIXUser", name="test2") self.assertTrue(users._install(entry)) users.set_defaults[entry.tag].assert_called_with(entry) users._get_cmd.assert_called_with("add", users.set_defaults[entry.tag].return_value) users.cmd.run.assert_called_with(users._get_cmd.return_value) self.assertIn(entry, users.modified) reset() entry = lxml.etree.Element("POSIXUser", name="test") self.assertTrue(users._install(entry)) users.set_defaults[entry.tag].assert_called_with(entry) users._get_cmd.assert_called_with("mod", users.set_defaults[entry.tag].return_value) users.cmd.run.assert_called_with(users._get_cmd.return_value) 
self.assertIn(entry, users.modified) reset() cmd_rv.success = False self.assertFalse(users._install(entry)) users.set_defaults[entry.tag].assert_called_with(entry) users._get_cmd.assert_called_with("mod", users.set_defaults[entry.tag].return_value) users.cmd.run.assert_called_with(users._get_cmd.return_value) self.assertNotIn(entry, users.modified) def test__get_cmd(self): users = self.get_obj() entry = lxml.etree.Element("POSIXUser", name="test", group="test", home="/home/test", shell="/bin/zsh", gecos="Test McTest") m1 = lxml.etree.SubElement(entry, "MemberOf", group="wheel") m2 = lxml.etree.SubElement(entry, "MemberOf") m2.text = "users" cases = [(lxml.etree.Element("POSIXGroup", name="test"), []), (lxml.etree.Element("POSIXGroup", name="test", gid="1001"), ["-g", "1001"]), (lxml.etree.Element("POSIXUser", name="test", group="test", home="/home/test", shell="/bin/zsh", gecos="Test McTest"), ["-g", "test", "-d", "/home/test", "-s", "/bin/zsh", "-c", "Test McTest"]), (lxml.etree.Element("POSIXUser", name="test", group="test", home="/home/test", shell="/bin/zsh", gecos="Test McTest", uid="1001"), ["-u", "1001", "-g", "test", "-d", "/home/test", "-s", "/bin/zsh", "-c", "Test McTest"]), (entry, ["-g", "test", "-G", "wheel,users", "-d", "/home/test", "-s", "/bin/zsh", "-c", "Test McTest"])] for entry, expected in cases: for action in ["add", "mod", "del"]: actual = users._get_cmd(action, entry) if entry.tag == "POSIXGroup": etype = "group" else: etype = "user" self.assertEqual(actual[0], "/usr/sbin/%s%s" % (etype, action)) self.assertEqual(actual[-1], entry.get("name")) if action != "del": self.assertItemsEqual(actual[1:-1], expected) @patch("grp.getgrnam") def test_Remove(self, mock_getgrnam): users = self.get_obj() users._remove = Mock() users.FindExtra = Mock() users._existing = MagicMock() users.extra = MagicMock() def reset(): users._remove.reset_mock() users.FindExtra.reset_mock() users._existing = MagicMock() users.extra = MagicMock() mock_getgrnam.reset_mock() entries = [lxml.etree.Element("POSIXUser", name="test"), lxml.etree.Element("POSIXGroup", name="test"), lxml.etree.Element("POSIXUser", name="test2")] users.Remove(entries) self.assertIsNone(users._existing) users.FindExtra.assert_called_with() self.assertEqual(users.extra, users.FindExtra.return_value) mock_getgrnam.assert_called_with("test") # need to verify two things about _remove calls: # 1) _remove was called for each entry; # 2) _remove was called for all users before any groups self.assertItemsEqual(users._remove.call_args_list, [call(e) for e in entries]) groups_started = False for args in users._remove.call_args_list: if args[0][0].tag == "POSIXGroup": groups_started = True elif groups_started: assert False, "_remove() called on POSIXUser after removing one or more POSIXGroups" reset() mock_getgrnam.side_effect = KeyError users.Remove(entries) self.assertIsNone(users._existing) users.FindExtra.assert_called_with() self.assertEqual(users.extra, users.FindExtra.return_value) mock_getgrnam.assert_called_with("test") self.assertItemsEqual(users._remove.call_args_list, [call(e) for e in entries if e.tag == "POSIXUser"]) def test__remove(self): users = self.get_obj() users._get_cmd = Mock() users.cmd = Mock() cmd_rv = Mock() cmd_rv.success = True users.cmd.run.return_value = cmd_rv def reset(): users._get_cmd.reset_mock() users.cmd.reset_mock() entry = lxml.etree.Element("POSIXUser", name="test2") self.assertTrue(users._remove(entry)) users._get_cmd.assert_called_with("del", entry) 
users.cmd.run.assert_called_with(users._get_cmd.return_value) reset() cmd_rv.success = False self.assertFalse(users._remove(entry)) users._get_cmd.assert_called_with("del", entry) users.cmd.run.assert_called_with(users._get_cmd.return_value) testsuite/Testsrc/Testlib/TestClient/TestTools/Test_init.py000066400000000000000000000647361303523157100245160ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Compat import long from Bcfg2.Client.Tools import Tool, SvcTool, PkgTool, \ ToolInstantiationError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * class TestTool(Bcfg2TestCase): test_obj = Tool # try to find true if os.path.exists("/bin/true"): true = "/bin/true" elif os.path.exists("/usr/bin/true"): true = "/usr/bin/true" else: true = None def setUp(self): set_setup_default('command_timeout') set_setup_default('interactive', False) set_setup_default('decision') def get_obj(self, config=None): if config is None: config = lxml.etree.Element("Configuration") execs = self.test_obj.__execs__ self.test_obj.__execs__ = [] rv = self.test_obj(config) self.test_obj.__execs__ = execs return rv def test__init(self): @patch("%s.%s._check_execs" % (self.test_obj.__module__, self.test_obj.__name__)) @patch("%s.%s._analyze_config" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_analyze_config, mock_check_execs): self.get_obj() mock_analyze_config.assert_called_with() mock_check_execs.assert_called_with() inner() def test__analyze_config(self): t = self.get_obj() t.getSupportedEntries = Mock() t.__important__ = ["/test"] important = [] t.config = lxml.etree.Element("Config") bundle1 = lxml.etree.SubElement(t.config, "Bundle") important.append(lxml.etree.SubElement(bundle1, "Path", name="/foo", important="true")) lxml.etree.SubElement(bundle1, "Package", name="bar", important="true") lxml.etree.SubElement(bundle1, "Path", name="/bar") bundle2 = lxml.etree.SubElement(t.config, "Bundle") important.append(lxml.etree.SubElement(bundle2, "Path", name="/quux", important="true")) lxml.etree.SubElement(bundle2, "Path", name="/baz", important="false") t._analyze_config() self.assertItemsEqual(t.__important__, ["/test"] + [e.get("name") for e in important]) t.getSupportedEntries.assert_called_with() @skipIf(true is None, "/bin/true or equivalent not found") def test__check_execs(self): t = self.get_obj() if t.__execs__ == []: t.__execs__.append(self.true) @patch("os.stat") def inner(mock_stat): mock_stat.return_value = (33261, 2245040, long(64770), 1, 0, 0, 25552, 1360831382, 1352194410, 1354626626) t._check_execs() self.assertItemsEqual(mock_stat.call_args_list, [call(e) for e in t.__execs__]) # not executable mock_stat.reset_mock() mock_stat.return_value = (33188, 2245040, long(64770), 1, 0, 0, 25552, 1360831382, 1352194410, 1354626626) self.assertRaises(ToolInstantiationError, t._check_execs) # non-existant mock_stat.reset_mock() mock_stat.side_effect = OSError self.assertRaises(ToolInstantiationError, t._check_execs) inner() def test_BundleUpdated(self): pass def test_BundleNotUpdated(self): pass def test_Inventory(self): t = self.get_obj() t.canVerify = Mock() t.canVerify.side_effect = lambda e: e.get("verify") != "false" t.buildModlist = Mock() 
t.FindExtra = Mock() t.VerifyPath = Mock() t.VerifyPackage = Mock() t.VerifyService = Mock() def reset(): t.canVerify.reset_mock() t.buildModlist.reset_mock() t.FindExtra.reset_mock() t.VerifyPath.reset_mock() t.VerifyPackage.reset_mock() t.VerifyService.reset_mock() paths = [] packages = [] services = [] config = lxml.etree.Element("Configuration") bundle1 = lxml.etree.SubElement(config, "Bundle") paths.append(lxml.etree.SubElement(bundle1, "Path", name="/foo")) lxml.etree.SubElement(bundle1, "Package", name="foo", verify="false") packages.append(lxml.etree.SubElement(bundle1, "Package", name="bar")) lxml.etree.SubElement(bundle1, "Bogus") bundle2 = lxml.etree.SubElement(config, "Bundle") paths.append(lxml.etree.SubElement(bundle2, "Path", name="/bar")) services.append(lxml.etree.SubElement(bundle2, "Service", name="bar")) lxml.etree.SubElement(bundle2, "Path", name="/baz", verify="false") expected_states = dict([(e, t.VerifyPath.return_value) for e in paths]) expected_states.update(dict([(e, t.VerifyPackage.return_value) for e in packages])) expected_states.update(dict([(e, t.VerifyService.return_value) for e in services])) def perform_assertions(states): t.buildModlist.assert_called_with() t.FindExtra.assert_called_with() self.assertItemsEqual(t.canVerify.call_args_list, [call(e) for e in bundle1.getchildren()] + \ [call(e) for e in bundle2.getchildren()]) self.assertItemsEqual(t.VerifyPath.call_args_list, [call(e, t.buildModlist.return_value) for e in paths]) self.assertItemsEqual(t.VerifyPackage.call_args_list, [call(e, t.buildModlist.return_value) for e in packages]) self.assertItemsEqual(t.VerifyService.call_args_list, [call(e, t.buildModlist.return_value) for e in services]) self.assertItemsEqual(states, expected_states) self.assertEqual(t.extra, t.FindExtra.return_value) actual_states = t.Inventory(structures=[bundle1, bundle2]) perform_assertions(actual_states) reset() t.config = config actual_states = t.Inventory() perform_assertions(actual_states) def test_Install(self): t = self.get_obj() t.InstallPath = Mock() t.InstallPackage = Mock() t.InstallService = Mock() t.InstallPath.side_effect = lambda e: e.get("modified") == "true" t.InstallPackage.side_effect = lambda e: e.get("modified") == "true" t.InstallService.side_effect = lambda e: e.get("modified") == "true" entries = [lxml.etree.Element("Path", name="/foo", modified="true"), lxml.etree.Element("Package", name="bar", modified="true"), lxml.etree.Element("Bogus"), lxml.etree.Element("Path", name="/bar", modified="true"), lxml.etree.Element("Service", name="bar")] expected_states = dict([(e, t.InstallPath.return_value) for e in entries if e.tag == "Path"]) expected_states.update(dict([(e, t.InstallPackage.return_value) for e in entries if e.tag == "Package"])) expected_states.update(dict([(e, t.InstallService.return_value) for e in entries if e.tag == "Service"])) t.modified = [] actual_states = t.Install(entries) self.assertItemsEqual(t.InstallPath.call_args_list, [call(e) for e in entries if e.tag == "Path"]) self.assertItemsEqual(t.InstallPackage.call_args_list, [call(e) for e in entries if e.tag == "Package"]) self.assertItemsEqual(t.InstallService.call_args_list, [call(e) for e in entries if e.tag == "Service"]) self.assertItemsEqual(actual_states, expected_states) self.assertItemsEqual(t.modified, [e for e in entries if e.get("modified") == "true"]) def rest_Remove(self): pass def test_getSupportedEntries(self): t = self.get_obj() def handlesEntry(entry): return entry.get("handled") == "true" t.handlesEntry = Mock() 
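# the handlesEntry stub below only accepts entries marked handled="true",
# so only those should come back from getSupportedEntries()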
t.handlesEntry.side_effect = handlesEntry handled = [] t.config = lxml.etree.Element("Config") bundle1 = lxml.etree.SubElement(t.config, "Bundle") lxml.etree.SubElement(bundle1, "Path", name="/foo") handled.append(lxml.etree.SubElement(bundle1, "Path", name="/bar", handled="true")) bundle2 = lxml.etree.SubElement(t.config, "Bundle") handled.append(lxml.etree.SubElement(bundle2, "Path", name="/quux", handled="true")) lxml.etree.SubElement(bundle2, "Path", name="/baz") self.assertItemsEqual(handled, t.getSupportedEntries()) def test_handlesEntry(self): t = self.get_obj() handles = t.__handles__ t.__handles__ = [("Path", "file"), ("Package", "yum")] self.assertTrue(t.handlesEntry(lxml.etree.Element("Path", type="file", name="/foo"))) self.assertFalse(t.handlesEntry(lxml.etree.Element("Path", type="permissions", name="/bar"))) self.assertFalse(t.handlesEntry(lxml.etree.Element("Bogus", type="file", name="/baz"))) self.assertTrue(t.handlesEntry(lxml.etree.Element("Package", type="yum", name="quux"))) t.__handles__ = handles def test_buildModlist(self): t = self.get_obj() paths = [] t.config = lxml.etree.Element("Config") bundle1 = lxml.etree.SubElement(t.config, "Bundle") paths.append(lxml.etree.SubElement(bundle1, "Path", name="/foo")) lxml.etree.SubElement(bundle1, "Package", name="bar") paths.append(lxml.etree.SubElement(bundle1, "Path", name="/bar")) bundle2 = lxml.etree.SubElement(t.config, "Bundle") paths.append(lxml.etree.SubElement(bundle2, "Path", name="/quux")) lxml.etree.SubElement(bundle2, "Service", name="baz") self.assertItemsEqual([p.get("name") for p in paths], t.buildModlist()) def test_missing_attrs(self): t = self.get_obj() req = t.__req__ t.__req__ = dict(Path=dict(file=["name"], permissions=["name", "owner", "group"]), Package=["name"]) # tuples of , cases = [ (lxml.etree.Element("Path", name="/foo"), ["type"]), (lxml.etree.Element("Path", type="file"), ["name"]), (lxml.etree.Element("Path", type="file", name="/foo"), []), (lxml.etree.Element("Path", type="permissions", name="/foo"), ["owner", "group"]), (lxml.etree.Element("Path", type="permissions", name="/foo", owner="root", group="root", mode="0644"), []), (lxml.etree.Element("Package", type="yum"), ["name"]), (lxml.etree.Element("Package", type="yum", name="/bar"), []), (lxml.etree.Element("Package", type="apt", name="/bar"), [])] for entry, expected in cases: self.assertItemsEqual(t.missing_attrs(entry), expected) t.__req__ = req def test_canVerify(self): t = self.get_obj() entry = Mock() t._entry_is_complete = Mock() self.assertEqual(t.canVerify(entry), t._entry_is_complete.return_value) t._entry_is_complete.assert_called_with(entry, action="verify") def test_FindExtra(self): t = self.get_obj() self.assertItemsEqual(t.FindExtra(), []) def test_canInstall(self): t = self.get_obj() entry = Mock() t._entry_is_complete = Mock() self.assertEqual(t.canInstall(entry), t._entry_is_complete.return_value) t._entry_is_complete.assert_called_with(entry, action="install") def test__entry_is_complete(self): t = self.get_obj() t.handlesEntry = Mock() t.missing_attrs = Mock() def reset(): t.handlesEntry.reset_mock() t.missing_attrs.reset_mock() entry = lxml.etree.Element("Path", name="/test") t.handlesEntry.return_value = False t.missing_attrs.return_value = [] self.assertFalse(t._entry_is_complete(entry)) reset() t.handlesEntry.return_value = True t.missing_attrs.return_value = ["type"] self.assertFalse(t._entry_is_complete(entry)) reset() t.missing_attrs.return_value = [] self.assertTrue(t._entry_is_complete(entry)) reset() 
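# an entry carrying a "failure" attribute can never be complete, even
# though it is handled and has no missing attributes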
entry.set("failure", "failure") self.assertFalse(t._entry_is_complete(entry)) class TestPkgTool(TestTool): test_obj = PkgTool def get_obj(self, **kwargs): @patch("%s.%s.RefreshPackages" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestTool.get_obj(self, **kwargs) return inner() def test_VerifyPackage(self): pt = self.get_obj() self.assertRaises(NotImplementedError, pt.VerifyPackage, Mock(), Mock()) def test_Install(self): pt = self.get_obj() pt.cmd = Mock() pt.RefreshPackages = Mock() pt.VerifyPackage = Mock() pt._get_package_command = Mock() pt._get_package_command.side_effect = lambda pkgs: \ [p.get("name") for p in pkgs] packages = [lxml.etree.Element("Package", type="echo", name="foo", version="1.2.3"), lxml.etree.Element("Package", type="echo", name="bar", version="any"), lxml.etree.Element("Package", type="echo", name="baz", version="2.3.4")] def reset(): pt.cmd.reset_mock() pt.RefreshPackages.reset_mock() pt.VerifyPackage.reset_mock() pt._get_package_command.reset_mock() pt.modified = [] # test single-pass install success reset() pt.cmd.run.return_value = True states = pt.Install(packages) pt._get_package_command.assert_called_with(packages) pt.cmd.run.assert_called_with([p.get("name") for p in packages]) self.assertItemsEqual(states, dict([(p, True) for p in packages])) self.assertItemsEqual(pt.modified, packages) # test failed single-pass install reset() def run(cmd): if "foo" in cmd: # fail when installing all packages, and when installing foo return False # succeed otherwise return True pt.VerifyPackage.side_effect = lambda p, m: p.get("name") == "bar" pt.cmd.run.side_effect = run states = pt.Install(packages) pt._get_package_command.assert_any_call(packages) for pkg in packages: pt.VerifyPackage.assert_any_call(pkg, []) if pkg.get("name") != "bar": pt._get_package_command.assert_any_call([pkg]) # pt.cmd.run is called once for all packages, and then once # for each package that does not verify. "bar" verifies, so # it's run for foo and baz self.assertItemsEqual(pt.cmd.run.call_args_list, [call([p.get("name") for p in packages]), call(["foo"]), call(["baz"])]) pt.RefreshPackages.assert_called_with() self.assertItemsEqual(states, dict([(p, p.get("name") != "bar") for p in packages])) # bar is modified, because it verifies successfully; baz is # modified, because it is installed successfully. foo is not # installed successfully, so is not modified. 
self.assertItemsEqual(pt.modified, [p for p in packages if p.get("name") != "foo"]) def test__get_package_command(self): packages = [lxml.etree.Element("Package", type="test", name="foo", version="1.2.3"), lxml.etree.Element("Package", type="test", name="bar", version="any"), lxml.etree.Element("Package", type="test", name="baz", version="2.3.4")] pt = self.get_obj() pkgtool = pt.pkgtool pt.pkgtool = ("install %s", ("%s-%s", ["name", "version"])) self.assertEqual(pt._get_package_command([ lxml.etree.Element("Package", type="test", name="foo", version="1.2.3")]), "install foo-1.2.3") self.assertItemsEqual(pt._get_package_command(packages).split(), ["install", "foo-1.2.3", "bar-any", "baz-2.3.4"]) def test_RefreshPackages(self): pt = self.get_obj() self.assertRaises(NotImplementedError, pt.RefreshPackages) def test_FindExtra(self): pt = self.get_obj() pt.getSupportedEntries = Mock() pt.getSupportedEntries.return_value = [ lxml.etree.Element("Package", name="foo"), lxml.etree.Element("Package", name="bar"), lxml.etree.Element("Package", name="baz")] pt.installed = dict(foo="1.2.3", bar="2.3.4", quux="3.4.5", xyzzy="4.5.6") extra = pt.FindExtra() self.assertEqual(len(extra), 2) self.assertItemsEqual([e.get("name") for e in extra], ["quux", "xyzzy"]) for el in extra: self.assertEqual(el.tag, "Package") self.assertEqual(el.get("type"), pt.pkgtype) class TestSvcTool(TestTool): test_obj = SvcTool def test_start_service(self): st = self.get_obj() st.get_svc_command = Mock() st.cmd = MagicMock() service = lxml.etree.Element("Service", name="foo", type="test") self.assertEqual(st.start_service(service), st.cmd.run.return_value) st.get_svc_command.assert_called_with(service, "start") st.cmd.run.assert_called_with(st.get_svc_command.return_value) def test_stop_service(self): st = self.get_obj() st.get_svc_command = Mock() st.cmd = MagicMock() service = lxml.etree.Element("Service", name="foo", type="test") self.assertEqual(st.stop_service(service), st.cmd.run.return_value) st.get_svc_command.assert_called_with(service, "stop") st.cmd.run.assert_called_with(st.get_svc_command.return_value) def test_restart_service(self): st = self.get_obj() st.get_svc_command = Mock() st.cmd = MagicMock() def reset(): st.get_svc_command.reset_mock() st.cmd.reset_mock() service = lxml.etree.Element("Service", name="foo", type="test") self.assertEqual(st.restart_service(service), st.cmd.run.return_value) st.get_svc_command.assert_called_with(service, "restart") st.cmd.run.assert_called_with(st.get_svc_command.return_value) reset() service.set('target', 'reload') self.assertEqual(st.restart_service(service), st.cmd.run.return_value) st.get_svc_command.assert_called_with(service, "reload") st.cmd.run.assert_called_with(st.get_svc_command.return_value) def test_check_service(self): st = self.get_obj() st.get_svc_command = Mock() st.cmd = MagicMock() service = lxml.etree.Element("Service", name="foo", type="test") def reset(): st.get_svc_command.reset_mock() st.cmd.reset_mock() st.cmd.run.return_value = True self.assertEqual(st.check_service(service), True) st.get_svc_command.assert_called_with(service, "status") st.cmd.run.assert_called_with(st.get_svc_command.return_value) reset() st.cmd.run.return_value = False self.assertEqual(st.check_service(service), False) st.get_svc_command.assert_called_with(service, "status") st.cmd.run.assert_called_with(st.get_svc_command.return_value) def test_Remove(self): st = self.get_obj() st.InstallService = Mock() services = [lxml.etree.Element("Service", type="test", name="foo"), 
lxml.etree.Element("Service", type="test", name="bar", status="on")] st.Remove(services) self.assertItemsEqual(st.InstallService.call_args_list, [call(e) for e in services]) for entry in services: self.assertEqual(entry.get("status"), "off") @patch("Bcfg2.Client.prompt") def test_BundleUpdated(self, mock_prompt): Bcfg2.Options.setup.service_mode = 'default' Bcfg2.Options.setup.interactive = False st = self.get_obj() st.handlesEntry = Mock() st.handlesEntry.side_effect = lambda e: e.tag == "Service" st.stop_service = Mock() st.stop_service.return_value = True st.restart_service = Mock() st.restart_service.side_effect = lambda e: e.get("name") != "failed" def reset(): st.handlesEntry.reset_mock() st.stop_service.reset_mock() st.restart_service.reset_mock() mock_prompt.reset_mock() st.restarted = [] norestart = lxml.etree.Element("Service", type="test", name="norestart", restart="false") interactive = lxml.etree.Element("Service", type="test", name="interactive", status="on", restart="interactive") interactive2 = lxml.etree.Element("Service", type="test", name="interactive2", status="on", restart="interactive") stop = lxml.etree.Element("Service", type="test", name="stop", status="off") restart = lxml.etree.Element("Service", type="test", name="restart", status="on") duplicate = lxml.etree.Element("Service", type="test", name="restart", status="on") failed = lxml.etree.Element("Service", type="test", name="failed", status="on") unhandled = lxml.etree.Element("Path", type="file", name="/unhandled") services = [norestart, interactive, interactive2, stop, restart, duplicate, failed] entries = services + [unhandled] bundle = lxml.etree.Element("Bundle") bundle.extend(entries) # test in non-interactive mode reset() states = st.BundleUpdated(bundle) self.assertItemsEqual(st.handlesEntry.call_args_list, [call(e) for e in entries]) st.stop_service.assert_called_with(stop) self.assertItemsEqual(st.restart_service.call_args_list, [call(restart), call(failed)]) self.assertItemsEqual(st.restarted, [restart.get("name")]) self.assertFalse(mock_prompt.called) # test in interactive mode reset() mock_prompt.side_effect = lambda p: "interactive2" not in p Bcfg2.Options.setup.interactive = True states = st.BundleUpdated(bundle) self.assertItemsEqual(st.handlesEntry.call_args_list, [call(e) for e in entries]) st.stop_service.assert_called_with(stop) self.assertItemsEqual(st.restart_service.call_args_list, [call(restart), call(failed), call(interactive)]) self.assertItemsEqual(st.restarted, [restart.get("name"), interactive.get("name")]) self.assertEqual(len(mock_prompt.call_args_list), 4) # test in build mode reset() Bcfg2.Options.setup.interactive = False Bcfg2.Options.setup.service_mode = 'build' states = st.BundleUpdated(bundle) self.assertItemsEqual(st.handlesEntry.call_args_list, [call(e) for e in entries]) self.assertItemsEqual(st.stop_service.call_args_list, [call(restart), call(duplicate), call(failed), call(stop)]) self.assertFalse(mock_prompt.called) self.assertFalse(st.restart_service.called) self.assertItemsEqual(st.restarted, []) @patch("Bcfg2.Client.Tools.Tool.Install") def test_Install(self, mock_Install): install = [lxml.etree.Element("Service", type="test", name="foo")] services = install + [lxml.etree.Element("Service", type="test", name="bar", install="false")] st = self.get_obj() self.assertEqual(st.Install(services), mock_Install.return_value) mock_Install.assert_called_with(st, install) def test_InstallService(self): st = self.get_obj() self.assertRaises(NotImplementedError, 
st.InstallService, Mock()) testsuite/Testsrc/Testlib/TestClient/TestTools/__init__.py000066400000000000000000000000001303523157100242610ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestClient/__init__.py000066400000000000000000000000001303523157100223210ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestLogger.py000066400000000000000000000032761303523157100206050ustar00rootroot00000000000000import os import sys import logging from mock import Mock from Bcfg2.Logger import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != '/': if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * class TestDebuggable(Bcfg2TestCase): test_obj = Debuggable def setUp(self): set_setup_default('debug', False) def get_obj(self): return self.test_obj() def test__init(self): d = self.get_obj() self.assertIsInstance(d.logger, logging.Logger) self.assertFalse(d.debug_flag) def test_set_debug(self): d = self.get_obj() self.assertEqual(True, d.set_debug(True)) self.assertEqual(d.debug_flag, True) self.assertEqual(False, d.set_debug(False)) self.assertEqual(d.debug_flag, False) def test_toggle_debug(self): d = self.get_obj() d.set_debug = Mock() orig = d.debug_flag self.assertEqual(d.toggle_debug(), d.set_debug.return_value) d.set_debug.assert_called_with(not orig) def test_debug_log(self): d = self.get_obj() d.logger = Mock() d.debug_flag = False d.debug_log("test") self.assertFalse(d.logger.error.called) d.logger.reset_mock() d.debug_log("test", flag=True) self.assertTrue(d.logger.error.called) d.logger.reset_mock() d.debug_flag = True d.debug_log("test") self.assertTrue(d.logger.error.called) testsuite/Testsrc/Testlib/TestOptions/000077500000000000000000000000001303523157100204375ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestOptions/One.py000066400000000000000000000001631303523157100215320ustar00rootroot00000000000000"""Test module for component loading.""" class One(object): """Test class for component loading.""" pass testsuite/Testsrc/Testlib/TestOptions/TestComponents.py000066400000000000000000000216271303523157100240060ustar00rootroot00000000000000"""test component loading.""" import argparse import os from Bcfg2.Options import Option, BooleanOption, PathOption, ComponentAction, \ get_parser, new_parser, Types, ConfigFileAction, Common from testsuite.Testsrc.Testlib.TestOptions import make_config, One, Two, \ OptionTestCase # create a bunch of fake components for testing component loading options class ChildOne(object): """fake component for testing component loading.""" options = [Option("--child-one")] class ChildTwo(object): """fake component for testing component loading.""" options = [Option("--child-two")] class ChildComponentAction(ComponentAction): """child component loader action.""" islist = False mapping = {"one": ChildOne, "two": ChildTwo} class ComponentOne(object): """fake component for testing component loading.""" options = [BooleanOption("--one")] class ComponentTwo(object): """fake component for testing component loading.""" options = [Option("--child", default="one", action=ChildComponentAction)] class ComponentThree(object): """fake component for testing component loading.""" options = [BooleanOption("--three")] class ConfigFileComponent(object): """fake component for testing component loading.""" options = [Option("--config2", 
action=ConfigFileAction), Option(cf=("config", "test"), dest="config2_test", default="bar")] class PathComponent(object): """fake component for testing macros in child components.""" options = [PathOption(cf=("test", "test_path")), PathOption(cf=("test", "test_path_default"), default="/test/default")] class ParentComponentAction(ComponentAction): """parent component loader action.""" mapping = {"one": ComponentOne, "two": ComponentTwo, "three": ComponentThree, "config": ConfigFileComponent, "path": PathComponent} class TestComponentOptions(OptionTestCase): """test cases for component loading.""" def setUp(self): OptionTestCase.setUp(self) self.options = [ Option("--parent", type=Types.comma_list, default=["one", "two"], action=ParentComponentAction)] self.result = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.result, description="component testing parser") @make_config() def test_loading_components(self, config_file): """load a single component during option parsing.""" self.parser.parse(["-C", config_file, "--parent", "one"]) self.assertEqual(self.result.parent, [ComponentOne]) @make_config() def test_component_option(self, config_file): """use options from a component loaded during option parsing.""" self.parser.parse(["--one", "-C", config_file, "--parent", "one"]) self.assertEqual(self.result.parent, [ComponentOne]) self.assertTrue(self.result.one) @make_config() def test_multi_component_load(self, config_file): """load multiple components during option parsing.""" self.parser.parse(["-C", config_file, "--parent", "one,three"]) self.assertEqual(self.result.parent, [ComponentOne, ComponentThree]) @make_config() def test_multi_component_options(self, config_file): """use options from multiple components during option parsing.""" self.parser.parse(["-C", config_file, "--three", "--parent", "one,three", "--one"]) self.assertEqual(self.result.parent, [ComponentOne, ComponentThree]) self.assertTrue(self.result.one) self.assertTrue(self.result.three) @make_config() def test_component_default_not_loaded(self, config_file): """options from default but unused components not available.""" self.assertRaises( SystemExit, self.parser.parse, ["-C", config_file, "--child", "one", "--parent", "one"]) @make_config() def test_tiered_components(self, config_file): """load child component.""" self.parser.parse(["-C", config_file, "--parent", "two", "--child", "one"]) self.assertEqual(self.result.parent, [ComponentTwo]) self.assertEqual(self.result.child, ChildOne) @make_config() def test_options_tiered_components(self, config_file): """use options from child component.""" self.parser.parse(["--child-one", "foo", "-C", config_file, "--parent", "two", "--child", "one"]) self.assertEqual(self.result.parent, [ComponentTwo]) self.assertEqual(self.result.child, ChildOne) self.assertEqual(self.result.child_one, "foo") @make_config() def test_bogus_component(self, config_file): """error out with bad component name.""" self.assertRaises(SystemExit, self.parser.parse, ["-C", config_file, "--parent", "blargle"]) @make_config() @make_config({"config": {"test": "foo"}}) def test_config_component(self, config1, config2): """load component with alternative config file.""" self.parser.parse(["-C", config1, "--config2", config2, "--parent", "config"]) self.assertEqual(self.result.config2, config2) self.assertEqual(self.result.config2_test, "foo") @make_config() def test_config_component_no_file(self, config_file): """load component with missing alternative config file.""" 
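        # --config2 is not passed on the command line here, so the option handled
        # by ConfigFileAction is expected to stay at None (checked below)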
self.parser.parse(["-C", config_file, "--parent", "config"]) self.assertEqual(self.result.config2, None) @make_config({"test": {"test_path": "/test"}}) def test_macros_in_component_options(self, config_file): """fix up macros in component options.""" self.parser.add_options([Common.repository]) self.parser.parse(["-C", config_file, "-Q", "/foo/bar", "--parent", "path"]) self.assertEqual(self.result.test_path, "/foo/bar/test") self.assertEqual(self.result.test_path_default, "/foo/bar/test/default") class ImportComponentAction(ComponentAction): """action that imports real classes for testing.""" islist = False bases = ["testsuite.Testsrc.Testlib.TestOptions"] class ImportModuleAction(ImportComponentAction): """action that only imports modules for testing.""" module = True class TestImportComponentOptions(OptionTestCase): """test cases for component loading.""" def setUp(self): self.options = [Option("--cls", cf=("config", "cls"), action=ImportComponentAction), Option("--module", action=ImportModuleAction)] self.result = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.result) @make_config() def test_import_component(self, config_file): """load class components by importing.""" self.parser.parse(["-C", config_file, "--cls", "One"]) self.assertEqual(self.result.cls, One.One) @make_config() def test_import_module(self, config_file): """load module components by importing.""" self.parser.parse(["-C", config_file, "--module", "One"]) self.assertEqual(self.result.module, One) @make_config() def test_import_full_path(self, config_file): """load components by importing the full path.""" self.parser.parse(["-C", config_file, "--cls", "os.path"]) self.assertEqual(self.result.cls, os.path) @make_config() def test_import_bogus_class(self, config_file): """fail to load class component that cannot be imported.""" self.assertRaises(SystemExit, self.parser.parse, ["-C", config_file, "--cls", "Three"]) @make_config() def test_import_bogus_module(self, config_file): """fail to load module component that cannot be imported.""" self.assertRaises(SystemExit, self.parser.parse, ["-C", config_file, "--module", "Three"]) @make_config() def test_import_bogus_path(self, config_file): """fail to load component that cannot be imported by full path.""" self.assertRaises(SystemExit, self.parser.parse, ["-C", config_file, "--cls", "Bcfg2.No.Such.Thing"]) @make_config({"config": {"test": "foo", "cls": "Two"}}) def test_default_from_config_for_component_options(self, config_file): """use default value from config file for options added by dynamic loaded component.""" self.parser.parse(["-C", config_file]) self.assertEqual(self.result.cls, Two.Two) self.assertEqual(self.result.test, "foo") testsuite/Testsrc/Testlib/TestOptions/TestConfigFiles.py000066400000000000000000000036101303523157100240410ustar00rootroot00000000000000"""test reading multiple config files.""" import argparse import mock from Bcfg2.Options import Option, PathOption, ConfigFileAction, get_parser, \ new_parser from testsuite.Testsrc.Testlib.TestOptions import make_config, OptionTestCase class TestConfigFiles(OptionTestCase): def setUp(self): self.options = [ PathOption(cf=("test", "config2"), action=ConfigFileAction), PathOption(cf=("test", "config3"), action=ConfigFileAction), Option(cf=("test", "foo")), Option(cf=("test", "bar")), Option(cf=("test", "baz"))] self.results = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.results) @make_config({"test": {"baz": "baz"}}) 
def test_config_files(self, config3): """read multiple config files.""" # Because make_config() generates temporary files for the # configuration, we have to work backwards here. first we # generate config3, then we generate config2 (which includes a # reference to config3), then we finally generate the main # config file, which contains a reference to config2. oh how # I wish we could use context managers here... @make_config({"test": {"bar": "bar", "config3": config3}}) def inner1(config2): @make_config({"test": {"foo": "foo", "config2": config2}}) def inner2(config): self.parser.parse(["-C", config]) self.assertEqual(self.results.foo, "foo") self.assertEqual(self.results.bar, "bar") self.assertEqual(self.results.baz, "baz") inner2() inner1() @mock.patch("os.path.exists", mock.Mock(return_value=False)) def test_no_config_file(self): """fail to read config file.""" self.assertRaises(SystemExit, self.parser.parse, []) testsuite/Testsrc/Testlib/TestOptions/TestOptionGroups.py000066400000000000000000000130611303523157100243220ustar00rootroot00000000000000"""test reading multiple config files.""" import argparse import sys from Bcfg2.Options import Option, BooleanOption, Parser, OptionGroup, \ ExclusiveOptionGroup, WildcardSectionGroup, new_parser, get_parser from testsuite.common import Bcfg2TestCase, skipUnless from testsuite.Testsrc.Testlib.TestOptions import make_config, OptionTestCase class TestOptionGroups(Bcfg2TestCase): def setUp(self): self.options = None def _test_options(self, options): """test helper.""" result = argparse.Namespace() parser = Parser(components=[self], namespace=result) parser.parse(options) return result def test_option_group(self): """basic option group functionality.""" self.options = [OptionGroup(BooleanOption("--foo"), BooleanOption("--bar"), BooleanOption("--baz"), title="group")] result = self._test_options(["--foo", "--bar"]) self.assertTrue(result.foo) self.assertTrue(result.bar) self.assertFalse(result.baz) def test_exclusive_option_group(self): """parse options from exclusive option group.""" self.options = [ ExclusiveOptionGroup(BooleanOption("--foo"), BooleanOption("--bar"), BooleanOption("--baz"))] result = self._test_options(["--foo"]) self.assertTrue(result.foo) self.assertFalse(result.bar) self.assertFalse(result.baz) self.assertRaises(SystemExit, self._test_options, ["--foo", "--bar"]) def test_required_exclusive_option_group(self): """parse options from required exclusive option group.""" self.options = [ ExclusiveOptionGroup(BooleanOption("--foo"), BooleanOption("--bar"), BooleanOption("--baz"), required=True)] result = self._test_options(["--foo"]) self.assertTrue(result.foo) self.assertFalse(result.bar) self.assertFalse(result.baz) self.assertRaises(SystemExit, self._test_options, []) class TestNestedOptionGroups(TestOptionGroups): def setUp(self): TestOptionGroups.setUp(self) self.options = [ OptionGroup( BooleanOption("--foo"), BooleanOption("--bar"), OptionGroup( BooleanOption("--baz"), BooleanOption("--quux"), ExclusiveOptionGroup( BooleanOption("--test1"), BooleanOption("--test2")), title="inner"), title="outer")] def test_option_group(self): """nest option groups.""" result = self._test_options(["--foo", "--baz", "--test1"]) self.assertTrue(result.foo) self.assertFalse(result.bar) self.assertTrue(result.baz) self.assertFalse(result.quux) self.assertTrue(result.test1) self.assertFalse(result.test2) @skipUnless(sys.version_info >= (2, 7), "Nested exclusive option groups do not work in Python 2.6") def 
test_nested_exclusive_option_groups(self): """nest exclusive option groups.""" self.assertRaises(SystemExit, self._test_options, ["--test1", "--test2"]) class TestWildcardSectionGroups(OptionTestCase): config = { "four:one": { "foo": "foo one", "bar": "bar one", "baz": "baz one" }, "four:two": { "foo": "foo two", "bar": "bar two" }, "five:one": { "foo": "foo one", "bar": "bar one" }, "five:two": { "foo": "foo two", "bar": "bar two" }, "five:three": { "foo": "foo three", "bar": "bar three" } } def setUp(self): self.options = [ WildcardSectionGroup( Option(cf=("four:*", "foo")), Option(cf=("four:*", "bar"))), WildcardSectionGroup( Option(cf=("five:*", "foo")), Option(cf=("five:*", "bar")), prefix="", dest="sections")] self.results = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.results) @make_config(config) def test_wildcard_section_groups(self, config_file): """parse options from wildcard section groups.""" self.parser.parse(["-C", config_file]) self.assertEqual(self.results.four_four_one_foo, "foo one") self.assertEqual(self.results.four_four_one_bar, "bar one") self.assertEqual(self.results.four_four_two_foo, "foo two") self.assertEqual(self.results.four_four_two_bar, "bar two") self.assertItemsEqual(self.results.four_sections, ["four:one", "four:two"]) self.assertEqual(self.results.five_one_foo, "foo one") self.assertEqual(self.results.five_one_bar, "bar one") self.assertEqual(self.results.five_two_foo, "foo two") self.assertEqual(self.results.five_two_bar, "bar two") self.assertEqual(self.results.five_three_foo, "foo three") self.assertEqual(self.results.five_three_bar, "bar three") self.assertItemsEqual(self.results.sections, ["five:one", "five:two", "five:three"]) testsuite/Testsrc/Testlib/TestOptions/TestOptions.py000066400000000000000000000513671303523157100233200ustar00rootroot00000000000000"""basic option parsing tests.""" import argparse import os import tempfile import mock from Bcfg2.Compat import ConfigParser from Bcfg2.Options import Option, PathOption, RepositoryMacroOption, \ BooleanOption, Parser, PositionalArgument, OptionParserException, \ Common, new_parser, get_parser from testsuite.Testsrc.Testlib.TestOptions import OptionTestCase, \ make_config, clean_environment class TestBasicOptions(OptionTestCase): """test basic option parsing.""" def setUp(self): # parsing options can modify the Option objects themselves. # that's probably bad -- and it's definitely bad if we ever # want to do real on-the-fly config changes -- but it's easier # to leave it as is and set the options on each test. OptionTestCase.setUp(self) self.options = [ BooleanOption("--test-true-boolean", env="TEST_TRUE_BOOLEAN", cf=("test", "true_boolean"), default=True), BooleanOption("--test-false-boolean", env="TEST_FALSE_BOOLEAN", cf=("test", "false_boolean"), default=False), BooleanOption(cf=("test", "true_config_boolean"), default=True), BooleanOption(cf=("test", "false_config_boolean"), default=False), Option("--test-option", env="TEST_OPTION", cf=("test", "option"), default="foo"), PathOption("--test-path-option", env="TEST_PATH_OPTION", cf=("test", "path"), default="/test")] @clean_environment def _test_options(self, options=None, env=None, config=None): """helper to test a set of options. returns the namespace from parsing the given CLI options with the given config and environment. 
""" if config is not None: config = {"test": config} if options is None: options = [] @make_config(config) def inner(config_file): """do the actual tests, since py2.4 lacks context managers.""" result = argparse.Namespace() parser = Parser(components=[self], namespace=result) parser.parse(argv=["-C", config_file] + options) return result if env is not None: for name, value in env.items(): os.environ[name] = value return inner() def test_expand_path(self): """expand ~ in path option.""" options = self._test_options(options=["--test-path-option", "~/test"]) self.assertEqual(options.test_path_option, os.path.expanduser("~/test")) def test_canonicalize_path(self): """get absolute path from path option.""" options = self._test_options(options=["--test-path-option", "./test"]) self.assertEqual(options.test_path_option, os.path.abspath("./test")) @make_config() def test_default_path_canonicalization(self, config_file): """canonicalize default PathOption values.""" testdir = os.path.expanduser("~/test") result = argparse.Namespace() parser = Parser(namespace=result) parser.add_options([PathOption("--test1", default="~/test"), PathOption(cf=("test", "test2"), default="~/test"), Common.repository]) parser.parse(["-C", config_file]) self.assertEqual(result.test1, testdir) self.assertEqual(result.test2, testdir) def test_default_bool(self): """use the default value of boolean options.""" options = self._test_options() self.assertTrue(options.test_true_boolean) self.assertFalse(options.test_false_boolean) self.assertTrue(options.true_config_boolean) self.assertFalse(options.false_config_boolean) def test_default(self): """use the default value of an option.""" options = self._test_options() self.assertEqual(options.test_option, "foo") def test_default_path(self): """use the default value of a path option.""" options = self._test_options() self.assertEqual(options.test_path_option, "/test") def test_invalid_boolean(self): """set boolean to invalid values.""" self.assertRaises(ValueError, self._test_options, config={"true_boolean": "you betcha"}) self.assertRaises(ValueError, self._test_options, env={"TEST_TRUE_BOOLEAN": "hell no"}) def test_set_boolean_in_config(self): """set boolean options in config files.""" set_to_defaults = {"true_boolean": "1", "false_boolean": "0", "true_config_boolean": "yes", "false_config_boolean": "no"} options = self._test_options(config=set_to_defaults) self.assertTrue(options.test_true_boolean) self.assertFalse(options.test_false_boolean) self.assertTrue(options.true_config_boolean) self.assertFalse(options.false_config_boolean) set_to_other = {"true_boolean": "false", "false_boolean": "true", "true_config_boolean": "off", "false_config_boolean": "on"} options = self._test_options(config=set_to_other) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) self.assertFalse(options.true_config_boolean) self.assertTrue(options.false_config_boolean) def test_set_in_config(self): """set options in config files.""" options = self._test_options(config={"option": "foo"}) self.assertEqual(options.test_option, "foo") options = self._test_options(config={"option": "bar"}) self.assertEqual(options.test_option, "bar") def test_set_path_in_config(self): """set path options in config files.""" options = self._test_options(config={"path": "/test"}) self.assertEqual(options.test_path_option, "/test") options = self._test_options(config={"path": "/foo"}) self.assertEqual(options.test_path_option, "/foo") def test_set_boolean_in_env(self): """set boolean 
options in environment.""" set_to_defaults = {"TEST_TRUE_BOOLEAN": "1", "TEST_FALSE_BOOLEAN": "0"} options = self._test_options(env=set_to_defaults) self.assertTrue(options.test_true_boolean) self.assertFalse(options.test_false_boolean) set_to_other = {"TEST_TRUE_BOOLEAN": "false", "TEST_FALSE_BOOLEAN": "true"} options = self._test_options(env=set_to_other) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) def test_set_in_env(self): """set options in environment.""" options = self._test_options(env={"TEST_OPTION": "foo"}) self.assertEqual(options.test_option, "foo") options = self._test_options(env={"TEST_OPTION": "bar"}) self.assertEqual(options.test_option, "bar") def test_set_path_in_env(self): """set path options in environment.""" options = self._test_options(env={"TEST_PATH_OPTION": "/test"}) self.assertEqual(options.test_path_option, "/test") options = self._test_options(env={"TEST_PATH_OPTION": "/foo"}) self.assertEqual(options.test_path_option, "/foo") def test_version(self): """print version and exit on --version""" self.assertRaises( SystemExit, self._test_options, options=['--version']) def test_set_boolean_in_cli(self): """set boolean options in CLI options.""" # passing the option yields the reverse of the default, no # matter the default options = self._test_options(options=["--test-true-boolean", "--test-false-boolean"]) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) def test_set_in_cli(self): """set options in CLI options.""" options = self._test_options(options=["--test-option", "foo"]) self.assertEqual(options.test_option, "foo") options = self._test_options(options=["--test-option", "bar"]) self.assertEqual(options.test_option, "bar") def test_set_path_in_cli(self): """set path options in CLI options.""" options = self._test_options(options=["--test-path-option", "/test"]) self.assertEqual(options.test_path_option, "/test") options = self._test_options(options=["--test-path-option", "/foo"]) self.assertEqual(options.test_path_option, "/foo") def test_env_overrides_config_bool(self): """setting boolean option in the environment overrides config file.""" config = {"true_boolean": "false", "false_boolean": "true"} env = {"TEST_TRUE_BOOLEAN": "yes", "TEST_FALSE_BOOLEAN": "no"} options = self._test_options(config=config, env=env) self.assertTrue(options.test_true_boolean) self.assertFalse(options.test_false_boolean) def test_env_overrides_config(self): """setting option in the environment overrides config file.""" options = self._test_options(config={"option": "bar"}, env={"TEST_OPTION": "baz"}) self.assertEqual(options.test_option, "baz") def test_env_overrides_config_path(self): """setting path option in the environment overrides config file.""" options = self._test_options(config={"path": "/foo"}, env={"TEST_PATH_OPTION": "/bar"}) self.assertEqual(options.test_path_option, "/bar") def test_cli_overrides_config_bool(self): """setting boolean option in the CLI overrides config file.""" config = {"true_boolean": "on", "false_boolean": "off"} options = ["--test-true-boolean", "--test-false-boolean"] options = self._test_options(options=options, config=config) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) def test_cli_overrides_config(self): """setting option in the CLI overrides config file.""" options = self._test_options(options=["--test-option", "baz"], config={"option": "bar"}) self.assertEqual(options.test_option, "baz") def 
test_cli_overrides_config_path(self): """setting path option in the CLI overrides config file.""" options = self._test_options(options=["--test-path-option", "/bar"], config={"path": "/foo"}) self.assertEqual(options.test_path_option, "/bar") def test_cli_overrides_env_bool(self): """setting boolean option in the CLI overrides environment.""" env = {"TEST_TRUE_BOOLEAN": "0", "TEST_FALSE_BOOLEAN": "1"} options = ["--test-true-boolean", "--test-false-boolean"] options = self._test_options(options=options, env=env) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) def test_cli_overrides_env(self): """setting option in the CLI overrides environment.""" options = self._test_options(options=["--test-option", "baz"], env={"TEST_OPTION": "bar"}) self.assertEqual(options.test_option, "baz") def test_cli_overrides_env_path(self): """setting path option in the CLI overrides environment.""" options = self._test_options(options=["--test-path-option", "/bar"], env={"TEST_PATH_OPTION": "/foo"}) self.assertEqual(options.test_path_option, "/bar") def test_cli_overrides_all_bool(self): """setting boolean option in the CLI overrides everything else.""" config = {"true_boolean": "no", "false_boolean": "yes"} env = {"TEST_TRUE_BOOLEAN": "0", "TEST_FALSE_BOOLEAN": "1"} options = ["--test-true-boolean", "--test-false-boolean"] options = self._test_options(options=options, env=env) self.assertFalse(options.test_true_boolean) self.assertTrue(options.test_false_boolean) def test_cli_overrides_all(self): """setting option in the CLI overrides everything else.""" options = self._test_options(options=["--test-option", "baz"], env={"TEST_OPTION": "bar"}, config={"test": "quux"}) self.assertEqual(options.test_option, "baz") def test_cli_overrides_all_path(self): """setting path option in the CLI overrides everything else.""" options = self._test_options(options=["--test-path-option", "/bar"], env={"TEST_PATH_OPTION": "/foo"}, config={"path": "/baz"}) self.assertEqual(options.test_path_option, "/bar") @make_config() def _test_dest(self, *args, **kwargs): """helper to test that ``dest`` is set properly.""" args = list(args) expected = args.pop(0) config_file = args.pop() sentinel = object() kwargs["default"] = sentinel result = argparse.Namespace() parser = Parser(namespace=result) parser.add_options([Option(*args, **kwargs)]) parser.parse(["-C", config_file]) self.assertTrue(hasattr(result, expected)) self.assertEqual(getattr(result, expected), sentinel) def test_explicit_dest(self): """set the ``dest`` of an option explicitly.""" self._test_dest("bar", dest="bar") def test_dest_from_env_var(self): """set the ``dest`` of an option from the env var name.""" self._test_dest("foo", env="FOO") def test_dest_from_cf(self): """set the ``dest`` of an option from the config option.""" self._test_dest("foo_bar", cf=("test", "foo-bar")) def test_dest_from_cli(self): """set the ``dest`` of an option from the CLI option.""" self._test_dest("test_foo", "--test-foo") def test_dest_from_all(self): """set the ``dest`` of an option from the best of multiple sources.""" self._test_dest("foo_baz", cf=("test", "foo-bar"), env="FOO_BAZ") self._test_dest("xyzzy", "--xyzzy", cf=("test", "foo-bar"), env="FOO_BAZ") self._test_dest("quux", "--xyzzy", cf=("test", "foo-bar"), env="FOO_BAZ", dest="quux") @make_config() def test_positional_args(self, config_file): """get values from positional arguments.""" result = argparse.Namespace() parser = Parser(namespace=result) 
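        # each PositionalArgument is stored under a dest matching its name, which
        # is what the assertions below rely on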
parser.add_options([PositionalArgument("single")]) parser.parse(["-C", config_file, "single"]) self.assertEqual(result.single, "single") result = argparse.Namespace() parser = Parser(namespace=result) parser.add_options([PositionalArgument("one"), PositionalArgument("two")]) parser.parse(["-C", config_file, "one", "two"]) self.assertEqual(result.one, "one") self.assertEqual(result.two, "two") def test_duplicate_cli_option(self): """add duplicate CLI option.""" parser = Parser(components=[self]) self.assertRaises( argparse.ArgumentError, parser.add_options, [Option("--test-option")]) def test_duplicate_env_option(self): """add duplicate environment option.""" parser = Parser(components=[self]) self.assertRaises( OptionParserException, parser.add_options, [Option(env="TEST_OPTION")]) def test_duplicate_cf_option(self): """add duplicate config file option.""" parser = Parser(components=[self]) self.assertRaises( OptionParserException, parser.add_options, [Option(cf=("test", "option"))]) @make_config({"test": {"test_path": "/test", "test_macro": ""}}) def test_repository_macro(self, config_file): """fix up macros.""" result = argparse.Namespace() parser = Parser(namespace=result) parser.add_options([PathOption("--test1"), RepositoryMacroOption("--test2"), PathOption(cf=("test", "test_path")), PathOption(cf=("test", "test_path_default"), default="/test/default"), RepositoryMacroOption(cf=("test", "test_macro")), RepositoryMacroOption( cf=("test", "test_macro_default"), default=""), Common.repository]) parser.parse(["-C", config_file, "-Q", "/foo/bar", "--test1", "/test1", "--test2", ""]) self.assertEqual(result.repository, "/foo/bar") self.assertEqual(result.test1, "/foo/bar/test1") self.assertEqual(result.test2, "/foo/bar/foo/bar") self.assertEqual(result.test_macro, "/foo/bar") self.assertEqual(result.test_macro_default, "/foo/bar") self.assertEqual(result.test_path, "/foo/bar/test") self.assertEqual(result.test_path_default, "/foo/bar/test/default") @make_config() def test_file_like_path_option(self, config_file): """get file-like object from PathOption.""" result = argparse.Namespace() parser = Parser(namespace=result) parser.add_options([PathOption("--test", type=argparse.FileType('r'))]) fd, name = tempfile.mkstemp() fh = os.fdopen(fd, "w") fh.write("test") fh.close() try: parser.parse(["-C", config_file, "--test", name]) self.assertEqual(result.test.name, name) self.assertEqual(result.test.read(), "test") finally: os.unlink(name) @clean_environment @make_config() def test_unknown_options(self, config_file): """error on unknown options.""" parser = Parser(components=[self]) self.assertRaises(SystemExit, parser.parse, ["-C", config_file, "--not-a-real-option"]) @clean_environment @make_config() def test_reparse(self, config_file): """reparse options.""" result = argparse.Namespace() parser = Parser(components=[self], namespace=result) parser.parse(["-C", config_file]) self.assertFalse(result.test_false_boolean) parser.parse(["-C", config_file]) self.assertFalse(result.test_false_boolean) parser.reparse() self.assertFalse(result.test_false_boolean) parser.reparse(["-C", config_file, "--test-false-boolean"]) self.assertTrue(result.test_false_boolean) cfp = ConfigParser.ConfigParser() cfp.add_section("test") cfp.set("test", "false_boolean", "on") parser.parse(["-C", config_file]) cfp.write(open(config_file, "w")) self.assertTrue(result.test_false_boolean) class TestParsingHooks(OptionTestCase): """test option parsing hooks.""" def setUp(self): self.options_parsed_hook = mock.Mock() 
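        # the parser is expected to call options_parsed_hook() on each component
        # once parsing finishes; using a Mock here lets test_parsing_hooks verify
        # that the hook actually fired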
self.options = [BooleanOption("--test", default=False)] self.results = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.results) @make_config() def test_parsing_hooks(self, config_file): """option parsing hooks are called.""" self.parser.parse(["-C", config_file]) self.options_parsed_hook.assert_called_with() class TestEarlyParsingHooks(OptionTestCase): """test early option parsing hooks.""" parse_first = True def setUp(self): self.component_parsed_hook = mock.Mock() self.options = [BooleanOption("--early-test", default=False)] self.results = argparse.Namespace() new_parser() self.parser = get_parser(components=[self], namespace=self.results) @make_config() def test_parsing_hooks(self, config_file): """early option parsing hooks are called.""" self.parser.parse(["-C", config_file, "--early-test"]) self.assertEqual(self.component_parsed_hook.call_count, 1) early_opts = self.component_parsed_hook.call_args[0][0] self.assertTrue(early_opts.early_test) testsuite/Testsrc/Testlib/TestOptions/TestSubcommands.py000066400000000000000000000114041303523157100241240ustar00rootroot00000000000000"""test subcommand option parsing.""" import argparse import sys from Bcfg2.Compat import StringIO from Bcfg2.Options import Option, get_parser, new_parser, Subcommand, \ Subparser, CommandRegistry import Bcfg2.Options.Subcommands from testsuite.Testsrc.Testlib.TestOptions import make_config, OptionTestCase class MockSubcommand(Subcommand): """fake subcommand that just records the options it was called with.""" run_options = None def run(self, setup): self.__class__.run_options = setup class One(MockSubcommand): """fake subcommand for testing.""" options = [Option("--test-one")] class Two(MockSubcommand): """fake subcommand for testing.""" options = [Option("--test-two")] def local_subclass(cls): """get a subclass of ``cls`` that adds no functionality. This can be used to subclass the various test classes above so that their options don't get modified by option parsing. """ return type("Local%s" % cls.__name__, (cls,), {}) class TestSubcommands(OptionTestCase): """tests for subcommands and subparsers.""" def setUp(self): self.registry = CommandRegistry() self.one = local_subclass(One) self.two = local_subclass(Two) self.registry.register_command(self.one) self.registry.register_command(self.two) self.result = argparse.Namespace() Bcfg2.Options.Subcommands.master_setup = self.result new_parser() self.parser = get_parser(namespace=self.result, components=[self]) self.parser.add_options(self.registry.subcommand_options) def test_register_commands(self): """register subcommands.""" registry = CommandRegistry() registry.register_commands(globals().values(), parent=MockSubcommand) self.assertItemsEqual(registry.commands.keys(), ["one", "two", "help"]) self.assertIsInstance(registry.commands['one'], One) self.assertIsInstance(registry.commands['two'], Two) @make_config() def test_get_subcommand(self, config_file): """parse simple subcommands.""" self.parser.parse(["-C", config_file, "localone"]) self.assertEqual(self.result.subcommand, "localone") def test_subcommand_usage(self): """sane usage message from subcommands.""" self.assertEqual( One().usage(), "one [--test-one TEST_ONE] - fake subcommand for testing.") # subclasses do not inherit the docstring from the parent, so # this tests a command subclass without a docstring, even # though that should never happen due to the pylint tests. 
self.assertEqual(self.one().usage().strip(), "localone [--test-one TEST_ONE]") def _get_subcommand_output(self, args): self.parser.parse(args) old_stdout = sys.stdout sys.stdout = StringIO() rv = self.registry.runcommand() output = [l for l in sys.stdout.getvalue().splitlines() if not l.startswith("DEBUG: ")] sys.stdout = old_stdout return (rv, output) @make_config() def test_help(self, config_file): """sane help message from subcommand registry.""" rv, output = self._get_subcommand_output(["-C", config_file, "help"]) self.assertIn(rv, [0, None]) # the help message will look like: # # localhelp [] # localone [--test-one TEST_ONE] # localtwo [--test-two TEST_TWO] commands = [] command_help = { "help": self.registry.help.usage(), "localone": self.one().usage(), "localtwo": self.two().usage()} for line in output: command = line.split()[0] commands.append(command) if command not in command_help: self.fail("Got help for unknown command %s: %s" % (command, line)) self.assertEqual(line, command_help[command]) self.assertItemsEqual(commands, command_help.keys()) @make_config() def test_subcommand_help(self, config_file): """get help message on a single command.""" rv, output = self._get_subcommand_output( ["-C", config_file, "help", "localone"]) self.assertIn(rv, [0, None]) self.assertEqual(output[0].strip(), "usage: %s" % self.one().usage().strip()) @make_config() def test_nonexistent_subcommand_help(self, config_file): """get help message on a nonexistent command.""" rv, output = self._get_subcommand_output( ["-C", config_file, "help", "blargle"]) self.assertNotEqual(rv, 0) self.assertIn("No such command", output[0]) testsuite/Testsrc/Testlib/TestOptions/TestTypes.py000066400000000000000000000107261303523157100227630ustar00rootroot00000000000000"""test builtin option types.""" import argparse from mock import patch from Bcfg2.Options import Option, Types, Parser from testsuite.common import Bcfg2TestCase class TestOptionTypes(Bcfg2TestCase): """test builtin option types.""" def setUp(self): self.options = None def _test_options(self, options): """helper to test option types. this expects that self.options is set to a single option named test. The value of that option is returned. 
""" result = argparse.Namespace() parser = Parser(components=[self], namespace=result) parser.parse(options) return result.test def test_comma_list(self): """parse comma-list values.""" self.options = [Option("--test", type=Types.comma_list)] expected = ["one", "two", "three"] self.assertItemsEqual(self._test_options(["--test", "one,two,three"]), expected) self.assertItemsEqual(self._test_options(["--test", "one, two, three"]), expected) self.assertItemsEqual(self._test_options(["--test", "one , two ,three"]), expected) self.assertItemsEqual(self._test_options(["--test", "one two, three"]), ["one two", "three"]) def test_colon_list(self): """parse colon-list values.""" self.options = [Option("--test", type=Types.colon_list)] self.assertItemsEqual(self._test_options(["--test", "one:two three"]), ["one", "two three"]) def test_literal_dict(self): """parse literal-dict values.""" self.options = [Option("--test", type=Types.literal_dict)] expected = { "one": True, "two": 2, "three": "three", "four": False, "five": { "a": 1, "b": 2 }} self.assertDictEqual( self._test_options(["--test", '''{ "one": True, "two": 2, "three": "three", "four": False, "five": { "a": 1, "b": 2 }}''']), expected) def test_anchored_regex_list(self): """parse regex lists.""" self.options = [Option("--test", type=Types.anchored_regex_list)] self.assertItemsEqual( [r.pattern for r in self._test_options(["--test", r'\d+ \s*'])], [r'^\d+$', r'^\s*$']) self.assertRaises(SystemExit, self._test_options, ["--test", '(]']) def test_octal(self): """parse octal options.""" self.options = [Option("--test", type=Types.octal)] self.assertEqual(self._test_options(["--test", "0777"]), 511) self.assertEqual(self._test_options(["--test", "133114255"]), 23894189) @patch("pwd.getpwnam") def test_username(self, mock_getpwnam): """parse username options.""" self.options = [Option("--test", type=Types.username)] mock_getpwnam.return_value = ("test", '********', 1001, 1001, "Test user", "/home/test", "/bin/bash") self.assertEqual(self._test_options(["--test", "1001"]), 1001) self.assertEqual(self._test_options(["--test", "test"]), 1001) @patch("grp.getgrnam") def test_groupname(self, mock_getpwnam): """parse group name options.""" self.options = [Option("--test", type=Types.groupname)] mock_getpwnam.return_value = ("test", '*', 1001, ["test"]) self.assertEqual(self._test_options(["--test", "1001"]), 1001) self.assertEqual(self._test_options(["--test", "test"]), 1001) def test_timeout(self): """parse timeout options.""" self.options = [Option("--test", type=Types.timeout)] self.assertEqual(self._test_options(["--test", "1.0"]), 1.0) self.assertEqual(self._test_options(["--test", "1"]), 1.0) self.assertEqual(self._test_options(["--test", "0"]), None) def test_size(self): """parse human-readable size options.""" self.options = [Option("--test", type=Types.size)] self.assertEqual(self._test_options(["--test", "5k"]), 5120) self.assertEqual(self._test_options(["--test", "5"]), 5) self.assertRaises(SystemExit, self._test_options, ["--test", "g5m"]) testsuite/Testsrc/Testlib/TestOptions/TestWildcards.py000066400000000000000000000033331303523157100235670ustar00rootroot00000000000000"""test wildcard options.""" import argparse from Bcfg2.Options import Option, Parser from testsuite.Testsrc.Testlib.TestOptions import OptionTestCase, make_config class TestWildcardOptions(OptionTestCase): """test parsing wildcard options.""" config = { "foo": { "test1": "test1", "test2": "test2", "thing1": "thing1", "thing2": "thing2", "foo": "foo" } } def setUp(self): # 
parsing options can modify the Option objects themselves. # that's probably bad -- and it's definitely bad if we ever # want to do real on-the-fly config changes -- but it's easier # to leave it as is and set the options on each test. self.options = [ Option(cf=("foo", "*"), dest="all"), Option(cf=("foo", "test*"), dest="test"), Option(cf=("foo", "bogus*"), dest="unmatched"), Option(cf=("bar", "*"), dest="no_section"), Option(cf=("foo", "foo"))] @make_config(config) def test_wildcard_options(self, config_file): """parse wildcard options.""" result = argparse.Namespace() parser = Parser(components=[self], namespace=result) parser.parse(argv=["-C", config_file]) self.assertDictEqual(result.all, {"test1": "test1", "test2": "test2", "thing1": "thing1", "thing2": "thing2"}) self.assertDictEqual(result.test, {"test1": "test1", "test2": "test2"}) self.assertDictEqual(result.unmatched, {}) self.assertDictEqual(result.no_section, {}) testsuite/Testsrc/Testlib/TestOptions/Two.py000066400000000000000000000003371303523157100215650ustar00rootroot00000000000000"""Test module for component loading.""" from Bcfg2.Options import Option class Two(object): """Test class for component loading.""" options = [Option('--test', cf=("config", "test"), dest="test", default="bar")] testsuite/Testsrc/Testlib/TestOptions/__init__.py000066400000000000000000000050301303523157100225460ustar00rootroot00000000000000"""helper functions for option testing.""" import os import tempfile from Bcfg2.Compat import wraps, ConfigParser from Bcfg2.Options import Parser, PathOption from testsuite.common import Bcfg2TestCase class make_config(object): # pylint: disable=invalid-name """decorator to create a temporary config file from a dict. The filename of the temporary config file is added as the last positional argument to the function call. """ def __init__(self, config_data=None): self.config_data = config_data or {} def __call__(self, func): @wraps(func) def inner(*args, **kwargs): """decorated function.""" cfp = ConfigParser.ConfigParser() for section, options in self.config_data.items(): cfp.add_section(section) for key, val in options.items(): cfp.set(section, key, val) fd, name = tempfile.mkstemp() config_file = os.fdopen(fd, 'w') cfp.write(config_file) config_file.close() args = list(args) + [name] try: rv = func(*args, **kwargs) finally: os.unlink(name) return rv return inner def clean_environment(func): """decorator that unsets any environment variables used by options. The list of options is taken from the first argument, which is presumed to be ``self``. The variables are restored at the end of the function. 
""" @wraps(func) def inner(self, *args, **kwargs): """decorated function.""" envvars = {} for opt in self.options: if opt.env is not None: envvars[opt.env] = os.environ.get(opt.env) if opt.env in os.environ: del os.environ[opt.env] rv = func(self, *args, **kwargs) for name, val in envvars.items(): if val is None and name in os.environ: del os.environ[name] elif val is not None: os.environ[name] = val return rv return inner class OptionTestCase(Bcfg2TestCase): """test case that doesn't mock out config file reading.""" @classmethod def setUpClass(cls): # ensure that the option parser actually reads config files Parser.unit_test = False Bcfg2TestCase.setUpClass() def setUp(self): Bcfg2TestCase.setUp(self) PathOption.repository = None @classmethod def tearDownClass(cls): Parser.unit_test = True testsuite/Testsrc/Testlib/TestServer/000077500000000000000000000000001303523157100202525ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestServer/TestCache.py000066400000000000000000000040531303523157100224710ustar00rootroot00000000000000import os import sys # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from Bcfg2.Server.Cache import * class TestCache(Bcfg2TestCase): def test_cache(self): md_cache = Cache("Metadata") md_cache['foo.example.com'] = 'foo metadata' md_cache['bar.example.com'] = 'bar metadata' self.assertItemsEqual(list(iter(md_cache)), ["foo.example.com", "bar.example.com"]) probe_cache = Cache("Probes", "data") probe_cache['foo.example.com'] = 'foo probe data' probe_cache['bar.example.com'] = 'bar probe data' self.assertItemsEqual(list(iter(probe_cache)), ["foo.example.com", "bar.example.com"]) md_cache.expire("foo.example.com") self.assertItemsEqual(list(iter(md_cache)), ["bar.example.com"]) self.assertItemsEqual(list(iter(probe_cache)), ["foo.example.com", "bar.example.com"]) probe_cache.expire("bar.example.com") self.assertItemsEqual(list(iter(md_cache)), ["bar.example.com"]) self.assertItemsEqual(list(iter(probe_cache)), ["foo.example.com"]) probe_cache['bar.example.com'] = 'bar probe data' self.assertItemsEqual(list(iter(md_cache)), ["bar.example.com"]) self.assertItemsEqual(list(iter(probe_cache)), ["foo.example.com", "bar.example.com"]) expire("bar.example.com") self.assertEqual(len(md_cache), 0) self.assertItemsEqual(list(iter(probe_cache)), ["foo.example.com"]) probe_cache2 = Cache("Probes", "data") self.assertItemsEqual(list(iter(probe_cache)), list(iter(probe_cache2))) testsuite/Testsrc/Testlib/TestServer/TestEncryption.py000066400000000000000000000153011303523157100236160ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os import sys from Bcfg2.Compat import b64decode, b64encode from mock import Mock, MagicMock, patch # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * try: from Bcfg2.Server.Encryption import * HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False class TestEncryption(Bcfg2TestCase): plaintext = """foo bar baz ö \t\tquux """ + "a" * 16384 # 16K is completely arbitrary iv = "0123456789ABCDEF" salt 
= "01234567" algo = "des_cbc" @skipUnless(HAS_CRYPTO, "Encryption libraries not found") def setUp(self): Bcfg2.Options.setup.algorithm = "aes_256_cbc" def test_str_crypt(self): """ test str_encrypt/str_decrypt """ key = "a simple key" # simple symmetrical test with no options crypted = str_encrypt(self.plaintext, key) self.assertEqual(self.plaintext, str_decrypt(crypted, key)) # symmetrical test with lots of options crypted = str_encrypt(self.plaintext, key, iv=self.iv, salt=self.salt, algorithm=self.algo) self.assertEqual(self.plaintext, str_decrypt(crypted, key, iv=self.iv, algorithm=self.algo)) # test that different algorithms are actually used self.assertNotEqual(str_encrypt(self.plaintext, key), str_encrypt(self.plaintext, key, algorithm=self.algo)) # test that different keys are actually used self.assertNotEqual(str_encrypt(self.plaintext, key), str_encrypt(self.plaintext, "different key")) # test that different IVs are actually used self.assertNotEqual(str_encrypt(self.plaintext, key, iv=self.iv), str_encrypt(self.plaintext, key)) # test that errors are raised on bad decrypts crypted = str_encrypt(self.plaintext, key, algorithm=self.algo) self.assertRaises(EVPError, str_decrypt, crypted, "bogus key", algorithm=self.algo) self.assertRaises(EVPError, str_decrypt, crypted, key) # bogus algorithm def test_ssl_crypt(self): """ test ssl_encrypt/ssl_decrypt """ passwd = "a simple passphrase" # simple symmetrical test crypted = ssl_encrypt(self.plaintext, passwd) self.assertEqual(self.plaintext, ssl_decrypt(crypted, passwd)) # more complex symmetrical test crypted = ssl_encrypt(self.plaintext, passwd, algorithm=self.algo, salt=self.salt) self.assertEqual(self.plaintext, ssl_decrypt(crypted, passwd, algorithm=self.algo)) # test that different algorithms are actually used self.assertNotEqual(ssl_encrypt(self.plaintext, passwd), ssl_encrypt(self.plaintext, passwd, algorithm=self.algo)) # test that different passwords are actually used self.assertNotEqual(ssl_encrypt(self.plaintext, passwd), ssl_encrypt(self.plaintext, "different pass")) # there's no reasonable test we can do to see if the # output is base64-encoded, unfortunately, but if it's # obviously not we fail crypted = ssl_encrypt(self.plaintext, passwd) self.assertRegexpMatches(crypted, r'^[A-Za-z0-9+/]+[=]{0,2}$') # test that errors are raised on bad decrypts crypted = ssl_encrypt(self.plaintext, passwd, algorithm=self.algo) self.assertRaises(EVPError, ssl_decrypt, crypted, "bogus passwd", algorithm=self.algo) self.assertRaises(EVPError, ssl_decrypt, crypted, passwd) # bogus algorithm def test_bruteforce_decrypt(self): passwd = "a simple passphrase" crypted = ssl_encrypt(self.plaintext, passwd) # test with no passphrases given nor in config Bcfg2.Options.setup.passphrases = dict() self.assertRaises(EVPError, bruteforce_decrypt, crypted) # test with good passphrase given in function call self.assertEqual(self.plaintext, bruteforce_decrypt(crypted, passphrases=["bogus pass", passwd, "also bogus"])) # test with no good passphrase given nor in config. we use # something that isn't a valid ciphertext here since a # ciphertext encrypted with one key may be technically # decryptable with a different key, although it will decrypt # to gibberish. nonetheless, it doesn't raise the requisite # EVPError, so the test fails. self.assertRaises(EVPError, bruteforce_decrypt, b64encode("not an actual ciphertext!"), passphrases=["bogus", "also bogus"]) # test with no good passphrase given nor in config. 
this # version of the test uses a valid ciphertext, and looks for # *either* EVPError or a failed decrypt. try: plaintext = bruteforce_decrypt(crypted, passphrases=["bogus", "also bogus"]) if plaintext == passwd: self.fail("Successfully decrypted ciphertext with wrong key") except EVPError: # success! pass # test with good passphrase in config file Bcfg2.Options.setup.passphrases = dict(bogus="bogus", real=passwd, bogus2="also bogus") self.assertEqual(self.plaintext, bruteforce_decrypt(crypted)) # test that passphrases given in function call take # precedence over config self.assertRaises(EVPError, bruteforce_decrypt, crypted, passphrases=["bogus", "also bogus"]) # test that different algorithms are used crypted = ssl_encrypt(self.plaintext, passwd, algorithm=self.algo) self.assertEqual(self.plaintext, bruteforce_decrypt(crypted, algorithm=self.algo)) testsuite/Testsrc/Testlib/TestServer/TestPlugin/000077500000000000000000000000001303523157100223505ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py000066400000000000000000000040511303523157100244740ustar00rootroot00000000000000import os import sys from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugin.base import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != '/': if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestLogger import TestDebuggable class TestPlugin(TestDebuggable): test_obj = Plugin def setUp(self): TestDebuggable.setUp(self) set_setup_default("filemonitor", MagicMock()) set_setup_default("repository", datastore) def get_obj(self, core=None): if core is None: core = Mock() @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) def inner(): return self.test_obj(core) return inner() @patch("os.makedirs") @patch("os.path.exists") def test__init(self, mock_exists, mock_makedirs): if self.test_obj.create: core = Mock() core.setup = MagicMock() mock_exists.return_value = True p = self.get_obj(core=core) self.assertEqual(p.data, os.path.join(datastore, p.name)) self.assertEqual(p.core, core) mock_exists.assert_any_call(p.data) self.assertFalse(mock_makedirs.called) mock_exists.reset_mock() mock_makedirs.reset_mock() mock_exists.return_value = False p = self.get_obj(core=core) self.assertEqual(p.data, os.path.join(datastore, p.name)) self.assertEqual(p.core, core) mock_exists.assert_any_call(p.data) mock_makedirs.assert_any_call(p.data) @patch("os.makedirs") def test_init_repo(self, mock_makedirs): self.test_obj.init_repo(datastore) mock_makedirs.assert_called_with(os.path.join(datastore, self.test_obj.name)) testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testexceptions.py000066400000000000000000000020261303523157100257430ustar00rootroot00000000000000import os import sys from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugin.exceptions import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != '/': if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * class TestPluginInitError(Bcfg2TestCase): """ placeholder for future tests """ pass class TestPluginExecutionError(Bcfg2TestCase): """ placeholder for future tests """ pass 
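# A minimal sketch (hypothetical, not from the Bcfg2 source) of the kind of
# test these placeholder classes could grow into.  It assumes that
# PluginExecutionError is a plain Exception subclass, as it is raised and
# caught elsewhere in this test suite; the class and method names below are
# illustrative only.
class ExamplePluginExecutionErrorSketch(Bcfg2TestCase):
    def test_message_roundtrip(self):
        # the message given to the constructor should survive str(), and
        # the error should be catchable as a plain Exception
        err = PluginExecutionError("binding failed")
        self.assertEqual(str(err), "binding failed")
        self.assertTrue(isinstance(err, Exception))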
class TestMetadataConsistencyError(Bcfg2TestCase):
    """ placeholder for future tests """
    pass


class TestMetadataRuntimeError(Bcfg2TestCase):
    """ placeholder for future tests """
    pass


class TestValidationError(Bcfg2TestCase):
    """ placeholder for future tests """
    pass


class TestSpecificityError(Bcfg2TestCase):
    """ placeholder for future tests """
    pass

testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py
import os
import sys
import copy
import genshi
import lxml.etree
import Bcfg2.Server
import genshi.core
from Bcfg2.Compat import reduce
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugin.helpers import *
from Bcfg2.Server.Plugin.exceptions import PluginInitError

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != '/':
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)
from common import *
from TestServer.TestPlugin.Testbase import TestPlugin, TestDebuggable
from TestServer.TestPlugin.Testinterfaces import TestGenerator

try:
    from Bcfg2.Server.Encryption import EVPError
except:
    pass


def tostring(el):
    return lxml.etree.tostring(el, xml_declaration=False).decode('UTF-8')


class FakeElementTree(lxml.etree._ElementTree):
    xinclude = Mock()
    parse = Mock


class TestFunctions(Bcfg2TestCase):
    def test_removecomment(self):
        data = [(None, "test", 1),
                (None, "test2", 2)]
        stream = [(genshi.core.COMMENT, "test", 0),
                  data[0],
                  (genshi.core.COMMENT, "test3", 0),
                  data[1]]
        self.assertItemsEqual(list(removecomment(stream)), data)


class TestDatabaseBacked(TestPlugin):
    test_obj = DatabaseBacked
    synced = False

    def setUp(self):
        TestPlugin.setUp(self)
        set_setup_default("%s_db" % self.test_obj.__name__.lower(), False)
        set_setup_default("db_engine", None)

    @skipUnless(HAS_DJANGO, "Django not found")
    def test__use_db(self):
        core = Mock()
        db = self.get_obj(core=core)
        attr = "%s_db" % self.test_obj.__name__.lower()

        db.core.database_available = True
        setattr(Bcfg2.Options.setup, attr, True)
        self.assertTrue(db._use_db)

        setattr(Bcfg2.Options.setup, attr, False)
        self.assertFalse(db._use_db)

        db.core.database_available = False
        self.assertFalse(db._use_db)

        setattr(Bcfg2.Options.setup, attr, True)
        self.assertRaises(PluginInitError, self.get_obj, core)

    def syncdb(self, modeltest):
        """ Given an instance of a :class:`DBModelTestCase` object,
        sync and clean the database """
        inst = modeltest(methodName='test_syncdb')
        if not self.__class__.synced:
            inst.test_syncdb()
            self.__class__.synced = True
        inst.test_cleandb()


class TestPluginDatabaseModel(Bcfg2TestCase):
    """ placeholder for future tests """
    pass


class TestFileBacked(TestDebuggable):
    test_obj = FileBacked
    path = os.path.join(datastore, "test")

    def setUp(self):
        TestDebuggable.setUp(self)
        set_setup_default("filemonitor", MagicMock())

    def get_obj(self, path=None):
        if path is None:
            path = self.path
        return self.test_obj(path)

    @patch("%s.open" % builtins)
    def test_HandleEvent(self, mock_open):
        fb = self.get_obj()
        fb.Index = Mock()

        def reset():
            fb.Index.reset_mock()
            mock_open.reset_mock()

        for evt in ["exists", "changed", "created"]:
            reset()
            event = Mock()
            event.code2str.return_value = evt
            fb.HandleEvent(event)
            mock_open.assert_called_with(self.path)
            mock_open.return_value.read.assert_any_call()
            fb.Index.assert_any_call()

        reset()
        event = Mock()
        event.code2str.return_value = "endExist"
fb.HandleEvent(event) self.assertFalse(mock_open.called) self.assertFalse(fb.Index.called) class TestDirectoryBacked(TestDebuggable): test_obj = DirectoryBacked testpaths = {1: '', 2: '/foo', 3: '/foo/bar', 4: '/foo/bar/baz', 5: 'quux', 6: 'xyzzy/', 7: 'xyzzy/plugh/'} testfiles = ['foo', 'bar/baz.txt', 'plugh.py'] ignore = [] # ignore no events badevents = [] # DirectoryBacked handles all files, so there's no # such thing as a bad event def setUp(self): TestDebuggable.setUp(self) set_setup_default("filemonitor", MagicMock()) def test_child_interface(self): """ ensure that the child object has the correct interface """ self.assertTrue(hasattr(self.test_obj.__child__, "HandleEvent")) @patch("os.makedirs", Mock()) def get_obj(self, fam=None): if fam is None: fam = Mock() @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return self.test_obj(os.path.join(datastore, self.test_obj.__name__)) return inner() @patch("os.makedirs") @patch("os.path.exists") def test__init(self, mock_exists, mock_makedirs): @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_add_monitor): db = self.test_obj(datastore) mock_exists.return_value = True mock_add_monitor.assert_called_with('') mock_exists.assert_called_with(db.data) self.assertFalse(mock_makedirs.called) mock_add_monitor.reset_mock() mock_exists.reset_mock() mock_makedirs.reset_mock() mock_exists.return_value = False db = self.test_obj(datastore) mock_add_monitor.assert_called_with('') mock_exists.assert_called_with(db.data) mock_makedirs.assert_called_with(db.data) inner() def test__getitem(self): db = self.get_obj() db.entries.update(dict(a=1, b=2, c=3)) self.assertEqual(db['a'], 1) self.assertEqual(db['b'], 2) expected = KeyError try: db['d'] except expected: pass except: err = sys.exc_info()[1] self.assertFalse(True, "%s raised instead of %s" % (err.__class__.__name__, expected.__class__.__name__)) else: self.assertFalse(True, "%s not raised" % expected.__class__.__name__) def test__iter(self): db = self.get_obj() db.entries.update(dict(a=1, b=2, c=3)) self.assertEqual([i for i in db], [i for i in db.entries.items()]) @patch("os.path.isdir") def test_add_directory_monitor(self, mock_isdir): db = self.get_obj() db.fam = Mock() db.fam.rv = 0 def reset(): db.fam.rv += 1 db.fam.AddMonitor.return_value = db.fam.rv db.fam.reset_mock() mock_isdir.reset_mock() mock_isdir.return_value = True for path in self.testpaths.values(): reset() db.add_directory_monitor(path) db.fam.AddMonitor.assert_called_with(os.path.join(db.data, path), db) self.assertIn(db.fam.rv, db.handles) self.assertEqual(db.handles[db.fam.rv], path) reset() # test duplicate adds for path in self.testpaths.values(): reset() db.add_directory_monitor(path) self.assertFalse(db.fam.AddMonitor.called) reset() mock_isdir.return_value = False db.add_directory_monitor('bogus') self.assertFalse(db.fam.AddMonitor.called) self.assertNotIn(db.fam.rv, db.handles) def test_add_entry(self): db = self.get_obj() db.fam = Mock() class MockChild(Mock): def __init__(self, path, **kwargs): Mock.__init__(self, **kwargs) self.path = path self.HandleEvent = Mock() db.__child__ = MockChild for path in self.testpaths.values(): event = Mock() db.add_entry(path, event) self.assertIn(path, db.entries) self.assertEqual(db.entries[path].path, os.path.join(db.data, path)) db.entries[path].HandleEvent.assert_called_with(event) @patch("os.path.isdir") def test_HandleEvent(self, mock_isdir): db = self.get_obj() 
db.add_entry = Mock() db.add_directory_monitor = Mock() # a path with a leading / should never get into # DirectoryBacked.handles, so strip that test case for rid, path in self.testpaths.items(): path = path.lstrip('/') db.handles[rid] = path def reset(): mock_isdir.reset_mock() db.add_entry.reset_mock() db.add_directory_monitor.reset_mock() def get_event(filename, action, requestID): event = Mock() event.code2str.return_value = action event.filename = filename event.requestID = requestID return event # test events on the data directory itself reset() mock_isdir.return_value = True event = get_event(db.data, "exists", 1) db.HandleEvent(event) db.add_directory_monitor.assert_called_with("") # test events on paths that aren't handled reset() mock_isdir.return_value = False event = get_event('/' + self.testfiles[0], 'created', max(self.testpaths.keys()) + 1) db.HandleEvent(event) self.assertFalse(db.add_directory_monitor.called) self.assertFalse(db.add_entry.called) for req_id, path in self.testpaths.items(): # a path with a leading / should never get into # DirectoryBacked.handles, so strip that test case path = path.lstrip('/') basepath = os.path.join(datastore, path) for fname in self.testfiles: relpath = os.path.join(path, fname) abspath = os.path.join(basepath, fname) # test endExist does nothing reset() event = get_event(fname, 'endExist', req_id) db.HandleEvent(event) self.assertFalse(db.add_directory_monitor.called) self.assertFalse(db.add_entry.called) mock_isdir.return_value = True for evt in ["created", "exists", "changed"]: # test that creating or changing a directory works reset() event = get_event(fname, evt, req_id) db.HandleEvent(event) db.add_directory_monitor.assert_called_with(relpath) self.assertFalse(db.add_entry.called) mock_isdir.return_value = False for evt in ["created", "exists"]: # test that creating a file works reset() event = get_event(fname, evt, req_id) db.HandleEvent(event) db.add_entry.assert_called_with(relpath, event) self.assertFalse(db.add_directory_monitor.called) db.entries[relpath] = MagicMock() # test that changing a file that already exists works reset() event = get_event(fname, "changed", req_id) db.HandleEvent(event) db.entries[relpath].HandleEvent.assert_called_with(event) self.assertFalse(db.add_directory_monitor.called) self.assertFalse(db.add_entry.called) # test that deleting an entry works reset() event = get_event(fname, "deleted", req_id) db.HandleEvent(event) self.assertNotIn(relpath, db.entries) # test that changing a file that doesn't exist works reset() event = get_event(fname, "changed", req_id) db.HandleEvent(event) db.add_entry.assert_called_with(relpath, event) self.assertFalse(db.add_directory_monitor.called) db.entries[relpath] = MagicMock() # test that deleting a directory works. 
this is a little # strange because the _parent_ directory has to handle the # deletion reset() event = get_event('quux', "deleted", 1) db.HandleEvent(event) for key in db.entries.keys(): self.assertFalse(key.startswith('quux')) # test bad events for fname in self.badevents: reset() event = get_event(fname, "created", 1) db.HandleEvent(event) self.assertFalse(db.add_entry.called) self.assertFalse(db.add_directory_monitor.called) # test ignored events for fname in self.ignore: reset() event = get_event(fname, "created", 1) db.HandleEvent(event) self.assertFalse(mock_isdir.called, msg="Failed to ignore %s" % fname) self.assertFalse(db.add_entry.called, msg="Failed to ignore %s" % fname) self.assertFalse(db.add_directory_monitor.called, msg="Failed to ignore %s" % fname) class TestXMLFileBacked(TestFileBacked): test_obj = XMLFileBacked # can be set to True (on child test cases where should_monitor is # always True) or False (on child test cases where should_monitor # is always False) should_monitor = None path = os.path.join(datastore, "test", "test1.xml") def setUp(self): TestFileBacked.setUp(self) set_setup_default("encoding", 'utf-8') def get_obj(self, path=None, should_monitor=False): if path is None: path = self.path @patchIf(not isinstance(os.path.exists, Mock), "os.path.exists", Mock()) def inner(): return self.test_obj(path, should_monitor=should_monitor) return inner() @patch("Bcfg2.Server.FileMonitor.get_fam") def test__init(self, mock_get_fam): xfb = self.get_obj() self.assertEqual(xfb.fam, mock_get_fam.return_value) if self.should_monitor: xfb = self.get_obj(should_monitor=True) xfb.fam.AddMonitor.assert_called_with(self.path, xfb) else: xfb = self.get_obj() self.assertFalse(xfb.fam.AddMonitor.called) @patch("glob.glob") @patch("lxml.etree.parse") def test_follow_xincludes(self, mock_parse, mock_glob): xfb = self.get_obj() xfb.add_monitor = Mock() xfb.add_monitor.side_effect = lambda p: xfb.extras.append(p) def reset(): xfb.add_monitor.reset_mock() mock_glob.reset_mock() mock_parse.reset_mock() xfb.extras = [] xdata = dict() mock_parse.side_effect = lambda p: xdata[p] mock_glob.side_effect = lambda g: [g] base = os.path.dirname(self.path) # basic functionality test2 = os.path.join(base, 'test2.xml') xdata[test2] = lxml.etree.Element("Test").getroottree() xfb._follow_xincludes(xdata=xdata[test2]) self.assertFalse(xfb.add_monitor.called) if (not hasattr(self.test_obj, "xdata") or not isinstance(self.test_obj.xdata, property)): # if xdata is settable, test that method of getting data # to _follow_xincludes reset() xfb.xdata = xdata[test2].getroot() xfb._follow_xincludes() self.assertFalse(xfb.add_monitor.called) xfb.xdata = None reset() xfb._follow_xincludes(fname=test2) self.assertFalse(xfb.add_monitor.called) # test one level of xinclude xdata[self.path] = lxml.etree.Element("Test").getroottree() lxml.etree.SubElement(xdata[self.path].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href=test2) reset() xfb._follow_xincludes(fname=self.path) xfb.add_monitor.assert_called_with(test2) self.assertItemsEqual(mock_parse.call_args_list, [call(f) for f in xdata.keys()]) mock_glob.assert_called_with(test2) reset() xfb._follow_xincludes(fname=self.path, xdata=xdata[self.path]) xfb.add_monitor.assert_called_with(test2) self.assertItemsEqual(mock_parse.call_args_list, [call(f) for f in xdata.keys() if f != self.path]) mock_glob.assert_called_with(test2) # test two-deep level of xinclude, with some files in another # directory test3 = os.path.join(base, "test3.xml") test4 = os.path.join(base, 
"test_dir", "test4.xml") test5 = os.path.join(base, "test_dir", "test5.xml") test6 = os.path.join(base, "test_dir", "test6.xml") xdata[test3] = lxml.etree.Element("Test").getroottree() lxml.etree.SubElement(xdata[test3].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href=test4) xdata[test4] = lxml.etree.Element("Test").getroottree() lxml.etree.SubElement(xdata[test4].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href=test5) xdata[test5] = lxml.etree.Element("Test").getroottree() xdata[test6] = lxml.etree.Element("Test").getroottree() # relative includes lxml.etree.SubElement(xdata[self.path].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href="test3.xml") lxml.etree.SubElement(xdata[test3].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href="test_dir/test6.xml") reset() xfb._follow_xincludes(fname=self.path) expected = [call(f) for f in xdata.keys() if f != self.path] self.assertItemsEqual(xfb.add_monitor.call_args_list, expected) self.assertItemsEqual(mock_parse.call_args_list, [call(f) for f in xdata.keys()]) self.assertItemsEqual(mock_glob.call_args_list, expected) reset() xfb._follow_xincludes(fname=self.path, xdata=xdata[self.path]) expected = [call(f) for f in xdata.keys() if f != self.path] self.assertItemsEqual(xfb.add_monitor.call_args_list, expected) self.assertItemsEqual(mock_parse.call_args_list, expected) self.assertItemsEqual(mock_glob.call_args_list, expected) # test wildcard xinclude reset() xdata[self.path] = lxml.etree.Element("Test").getroottree() lxml.etree.SubElement(xdata[self.path].getroot(), Bcfg2.Server.XI_NAMESPACE + "include", href="*.xml") def glob_rv(path): if path == os.path.join(base, '*.xml'): return [self.path, test2, test3] else: return [path] mock_glob.side_effect = glob_rv xfb._follow_xincludes(xdata=xdata[self.path]) expected = [call(f) for f in xdata.keys() if f != self.path] self.assertItemsEqual(xfb.add_monitor.call_args_list, expected) self.assertItemsEqual(mock_parse.call_args_list, expected) self.assertItemsEqual(mock_glob.call_args_list, [call(os.path.join(base, '*.xml')), call(test4), call(test5), call(test6)]) @patch("lxml.etree._ElementTree", FakeElementTree) @patch("Bcfg2.Server.Plugin.helpers.%s._follow_xincludes" % test_obj.__name__) def test_Index(self, mock_follow): xfb = self.get_obj() def reset(): mock_follow.reset_mock() FakeElementTree.xinclude.reset_mock() xfb.extras = [] xfb.xdata = None # no xinclude reset() xdata = lxml.etree.Element("Test", name="test") children = [lxml.etree.SubElement(xdata, "Foo"), lxml.etree.SubElement(xdata, "Bar", name="bar")] xfb.data = tostring(xdata) xfb.Index() mock_follow.assert_any_call() try: self.assertEqual(xfb.xdata.base, self.path) except AttributeError: # python 2.4 and/or lxml 2.0 don't store the base_url in # .base -- no idea where it's stored. 
pass self.assertItemsEqual([tostring(e) for e in xfb.entries], [tostring(e) for e in children]) # with xincludes reset() mock_follow.side_effect = \ lambda: xfb.extras.extend(["/test/test2.xml", "/test/test_dir/test3.xml"]) children.extend([ lxml.etree.SubElement(xdata, Bcfg2.Server.XI_NAMESPACE + "include", href="/test/test2.xml"), lxml.etree.SubElement(xdata, Bcfg2.Server.XI_NAMESPACE + "include", href="/test/test_dir/test3.xml")]) test2 = lxml.etree.Element("Test", name="test2") lxml.etree.SubElement(test2, "Baz") test3 = lxml.etree.Element("Test", name="test3") replacements = {"/test/test2.xml": test2, "/test/test_dir/test3.xml": test3} def xinclude(): for el in xfb.xdata.findall('//%sinclude' % Bcfg2.Server.XI_NAMESPACE): xfb.xdata.replace(el, replacements[el.get("href")]) FakeElementTree.xinclude.side_effect = xinclude xfb.data = tostring(xdata) xfb.Index() mock_follow.assert_any_call() FakeElementTree.xinclude.assert_any_call try: self.assertEqual(xfb.xdata.base, self.path) except AttributeError: pass self.assertItemsEqual([tostring(e) for e in xfb.entries], [tostring(e) for e in children]) @patch("Bcfg2.Server.FileMonitor.get_fam", Mock()) def test_add_monitor(self): xfb = self.get_obj() xfb.add_monitor("/test/test2.xml") self.assertIn("/test/test2.xml", xfb.extra_monitors) xfb = self.get_obj() xfb.fam = Mock() xfb.add_monitor("/test/test4.xml") xfb.fam.AddMonitor.assert_called_with("/test/test4.xml", xfb) self.assertIn("/test/test4.xml", xfb.extra_monitors) class TestStructFile(TestXMLFileBacked): test_obj = StructFile def setUp(self): TestXMLFileBacked.setUp(self) set_setup_default("lax_decryption", False) def _get_test_data(self): """ build a very complex set of test data """ # top-level group and client elements groups = dict() # group and client elements that are descendents of other group or # client elements subgroups = dict() # children of elements in `groups' that should be included in # match results children = dict() # children of elements in `subgroups' that should be included in # match results subchildren = dict() # top-level tags that are not group elements standalone = [] xdata = lxml.etree.Element("Test", name="test") groups[0] = lxml.etree.SubElement(xdata, "Group", name="group1", include="true") children[0] = [lxml.etree.SubElement(groups[0], "Child", name="c1"), lxml.etree.SubElement(groups[0], "Child", name="c2")] subgroups[0] = [lxml.etree.SubElement(groups[0], "Group", name="subgroup1", include="true"), lxml.etree.SubElement(groups[0], "Client", name="client1", include="false")] subchildren[0] = \ [lxml.etree.SubElement(subgroups[0][0], "Child", name="sc1"), lxml.etree.SubElement(subgroups[0][0], "Child", name="sc2", attr="some attr"), lxml.etree.SubElement(subgroups[0][0], "Child", name="sc3")] lxml.etree.SubElement(subchildren[0][-1], "SubChild", name="subchild") lxml.etree.SubElement(subgroups[0][1], "Child", name="sc4") groups[1] = lxml.etree.SubElement(xdata, "Group", name="group2", include="false") children[1] = [] subgroups[1] = [] subchildren[1] = [] lxml.etree.SubElement(groups[1], "Child", name="c3") lxml.etree.SubElement(groups[1], "Child", name="c4") standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s1")) groups[2] = lxml.etree.SubElement(xdata, "Client", name="client2", include="false") children[2] = [] subgroups[2] = [] subchildren[2] = [] lxml.etree.SubElement(groups[2], "Child", name="c5") lxml.etree.SubElement(groups[2], "Child", name="c6") standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s2", attr="some 
attr")) groups[3] = lxml.etree.SubElement(xdata, "Client", name="client3", include="true") children[3] = [lxml.etree.SubElement(groups[3], "Child", name="c7", attr="some_attr"), lxml.etree.SubElement(groups[3], "Child", name="c8")] subgroups[3] = [] subchildren[3] = [] lxml.etree.SubElement(children[3][-1], "SubChild", name="subchild") standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s3")) lxml.etree.SubElement(standalone[-1], "SubStandalone", name="sub1") return (xdata, groups, subgroups, children, subchildren, standalone) def _get_template_test_data(self): (xdata, groups, subgroups, children, subchildren, standalone) = \ self._get_test_data() template_xdata = \ lxml.etree.Element("Test", name="test", nsmap=dict(py='http://genshi.edgewall.org/')) template_xdata.extend(xdata.getchildren()) return (template_xdata, groups, subgroups, children, subchildren, standalone) @patch("genshi.template.TemplateLoader") def test_Index(self, mock_TemplateLoader): TestXMLFileBacked.test_Index(self) sf = self.get_obj() sf.encryption = False sf.encoding = Mock() (xdata, groups, subgroups, children, subchildren, standalone) = \ self._get_test_data() sf.data = lxml.etree.tostring(xdata) mock_TemplateLoader.reset_mock() sf.Index() self.assertFalse(mock_TemplateLoader.called) mock_TemplateLoader.reset_mock() template_xdata = \ lxml.etree.Element("Test", name="test", nsmap=dict(py='http://genshi.edgewall.org/')) template_xdata.extend(xdata.getchildren()) sf.data = lxml.etree.tostring(template_xdata) sf.Index() mock_TemplateLoader.assert_called_with() loader = mock_TemplateLoader.return_value loader.load.assert_called_with(sf.name, cls=genshi.template.MarkupTemplate, encoding=Bcfg2.Options.setup.encoding) self.assertEqual(sf.template, loader.load.return_value) @skipUnless(HAS_CRYPTO, "No crypto libraries found, skipping") def test_Index_crypto(self): if not self.test_obj.encryption: return Bcfg2.Options.setup.lax_decryption = False sf = self.get_obj() sf._decrypt = Mock() sf._decrypt.return_value = 'plaintext' sf.data = ''' crypted plain ''' # test successful decryption sf.Index() self.assertItemsEqual( sf._decrypt.call_args_list, [call(el) for el in sf.xdata.xpath("//*[@encrypted]")]) for el in sf.xdata.xpath("//*[@encrypted]"): self.assertEqual(el.text, sf._decrypt.return_value) # test failed decryption, strict sf._decrypt.reset_mock() sf._decrypt.side_effect = EVPError self.assertRaises(PluginExecutionError, sf.Index) # test failed decryption, lax Bcfg2.Options.setup.lax_decryption = True sf._decrypt.reset_mock() sf.Index() self.assertItemsEqual( sf._decrypt.call_args_list, [call(el) for el in sf.xdata.xpath("//*[@encrypted]")]) @skipUnless(HAS_CRYPTO, "No crypto libraries found, skipping") @patchIf(HAS_CRYPTO, "Bcfg2.Server.Encryption.ssl_decrypt") def test_decrypt(self, mock_ssl): sf = self.get_obj() def reset(): mock_ssl.reset_mock() # test element without text contents Bcfg2.Options.setup.passphrases = dict() self.assertIsNone(sf._decrypt(lxml.etree.Element("Test"))) self.assertFalse(mock_ssl.called) # test element with a passphrase in the config file reset() el = lxml.etree.Element("Test", encrypted="foo") el.text = "crypted" Bcfg2.Options.setup.passphrases = dict(foo="foopass", bar="barpass") mock_ssl.return_value = "decrypted with ssl" self.assertEqual(sf._decrypt(el), mock_ssl.return_value) mock_ssl.assert_called_with(el.text, "foopass") # test element without valid passphrase reset() el.set("encrypted", "true") self.assertRaises(EVPError, sf._decrypt, el) 
self.assertFalse(mock_ssl.called) # test failure to decrypt element with a passphrase in the config reset() mock_ssl.side_effect = EVPError self.assertRaises(EVPError, sf._decrypt, el) def test_include_element(self): sf = self.get_obj() metadata = Mock() metadata.groups = ["group1", "group2"] metadata.hostname = "foo.example.com" inc = lambda tag, **attrs: \ sf._include_element(lxml.etree.Element(tag, **attrs), metadata) self.assertFalse(sf._include_element(lxml.etree.Comment("test"), metadata)) self.assertFalse(inc("Group", name="group3")) self.assertFalse(inc("Group", name="group2", negate="true")) self.assertFalse(inc("Group", name="group2", negate="tRuE")) self.assertTrue(inc("Group", name="group2")) self.assertTrue(inc("Group", name="group2", negate="false")) self.assertTrue(inc("Group", name="group2", negate="faLSe")) self.assertTrue(inc("Group", name="group3", negate="true")) self.assertTrue(inc("Group", name="group3", negate="tRUe")) self.assertFalse(inc("Client", name="bogus.example.com")) self.assertFalse(inc("Client", name="foo.example.com", negate="true")) self.assertFalse(inc("Client", name="foo.example.com", negate="tRuE")) self.assertTrue(inc("Client", name="foo.example.com")) self.assertTrue(inc("Client", name="foo.example.com", negate="false")) self.assertTrue(inc("Client", name="foo.example.com", negate="faLSe")) self.assertTrue(inc("Client", name="bogus.example.com", negate="true")) self.assertTrue(inc("Client", name="bogus.example.com", negate="tRUe")) self.assertTrue(inc("Other")) def test__match(self): sf = self.get_obj() sf._include_element = Mock() metadata = Mock() sf._include_element.side_effect = \ lambda x, _: (x.tag not in sf._include_tests.keys() or x.get("include") == "true") for test_data in [self._get_test_data(), self._get_template_test_data()]: (xdata, groups, subgroups, children, subchildren, standalone) = \ test_data for i, group in groups.items(): actual = sf._match(group, metadata) expected = children[i] + subchildren[i] self.assertEqual(len(actual), len(expected)) # easiest way to compare the values is actually to make # them into an XML document and let assertXMLEqual compare # them xactual = lxml.etree.Element("Container") xactual.extend(actual) xexpected = lxml.etree.Element("Container") xexpected.extend(expected) self.assertXMLEqual(xactual, xexpected) for el in standalone: self.assertXMLEqual(el, sf._match(el, metadata)[0]) def test_do_match(self): Bcfg2.Options.setup.lax_decryption = True sf = self.get_obj() sf._match = Mock() def match_rv(el, _): if el.tag not in sf._include_tests.keys(): return [el] elif el.get("include") == "true": return el.getchildren() else: return [] sf._match.side_effect = match_rv metadata = Mock() for test_data in [self._get_test_data(), self._get_template_test_data()]: (xdata, groups, subgroups, children, subchildren, standalone) = \ test_data sf.data = lxml.etree.tostring(xdata) sf.Index() actual = sf._do_match(metadata) expected = reduce(lambda x, y: x + y, list(children.values()) + \ list(subgroups.values())) + standalone self.assertEqual(len(actual), len(expected)) # easiest way to compare the values is actually to make # them into an XML document and let assertXMLEqual compare # them xactual = lxml.etree.Element("Container") xactual.extend(actual) xexpected = lxml.etree.Element("Container") xexpected.extend(expected) self.assertXMLEqual(xactual, xexpected) def test__xml_match(self): sf = self.get_obj() sf._include_element = Mock() metadata = Mock() sf._include_element.side_effect = \ lambda x, _: (x.tag not in 
sf._include_tests.keys() or x.get("include") == "true") for test_data in [self._get_test_data(), self._get_template_test_data()]: (xdata, groups, subgroups, children, subchildren, standalone) = \ test_data actual = copy.deepcopy(xdata) for el in actual.getchildren(): sf._xml_match(el, metadata) expected = lxml.etree.Element(xdata.tag, **dict(xdata.attrib)) expected.text = xdata.text expected.extend(reduce(lambda x, y: x + y, list(children.values()) + \ list(subchildren.values()))) expected.extend(standalone) self.assertXMLEqual(actual, expected) def test_do_xmlmatch(self): sf = self.get_obj() sf._xml_match = Mock() metadata = Mock() for data_type, test_data in \ [("", self._get_test_data()), ("templated ", self._get_template_test_data())]: (xdata, groups, subgroups, children, subchildren, standalone) = \ test_data sf.xdata = xdata sf._xml_match.reset_mock() sf._do_xmlmatch(metadata) actual = [] for call in sf._xml_match.call_args_list: actual.append(call[0][0]) self.assertEqual(call[0][1], metadata) expected = list(groups.values()) + standalone # easiest way to compare the values is actually to make # them into an XML document and let assertXMLEqual compare # them xactual = lxml.etree.Element("Container") xactual.extend(actual) xexpected = lxml.etree.Element("Container") xexpected.extend(expected) self.assertXMLEqual(xactual, xexpected, "XMLMatch() calls were incorrect for " "%stest data" % data_type) def test_match_ordering(self): """ Match() returns elements in document order """ Bcfg2.Options.setup.lax_decryption = True sf = self.get_obj() sf._match = Mock() def match_rv(el, _): if el.tag not in sf._include_tests.keys(): return [el] elif el.get("include") == "true": return el.getchildren() else: return [] sf._match.side_effect = match_rv metadata = Mock() test_data = lxml.etree.Element("Test") group = lxml.etree.SubElement(test_data, "Group", name="group", include="true") first = lxml.etree.SubElement(group, "Element", name="first") second = lxml.etree.SubElement(test_data, "Element", name="second") # sanity check to ensure that first and second are in the # correct document order if test_data.xpath("//Element") != [first, second]: skip("lxml.etree does not construct documents in a reliable order") sf.data = lxml.etree.tostring(test_data) sf.Index() rv = sf._do_match(metadata) self.assertEqual(len(rv), 2, "Match() seems to be broken, cannot test ordering") msg = "Match() does not return elements in document order:\n" + \ "Expected: [%s, %s]\n" % (first, second) + \ "Actual: %s" % rv self.assertXMLEqual(rv[0], first, msg) self.assertXMLEqual(rv[1], second, msg) # TODO: add tests to ensure that XMLMatch() returns elements # in document order class TestInfoXML(TestStructFile): test_obj = InfoXML def _get_test_data(self): (xdata, groups, subgroups, children, subchildren, standalone) = \ TestStructFile._get_test_data(self) idx = max(groups.keys()) + 1 groups[idx] = lxml.etree.SubElement( xdata, "Path", name="path1", include="true") children[idx] = [lxml.etree.SubElement(groups[idx], "Child", name="pc1")] subgroups[idx] = [lxml.etree.SubElement(groups[idx], "Group", name="pg1", include="true"), lxml.etree.SubElement(groups[idx], "Client", name="pc1", include="false")] subchildren[idx] = [lxml.etree.SubElement(subgroups[idx][0], "SubChild", name="sc1")] idx += 1 groups[idx] = lxml.etree.SubElement( xdata, "Path", name="path2", include="false") children[idx] = [] subgroups[idx] = [] subchildren[idx] = [] path2 = lxml.etree.SubElement(groups[0], "Path", name="path2", include="true") 
subgroups[0].append(path2) subchildren[0].append(lxml.etree.SubElement(path2, "SubChild", name="sc2")) return xdata, groups, subgroups, children, subchildren, standalone def test_include_element(self): TestStructFile.test_include_element(self) ix = self.get_obj() metadata = Mock() entry = lxml.etree.Element("Path", name="/etc/foo.conf") inc = lambda tag, **attrs: \ ix._include_element(lxml.etree.Element(tag, **attrs), metadata, entry) self.assertFalse(inc("Path", name="/etc/bar.conf")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="true")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="tRuE")) self.assertTrue(inc("Path", name="/etc/foo.conf")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="false")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="faLSe")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="true")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="tRUe")) def test_BindEntry(self): ix = self.get_obj() entry = lxml.etree.Element("Path", name=self.path) metadata = Mock() # test with bogus infoxml ix.Match = Mock() ix.Match.return_value = [] self.assertRaises(PluginExecutionError, ix.BindEntry, entry, metadata) ix.Match.assert_called_with(metadata, entry) # test with valid infoxml ix.Match.reset_mock() ix.Match.return_value = [lxml.etree.Element("Info", mode="0600", owner="root")] ix.BindEntry(entry, metadata) ix.Match.assert_called_with(metadata, entry) self.assertItemsEqual(entry.attrib, dict(name=self.path, mode="0600", owner="root")) def _get_test_data(self): (xdata, groups, subgroups, children, subchildren, standalone) = \ TestStructFile._get_test_data(self) idx = max(groups.keys()) + 1 groups[idx] = lxml.etree.SubElement( xdata, "Path", name="path1", include="true") children[idx] = [lxml.etree.SubElement(groups[idx], "Child", name="pc1")] subgroups[idx] = [lxml.etree.SubElement(groups[idx], "Group", name="pg1", include="true"), lxml.etree.SubElement(groups[idx], "Client", name="pc1", include="false")] subchildren[idx] = [lxml.etree.SubElement(subgroups[idx][0], "SubChild", name="sc1")] idx += 1 groups[idx] = lxml.etree.SubElement( xdata, "Path", name="path2", include="false") children[idx] = [] subgroups[idx] = [] subchildren[idx] = [] path2 = lxml.etree.SubElement(groups[0], "Path", name="path2", include="true") subgroups[0].append(path2) subchildren[0].append(lxml.etree.SubElement(path2, "SubChild", name="sc2")) return xdata, groups, subgroups, children, subchildren, standalone def test_include_element(self): TestStructFile.test_include_element(self) ix = self.get_obj() metadata = Mock() entry = lxml.etree.Element("Path", name="/etc/foo.conf") inc = lambda tag, **attrs: \ ix._include_element(lxml.etree.Element(tag, **attrs), metadata, entry) self.assertFalse(inc("Path", name="/etc/bar.conf")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="true")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="tRuE")) self.assertTrue(inc("Path", name="/etc/foo.conf")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="false")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="faLSe")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="true")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="tRUe")) def test_include_element_altsrc(self): ix = self.get_obj() metadata = Mock() entry = lxml.etree.Element("Path", name="/etc/bar.conf", realname="/etc/foo.conf") inc = lambda tag, **attrs: \ ix._include_element(lxml.etree.Element(tag, **attrs), metadata, entry) 
self.assertFalse(inc("Path", name="/etc/bar.conf")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="true")) self.assertFalse(inc("Path", name="/etc/foo.conf", negate="tRuE")) self.assertTrue(inc("Path", name="/etc/foo.conf")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="false")) self.assertTrue(inc("Path", name="/etc/foo.conf", negate="faLSe")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="true")) self.assertTrue(inc("Path", name="/etc/bar.conf", negate="tRUe")) def test_BindEntry(self): ix = self.get_obj() entry = lxml.etree.Element("Path", name=self.path) metadata = Mock() # test with bogus infoxml ix.Match = Mock() ix.Match.return_value = [] self.assertRaises(PluginExecutionError, ix.BindEntry, entry, metadata) ix.Match.assert_called_with(metadata, entry) # test with valid infoxml ix.Match.reset_mock() ix.Match.return_value = [lxml.etree.Element("Info", mode="0600", owner="root")] ix.BindEntry(entry, metadata) ix.Match.assert_called_with(metadata, entry) self.assertItemsEqual(entry.attrib, dict(name=self.path, mode="0600", owner="root")) class TestXMLDirectoryBacked(TestDirectoryBacked): test_obj = XMLDirectoryBacked testfiles = ['foo.xml', 'bar/baz.xml', 'plugh.plugh.xml'] badpaths = ["foo", "foo.txt", "foo.xsd", "xml"] class TestPrioDir(TestPlugin, TestGenerator, TestXMLDirectoryBacked): test_obj = PrioDir def setUp(self): TestPlugin.setUp(self) TestGenerator.setUp(self) TestXMLDirectoryBacked.setUp(self) def get_obj(self, core=None): if core is None: core = Mock() @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) def inner(): return self.test_obj(core) return inner() def test_HandleEvent(self): TestXMLDirectoryBacked.test_HandleEvent(self) @patch("Bcfg2.Server.Plugin.helpers.XMLDirectoryBacked.HandleEvent", Mock()) def inner(): pd = self.get_obj() test1 = lxml.etree.Element("Test") lxml.etree.SubElement(test1, "Path", name="/etc/foo.conf") lxml.etree.SubElement(lxml.etree.SubElement(test1, "Group", name="foo"), "Path", name="/etc/bar.conf") test2 = lxml.etree.Element("Test") lxml.etree.SubElement(test2, "Path", name="/etc/baz.conf") lxml.etree.SubElement(test2, "Package", name="quux") lxml.etree.SubElement(lxml.etree.SubElement(test2, "Group", name="bar"), "Package", name="xyzzy") pd.entries = {"/test1.xml": Mock(xdata=test1), "/test2.xml": Mock(xdata=test2)} pd.HandleEvent(Mock()) self.assertItemsEqual(pd.Entries, dict(Path={"/etc/foo.conf": pd.BindEntry, "/etc/bar.conf": pd.BindEntry, "/etc/baz.conf": pd.BindEntry}, Package={"quux": pd.BindEntry, "xyzzy": pd.BindEntry})) inner() def test__matches(self): pd = self.get_obj() entry = lxml.etree.Element("Test", name="/etc/foo.conf") self.assertTrue(pd._matches(entry, Mock(), lxml.etree.Element("Test", name="/etc/foo.conf"))) self.assertFalse(pd._matches(entry, Mock(), lxml.etree.Element("Test", name="/etc/baz.conf"))) def test_BindEntry(self): pd = self.get_obj() children = [lxml.etree.Element("Child", name="child")] metadata = Mock() pd.entries = dict() def reset(): metadata.reset_mock() for src in pd.entries.values(): src.reset_mock() # test with no matches self.assertRaises(PluginExecutionError, pd.BindEntry, Mock(), metadata) def add_entry(name, data): path = os.path.join(pd.data, name) pd.entries[path] = Mock() pd.entries[path].priority = data.get("priority") pd.entries[path].XMLMatch.return_value = data test1 = lxml.etree.Element("Rules", priority="10") path1 = 
lxml.etree.SubElement(test1, "Path", name="/etc/foo.conf", attr="attr1") path1.extend(children) lxml.etree.SubElement(test1, "Path", name="/etc/bar.conf") add_entry('test1.xml', test1) test2 = lxml.etree.Element("Rules", priority="20") path2 = lxml.etree.SubElement(test2, "Path", name="/etc/bar.conf", attr="attr1") path2.text = "text" lxml.etree.SubElement(test2, "Package", name="quux") lxml.etree.SubElement(test2, "Package", name="xyzzy") add_entry('test2.xml', test2) test3 = lxml.etree.Element("Rules", priority="20") lxml.etree.SubElement(test3, "Path", name="/etc/baz.conf") lxml.etree.SubElement(test3, "Package", name="xyzzy") add_entry('test3.xml', test3) # test with exactly one match, children reset() entry = lxml.etree.Element("Path", name="/etc/foo.conf") pd.BindEntry(entry, metadata) self.assertXMLEqual(entry, path1) self.assertIsNot(entry, path1) for src in pd.entries.values(): src.XMLMatch.assert_called_with(metadata) # test with multiple matches with different priorities, text reset() entry = lxml.etree.Element("Path", name="/etc/bar.conf") pd.BindEntry(entry, metadata) self.assertXMLEqual(entry, path2) self.assertIsNot(entry, path2) for src in pd.entries.values(): src.XMLMatch.assert_called_with(metadata) # test with multiple matches with identical priorities reset() entry = lxml.etree.Element("Package", name="xyzzy") self.assertRaises(PluginExecutionError, pd.BindEntry, entry, metadata) class TestSpecificity(Bcfg2TestCase): test_obj = Specificity def get_obj(self, **kwargs): return self.test_obj(**kwargs) def test_matches(self): metadata = Mock() metadata.hostname = "foo.example.com" metadata.groups = ["group1", "group2"] self.assertTrue(self.get_obj(all=True).matches(metadata)) self.assertTrue(self.get_obj(group="group1").matches(metadata)) self.assertTrue(self.get_obj(hostname="foo.example.com").matches(metadata)) self.assertFalse(self.get_obj().matches(metadata)) self.assertFalse(self.get_obj(group="group3").matches(metadata)) self.assertFalse(self.get_obj(hostname="bar.example.com").matches(metadata)) def test__cmp(self): specs = [self.get_obj(all=True), self.get_obj(group="group1", prio=10), self.get_obj(group="group1", prio=20), self.get_obj(hostname="foo.example.com")] for i in range(len(specs)): for j in range(len(specs)): if i == j: self.assertEqual(0, specs[i].__cmp__(specs[j])) self.assertEqual(0, specs[j].__cmp__(specs[i])) elif i > j: self.assertEqual(-1, specs[i].__cmp__(specs[j])) self.assertEqual(1, specs[j].__cmp__(specs[i])) elif i < j: self.assertEqual(1, specs[i].__cmp__(specs[j])) self.assertEqual(-1, specs[j].__cmp__(specs[i])) def test_cmp(self): """ test __lt__/__gt__/__eq__ """ specs = [self.get_obj(all=True), self.get_obj(group="group1", prio=10), self.get_obj(group="group1", prio=20), self.get_obj(hostname="foo.example.com")] for i in range(len(specs)): for j in range(len(specs)): if i < j: self.assertGreater(specs[i], specs[j]) self.assertLess(specs[j], specs[i]) self.assertGreaterEqual(specs[i], specs[j]) self.assertLessEqual(specs[j], specs[i]) elif i == j: self.assertEqual(specs[i], specs[j]) self.assertEqual(specs[j], specs[i]) self.assertLessEqual(specs[i], specs[j]) self.assertGreaterEqual(specs[j], specs[i]) elif i > j: self.assertLess(specs[i], specs[j]) self.assertGreater(specs[j], specs[i]) self.assertLessEqual(specs[i], specs[j]) self.assertGreaterEqual(specs[j], specs[i]) class TestSpecificData(TestDebuggable): test_obj = SpecificData path = os.path.join(datastore, "test.txt") def setUp(self): TestDebuggable.setUp(self) 
set_setup_default("encoding", "utf-8") def get_obj(self, name=None, specific=None): if name is None: name = self.path if specific is None: specific = Mock() return self.test_obj(name, specific) def test__init(self): pass @patch("%s.open" % builtins) def test_handle_event(self, mock_open): event = Mock() event.code2str.return_value = 'deleted' sd = self.get_obj() sd.handle_event(event) self.assertFalse(mock_open.called) try: self.assertFalse(hasattr(sd, 'data')) except AssertionError: self.assertIsNone(sd.data) event = Mock() mock_open.return_value.read.return_value = "test" sd.handle_event(event) mock_open.assert_called_with(self.path) mock_open.return_value.read.assert_any_call() self.assertEqual(sd.data, "test") class TestEntrySet(TestDebuggable): test_obj = EntrySet # filenames that should be matched successfully by the EntrySet # 'specific' regex. these are filenames alone -- a specificity # will be added to these basenames = ["test", "test.py", "test with spaces.txt", "test.multiple.dots.py", "test_underscores.and.dots", "really_misleading.G10_test", "name$with*regex(special){chars}", "misleading.H_hostname.test.com"] # filenames that do not match any of the basenames (or the # basename regex, if applicable) bogus_names = ["bogus"] # filenames that should be ignored ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx", "test.txt.genshi_include", "test.G_foo.genshi_include"] def setUp(self): TestDebuggable.setUp(self) set_setup_default("default_owner") set_setup_default("default_group") set_setup_default("default_mode") set_setup_default("default_secontext") set_setup_default("default_important", False) set_setup_default("default_paranoid", False) set_setup_default("default_sensitive", False) def get_obj(self, basename="test", entry_type=MagicMock()): return self.test_obj(basename, path, entry_type) def test__init(self): for basename in self.basenames: eset = self.get_obj(basename=basename) self.assertIsInstance(eset.specific, re_type) self.assertTrue(eset.specific.match(os.path.join(datastore, basename))) ppath = os.path.join(datastore, "Plugin", basename) self.assertTrue(eset.specific.match(ppath)) self.assertTrue(eset.specific.match(ppath + ".G20_foo")) self.assertTrue(eset.specific.match(ppath + ".G1_foo")) self.assertTrue(eset.specific.match(ppath + ".G32768_foo")) # a group named '_' self.assertTrue(eset.specific.match(ppath + ".G10__")) self.assertTrue(eset.specific.match(ppath + ".H_hostname")) self.assertTrue(eset.specific.match(ppath + ".H_fqdn.subdomain.example.com")) self.assertTrue(eset.specific.match(ppath + ".G20_group_with_underscores")) self.assertFalse(eset.specific.match(ppath + ".G20_group with spaces")) self.assertFalse(eset.specific.match(ppath + ".G_foo")) self.assertFalse(eset.specific.match(ppath + ".G_")) self.assertFalse(eset.specific.match(ppath + ".G20_")) self.assertFalse(eset.specific.match(ppath + ".H_")) for bogus in self.bogus_names: self.assertFalse(eset.specific.match(os.path.join(datastore, "Plugin", bogus))) for ignore in self.ignore: self.assertTrue(eset.ignore.match(ignore), "%s should be ignored but wasn't" % ignore) self.assertFalse(eset.ignore.match(basename)) self.assertFalse(eset.ignore.match(basename + ".G20_foo")) self.assertFalse(eset.ignore.match(basename + ".G1_foo")) self.assertFalse(eset.ignore.match(basename + ".G32768_foo")) self.assertFalse(eset.ignore.match(basename + ".G10__")) self.assertFalse(eset.ignore.match(basename + ".H_hostname")) self.assertFalse(eset.ignore.match(basename + ".H_fqdn.subdomain.example.com")) 
self.assertFalse(eset.ignore.match(basename + ".G20_group_with_underscores")) def test_get_matching(self): items = {0: Mock(), 1: Mock(), 2: Mock(), 3: Mock(), 4: Mock(), 5: Mock()} items[0].specific.matches.return_value = False items[1].specific.matches.return_value = True items[2].specific.matches.return_value = False items[3].specific.matches.return_value = False items[4].specific.matches.return_value = True items[5].specific.matches.return_value = True metadata = Mock() eset = self.get_obj() eset.entries = items self.assertItemsEqual(eset.get_matching(metadata), [items[1], items[4], items[5]]) for i in items.values(): i.specific.matches.assert_called_with(metadata) def test_best_matching(self): eset = self.get_obj() eset.get_matching = Mock() metadata = Mock() matching = [] def reset(): eset.get_matching.reset_mock() metadata.reset_mock() for m in matching: m.reset_mock() def specific(all=False, group=False, prio=None, hostname=False): spec = Mock() spec.specific = Specificity(all=all, group=group, prio=prio, hostname=hostname) return spec self.assertRaises(PluginExecutionError, eset.best_matching, metadata, matching=[]) reset() eset.get_matching.return_value = matching self.assertRaises(PluginExecutionError, eset.best_matching, metadata) eset.get_matching.assert_called_with(metadata) # test with a single file for all reset() expected = specific(all=True) matching.append(expected) eset.get_matching.return_value = matching self.assertEqual(eset.best_matching(metadata), expected) eset.get_matching.assert_called_with(metadata) # test with a single group-specific file reset() expected = specific(group=True, prio=10) matching.append(expected) eset.get_matching.return_value = matching self.assertEqual(eset.best_matching(metadata), expected) eset.get_matching.assert_called_with(metadata) # test with multiple group-specific files reset() expected = specific(group=True, prio=20) matching.append(expected) eset.get_matching.return_value = matching self.assertEqual(eset.best_matching(metadata), expected) eset.get_matching.assert_called_with(metadata) # test with host-specific file reset() expected = specific(hostname=True) matching.append(expected) eset.get_matching.return_value = matching self.assertEqual(eset.best_matching(metadata), expected) eset.get_matching.assert_called_with(metadata) def test_handle_event(self): eset = self.get_obj() eset.entry_init = Mock() eset.reset_metadata = Mock() eset.update_metadata = Mock() def reset(): eset.update_metadata.reset_mock() eset.reset_metadata.reset_mock() eset.entry_init.reset_mock() fname = "info.xml" for evt in ["exists", "created", "changed"]: reset() event = Mock() event.code2str.return_value = evt event.filename = fname eset.handle_event(event) eset.update_metadata.assert_called_with(event) self.assertFalse(eset.entry_init.called) self.assertFalse(eset.reset_metadata.called) reset() event = Mock() event.code2str.return_value = "deleted" event.filename = fname eset.handle_event(event) eset.reset_metadata.assert_called_with(event) self.assertFalse(eset.entry_init.called) self.assertFalse(eset.update_metadata.called) for evt in ["exists", "created", "changed"]: reset() event = Mock() event.code2str.return_value = evt event.filename = "test.txt" eset.handle_event(event) eset.entry_init.assert_called_with(event) self.assertFalse(eset.reset_metadata.called) self.assertFalse(eset.update_metadata.called) reset() entry = Mock() eset.entries["test.txt"] = entry event = Mock() event.code2str.return_value = "changed" event.filename = "test.txt" 
eset.handle_event(event) entry.handle_event.assert_called_with(event) self.assertFalse(eset.entry_init.called) self.assertFalse(eset.reset_metadata.called) self.assertFalse(eset.update_metadata.called) reset() entry = Mock() eset.entries["test.txt"] = entry event = Mock() event.code2str.return_value = "deleted" event.filename = "test.txt" eset.handle_event(event) self.assertNotIn("test.txt", eset.entries) def test_entry_init(self): eset = self.get_obj() eset.specificity_from_filename = Mock() def reset(): eset.entry_type.reset_mock() eset.specificity_from_filename.reset_mock() event = Mock() event.code2str.return_value = "created" event.filename = "test.txt" eset.entry_init(event) eset.specificity_from_filename.assert_called_with("test.txt", specific=None) eset.entry_type.assert_called_with( os.path.join(eset.path, "test.txt"), eset.specificity_from_filename.return_value) eset.entry_type.return_value.handle_event.assert_called_with(event) self.assertIn("test.txt", eset.entries) # test duplicate add reset() eset.entry_init(event) self.assertFalse(eset.specificity_from_filename.called) self.assertFalse(eset.entry_type.called) eset.entries["test.txt"].handle_event.assert_called_with(event) # test keyword args etype = Mock() specific = Mock() event = Mock() event.code2str.return_value = "created" event.filename = "test2.txt" eset.entry_init(event, entry_type=etype, specific=specific) eset.specificity_from_filename.assert_called_with("test2.txt", specific=specific) etype.assert_called_with(os.path.join(eset.path, "test2.txt"), eset.specificity_from_filename.return_value) etype.return_value.handle_event.assert_called_with(event) self.assertIn("test2.txt", eset.entries) # test specificity error event = Mock() event.code2str.return_value = "created" event.filename = "test3.txt" eset.specificity_from_filename.side_effect = SpecificityError eset.entry_init(event) eset.specificity_from_filename.assert_called_with("test3.txt", specific=None) self.assertFalse(eset.entry_type.called) @patch("Bcfg2.Server.Plugin.helpers.Specificity") def test_specificity_from_filename(self, mock_spec): # There's a strange scoping issue in py3k that prevents this # test from working as expected on sub-classes of EntrySet. 
# No idea what's going on, but until I can figure it out we # skip this test on subclasses if inPy3k and self.test_obj != EntrySet: return skip("Skipping this test for py3k scoping issues") def test(eset, fname, **kwargs): mock_spec.reset_mock() if "specific" in kwargs: specific = kwargs['specific'] del kwargs['specific'] else: specific = None self.assertEqual(eset.specificity_from_filename(fname, specific=specific), mock_spec.return_value) mock_spec.assert_called_with(**kwargs) def fails(eset, fname, specific=None): mock_spec.reset_mock() self.assertRaises(SpecificityError, eset.specificity_from_filename, fname, specific=specific) for basename in self.basenames: eset = self.get_obj(basename=basename) ppath = os.path.join(datastore, "Plugin", basename) test(eset, ppath, all=True) test(eset, ppath + ".G20_foo", group="foo", prio=20) test(eset, ppath + ".G1_foo", group="foo", prio=1) test(eset, ppath + ".G32768_foo", group="foo", prio=32768) test(eset, ppath + ".G10__", group="_", prio=10) test(eset, ppath + ".H_hostname", hostname="hostname") test(eset, ppath + ".H_fqdn.subdomain.example.com", hostname="fqdn.subdomain.example.com") test(eset, ppath + ".G20_group_with_underscores", group="group_with_underscores", prio=20) for bogus in self.bogus_names: fails(eset, bogus) fails(eset, ppath + ".G_group with spaces") fails(eset, ppath + ".G_foo") fails(eset, ppath + ".G_") fails(eset, ppath + ".G20_") fails(eset, ppath + ".H_") @patch("%s.open" % builtins) @patch("Bcfg2.Server.Plugin.helpers.InfoXML") def test_update_metadata(self, mock_InfoXML, mock_open): # There's a strange scoping issue in py3k that prevents this # test from working as expected on sub-classes of EntrySet. # No idea what's going on, but until I can figure it out we # skip this test on subclasses if inPy3k and self.test_obj != EntrySet: return skip("Skipping this test for py3k scoping issues") eset = self.get_obj() # add info.xml event = Mock() event.filename = "info.xml" eset.update_metadata(event) mock_InfoXML.assert_called_with(os.path.join(eset.path, "info.xml")) mock_InfoXML.return_value.HandleEvent.assert_called_with(event) self.assertEqual(eset.infoxml, mock_InfoXML.return_value) # modify info.xml mock_InfoXML.reset_mock() eset.update_metadata(event) self.assertFalse(mock_InfoXML.called) eset.infoxml.HandleEvent.assert_called_with(event) @patch("Bcfg2.Server.Plugin.helpers.default_path_metadata") def test_reset_metadata(self, mock_default_path_metadata): eset = self.get_obj() # test info.xml event = Mock() event.filename = "info.xml" eset.infoxml = Mock() eset.reset_metadata(event) self.assertIsNone(eset.infoxml) def test_bind_info_to_entry(self): eset = self.get_obj() eset.metadata = dict(owner="root", group="root") entry = lxml.etree.Element("Path", name="/test") metadata = Mock() eset.infoxml = None eset.bind_info_to_entry(entry, metadata) self.assertItemsEqual(entry.attrib, dict(name="/test", owner="root", group="root")) entry = lxml.etree.Element("Path", name="/test") eset.infoxml = Mock() eset.bind_info_to_entry(entry, metadata) self.assertItemsEqual(entry.attrib, dict(name="/test", owner="root", group="root")) eset.infoxml.BindEntry.assert_called_with(entry, metadata) def test_bind_entry(self): eset = self.get_obj() eset.best_matching = Mock() eset.bind_info_to_entry = Mock() entry = Mock() metadata = Mock() eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset.best_matching.assert_called_with(metadata) 
eset.best_matching.return_value.bind_entry.assert_called_with(entry, metadata) class TestGroupSpool(TestPlugin, TestGenerator): test_obj = GroupSpool def setUp(self): TestPlugin.setUp(self) TestGenerator.setUp(self) set_setup_default("encoding", "utf-8") def get_obj(self, core=None): if core is None: core = MagicMock() @patch("%s.%s.AddDirectoryMonitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() def test__init(self): @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) @patch("%s.%s.AddDirectoryMonitor" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_Add): gs = self.test_obj(MagicMock()) mock_Add.assert_called_with('') self.assertItemsEqual(gs.Entries, {gs.entry_type: {}}) inner() @patch("os.path.isdir") @patch("os.path.isfile") def test_add_entry(self, mock_isfile, mock_isdir): gs = self.get_obj() gs.es_cls = Mock() gs.es_child_cls = Mock() gs.event_id = Mock() gs.event_path = Mock() gs.AddDirectoryMonitor = Mock() def reset(): gs.es_cls.reset_mock() gs.es_child_cls.reset_mock() gs.AddDirectoryMonitor.reset_mock() gs.event_path.reset_mock() gs.event_id.reset_mock() mock_isfile.reset_mock() mock_isdir.reset_mock() # directory event = Mock() event.filename = "foo" basedir = "test" epath = os.path.join(gs.data, basedir, event.filename) ident = os.path.join(basedir, event.filename) gs.event_path.return_value = epath gs.event_id.return_value = ident mock_isdir.return_value = True mock_isfile.return_value = False gs.add_entry(event) gs.AddDirectoryMonitor.assert_called_with(os.path.join("/" + basedir, event.filename)) self.assertNotIn(ident, gs.entries) mock_isdir.assert_called_with(epath) # file that is not in self.entries reset() event = Mock() event.filename = "foo" basedir = "test/foo/" epath = os.path.join(gs.data, basedir, event.filename) ident = basedir[:-1] gs.event_path.return_value = epath gs.event_id.return_value = ident mock_isdir.return_value = False mock_isfile.return_value = True gs.add_entry(event) self.assertFalse(gs.AddDirectoryMonitor.called) gs.es_cls.assert_called_with(gs.filename_pattern, gs.data + ident, gs.es_child_cls) self.assertIn(ident, gs.entries) self.assertEqual(gs.entries[ident], gs.es_cls.return_value) self.assertIn(ident, gs.Entries[gs.entry_type]) self.assertEqual(gs.Entries[gs.entry_type][ident], gs.es_cls.return_value.bind_entry) gs.entries[ident].handle_event.assert_called_with(event) mock_isfile.assert_called_with(epath) # file that is in self.entries reset() gs.add_entry(event) self.assertFalse(gs.AddDirectoryMonitor.called) self.assertFalse(gs.es_cls.called) gs.entries[ident].handle_event.assert_called_with(event) def test_event_path(self): gs = self.get_obj() gs.handles[1] = "/var/lib/foo/" gs.handles[2] = "/etc/foo/" gs.handles[3] = "/usr/share/foo/" event = Mock() event.filename = "foo" for i in range(1, 4): event.requestID = i self.assertEqual(gs.event_path(event), os.path.join(datastore, gs.name, gs.handles[event.requestID].lstrip('/'), event.filename)) @patch("os.path.isdir") def test_event_id(self, mock_isdir): gs = self.get_obj() gs.event_path = Mock() def reset(): gs.event_path.reset_mock() mock_isdir.reset_mock() gs.handles[1] = "/var/lib/foo/" gs.handles[2] = "/etc/foo/" gs.handles[3] = "/usr/share/foo/" event = Mock() event.filename = "foo" for i in range(1, 4): event.requestID = i reset() mock_isdir.return_value = True self.assertEqual(gs.event_id(event), os.path.join(gs.handles[event.requestID].lstrip('/'), 
event.filename)) mock_isdir.assert_called_with(gs.event_path.return_value) reset() mock_isdir.return_value = False self.assertEqual(gs.event_id(event), gs.handles[event.requestID].rstrip('/')) mock_isdir.assert_called_with(gs.event_path.return_value) def test_set_debug(self): gs = self.get_obj() gs.entries = {"/foo": Mock(), "/bar": Mock(), "/baz/quux": Mock()} @patch("Bcfg2.Server.Plugin.helpers.Plugin.set_debug") def inner(mock_debug): gs.set_debug(True) mock_debug.assert_called_with(gs, True) for entry in gs.entries.values(): entry.set_debug.assert_called_with(True) inner() TestPlugin.test_set_debug(self) def test_HandleEvent(self): gs = self.get_obj() gs.entries = {"/foo": Mock(), "/bar": Mock(), "/baz": Mock(), "/baz/quux": Mock()} for path in gs.entries.keys(): gs.Entries[gs.entry_type] = {path: Mock()} gs.handles = {1: "/foo/", 2: "/bar/", 3: "/baz/", 4: "/baz/quux"} gs.add_entry = Mock() gs.event_id = Mock() def reset(): gs.add_entry.reset_mock() gs.event_id.reset_mock() for entry in gs.entries.values(): entry.reset_mock() # test event creation, changing entry that doesn't exist for evt in ["exists", "created", "changed"]: reset() event = Mock() event.filename = "foo" event.requestID = 1 event.code2str.return_value = evt gs.HandleEvent(event) gs.event_id.assert_called_with(event) gs.add_entry.assert_called_with(event) # test deleting entry, changing entry that does exist for evt in ["changed", "deleted"]: reset() event = Mock() event.filename = "quux" event.requestID = 4 event.code2str.return_value = evt gs.event_id.return_value = "/baz/quux" gs.HandleEvent(event) gs.event_id.assert_called_with(event) self.assertIn(gs.event_id.return_value, gs.entries) gs.entries[gs.event_id.return_value].handle_event.assert_called_with(event) self.assertFalse(gs.add_entry.called) # test deleting directory reset() event = Mock() event.filename = "quux" event.requestID = 3 event.code2str.return_value = "deleted" gs.event_id.return_value = "/baz/quux" gs.HandleEvent(event) gs.event_id.assert_called_with(event) self.assertNotIn("/baz/quux", gs.entries) self.assertNotIn("/baz/quux", gs.Entries[gs.entry_type]) testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testinterfaces.py000066400000000000000000000270621303523157100257140ustar00rootroot00000000000000import os import sys import lxml.etree import Bcfg2.Server from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugin.interfaces import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != '/': if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugin.Testbase import TestPlugin class TestGenerator(Bcfg2TestCase): test_obj = Generator def test_HandlesEntry(self): pass def test_HandleEntry(self): pass class TestStructure(Bcfg2TestCase): test_obj = Structure def get_obj(self): return self.test_obj() def test_BuildStructures(self): s = self.get_obj() self.assertRaises(NotImplementedError, s.BuildStructures, None) class TestMetadata(Bcfg2TestCase): test_obj = Metadata def get_obj(self): return self.test_obj() def test_AuthenticateConnection(self): m = self.get_obj() self.assertRaises(NotImplementedError, m.AuthenticateConnection, None, None, None, (None, None)) def test_get_initial_metadata(self): m = self.get_obj() self.assertRaises(NotImplementedError, m.get_initial_metadata, None) def 
test_merge_additional_data(self): m = self.get_obj() self.assertRaises(NotImplementedError, m.merge_additional_data, None, None, None) def test_merge_additional_groups(self): m = self.get_obj() self.assertRaises(NotImplementedError, m.merge_additional_groups, None, None) class TestConnector(Bcfg2TestCase): """ placeholder """ def test_get_additional_groups(self): pass def test_get_additional_data(self): pass class TestProbing(Bcfg2TestCase): test_obj = Probing def get_obj(self): return self.test_obj() def test_GetProbes(self): p = self.get_obj() self.assertRaises(NotImplementedError, p.GetProbes, None) def test_ReceiveData(self): p = self.get_obj() self.assertRaises(NotImplementedError, p.ReceiveData, None, None) class TestStatistics(TestPlugin): test_obj = Statistics def test_process_statistics(self): s = self.get_obj() self.assertRaises(NotImplementedError, s.process_statistics, None, None) class TestThreaded(Bcfg2TestCase): test_obj = Threaded def get_obj(self): return self.test_obj() def test_start_threads(self): s = self.get_obj() self.assertRaises(NotImplementedError, s.start_threads) class TestThreadedStatistics(TestStatistics, TestThreaded): test_obj = ThreadedStatistics data = [("foo.example.com", ""), ("bar.example.com", "")] def get_obj(self, core=None): return TestStatistics.get_obj(self, core=core) @patch("threading.Thread.start") def test_start_threads(self, mock_start): ts = self.get_obj() ts.start_threads() mock_start.assert_any_call() @patch("%s.open" % builtins) @patch("%s.dump" % cPickle.__name__) @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics.run", Mock()) def test_save(self, mock_dump, mock_open): core = Mock() ts = self.get_obj(core) queue = Mock() queue.empty = Mock(side_effect=Empty) ts.work_queue = queue mock_open.side_effect = IOError # test that save does _not_ raise an exception even when # everything goes pear-shaped ts._save() queue.empty.assert_any_call() mock_open.assert_called_with(ts.pending_file, 'w') queue.reset_mock() mock_open.reset_mock() queue.data = [] for hostname, xml in self.data: md = Mock() md.hostname = hostname queue.data.append((md, lxml.etree.XML(xml))) queue.empty.side_effect = lambda: len(queue.data) == 0 queue.get_nowait = Mock(side_effect=lambda: queue.data.pop()) mock_open.side_effect = None ts._save() queue.empty.assert_any_call() queue.get_nowait.assert_any_call() mock_open.assert_called_with(ts.pending_file, 'w') mock_open.return_value.close.assert_any_call() # the order of the queue data gets changed, so we have to # verify this call in an ugly way self.assertItemsEqual(mock_dump.call_args[0][0], self.data) self.assertEqual(mock_dump.call_args[0][1], mock_open.return_value) @patch("os.unlink") @patch("os.path.exists") @patch("%s.open" % builtins) @patch("lxml.etree.XML") @patch("%s.load" % cPickle.__name__) @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics.run", Mock()) def test_load(self, mock_load, mock_XML, mock_open, mock_exists, mock_unlink): core = Mock() core.terminate.isSet.return_value = False ts = self.get_obj(core) ts.work_queue = Mock() ts.work_queue.data = [] def reset(): core.reset_mock() mock_open.reset_mock() mock_exists.reset_mock() mock_unlink.reset_mock() mock_load.reset_mock() mock_XML.reset_mock() ts.work_queue.reset_mock() ts.work_queue.data = [] mock_exists.return_value = False self.assertTrue(ts._load()) mock_exists.assert_called_with(ts.pending_file) reset() mock_exists.return_value = True mock_open.side_effect = IOError self.assertFalse(ts._load()) 
mock_exists.assert_called_with(ts.pending_file) mock_open.assert_called_with(ts.pending_file, 'r') reset() mock_open.side_effect = None mock_load.return_value = self.data ts.work_queue.put_nowait.side_effect = Full self.assertTrue(ts._load()) mock_exists.assert_called_with(ts.pending_file) mock_open.assert_called_with(ts.pending_file, 'r') mock_open.return_value.close.assert_any_call() mock_load.assert_called_with(mock_open.return_value) reset() core.build_metadata.side_effect = lambda x: x mock_XML.side_effect = lambda x, parser=None: x ts.work_queue.put_nowait.side_effect = None self.assertTrue(ts._load()) mock_exists.assert_called_with(ts.pending_file) mock_open.assert_called_with(ts.pending_file, 'r') mock_open.return_value.close.assert_any_call() mock_load.assert_called_with(mock_open.return_value) self.assertItemsEqual(mock_XML.call_args_list, [call(x, parser=Bcfg2.Server.XMLParser) for h, x in self.data]) self.assertItemsEqual(ts.work_queue.put_nowait.call_args_list, [call((h, x)) for h, x in self.data]) mock_unlink.assert_called_with(ts.pending_file) @patch("threading.Thread.start", Mock()) @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics._load") @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics._save") @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics.handle_statistic") def test_run(self, mock_handle, mock_save, mock_load): core = Mock() ts = self.get_obj(core) mock_load.return_value = True ts.work_queue = Mock() def reset(): mock_handle.reset_mock() mock_save.reset_mock() mock_load.reset_mock() core.reset_mock() ts.work_queue.reset_mock() ts.work_queue.data = self.data[:] ts.work_queue.get_calls = 0 reset() def get_rv(**kwargs): ts.work_queue.get_calls += 1 try: return ts.work_queue.data.pop() except: raise Empty ts.work_queue.get.side_effect = get_rv def terminate_isset(): # this lets the loop go on a few iterations with an empty # queue to test that it doesn't error out return ts.work_queue.get_calls > 3 core.terminate.isSet.side_effect = terminate_isset ts.work_queue.empty.return_value = False ts.run() mock_load.assert_any_call() self.assertGreaterEqual(ts.work_queue.get.call_count, len(self.data)) self.assertItemsEqual(mock_handle.call_args_list, [call(h, x) for h, x in self.data]) mock_save.assert_any_call() @patch("copy.copy", Mock(side_effect=lambda x: x)) @patch("Bcfg2.Server.Plugin.interfaces.ThreadedStatistics.run", Mock()) def test_process_statistics(self): core = Mock() ts = self.get_obj(core) ts.work_queue = Mock() ts.process_statistics(*self.data[0]) ts.work_queue.put_nowait.assert_called_with(self.data[0]) ts.work_queue.reset_mock() ts.work_queue.put_nowait.side_effect = Full # test that no exception is thrown ts.process_statistics(*self.data[0]) def test_handle_statistic(self): ts = self.get_obj() self.assertRaises(NotImplementedError, ts.handle_statistic, None, None) class TestPullSource(Bcfg2TestCase): def test_GetCurrentEntry(self): ps = PullSource() self.assertRaises(NotImplementedError, ps.GetCurrentEntry, None, None, None) class TestPullTarget(Bcfg2TestCase): def test_AcceptChoices(self): pt = PullTarget() self.assertRaises(NotImplementedError, pt.AcceptChoices, None, None) def test_AcceptPullData(self): pt = PullTarget() self.assertRaises(NotImplementedError, pt.AcceptPullData, None, None, None) class TestDecision(Bcfg2TestCase): test_obj = Decision def get_obj(self): return self.test_obj() def test_GetDecisions(self): d = self.get_obj() self.assertRaises(NotImplementedError, d.GetDecisions, None, None) class 
TestStructureValidator(Bcfg2TestCase):
    test_obj = StructureValidator

    def get_obj(self):
        return self.test_obj()

    def test_validate_structures(self):
        sv = self.get_obj()
        self.assertRaises(NotImplementedError,
                          sv.validate_structures, None, None)


class TestGoalValidator(Bcfg2TestCase):
    test_obj = GoalValidator

    def get_obj(self):
        return self.test_obj()

    def test_validate_goals(self):
        gv = self.get_obj()
        self.assertRaises(NotImplementedError, gv.validate_goals, None, None)


class TestVersion(TestPlugin):
    test_obj = Version

    def test_get_revision(self):
        d = self.get_obj()
        self.assertRaises(NotImplementedError, d.get_revision)


class TestClientRunHooks(Bcfg2TestCase):
    """ placeholder for future tests """
    pass


class TestClientACLs(Bcfg2TestCase):
    test_obj = ClientACLs

    def get_obj(self):
        return self.test_obj()

    def test_check_acl_ip(self):
        ca = self.get_obj()
        self.assertIn(ca.check_acl_ip(Mock(), Mock()), [True, False, None])

    def test_check_acl_metadata(self):
        ca = self.get_obj()
        self.assertIn(ca.check_acl_metadata(Mock(), Mock()), [True, False])
testsuite/Testsrc/Testlib/TestServer/TestPlugin/__init__.py000066400000000000000000000007171303523157100244660ustar00rootroot00000000000000import os
import sys

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != "/":
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)

from Testbase import *
from Testinterfaces import *
from Testhelpers import *
from Testexceptions import *
testsuite/Testsrc/Testlib/TestServer/TestPlugins/000077500000000000000000000000001303523157100225335ustar00rootroot00000000000000
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestACL.py000066400000000000000000000217651303523157100243570ustar00rootroot00000000000000import os
import sys
import lxml.etree
import Bcfg2.Server.Plugin
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugins.ACL import *

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != "/":
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)
from common import *
from TestPlugin import TestXMLFileBacked, TestStructFile, TestPlugin, \
    TestClientACLs


class TestFunctions(Bcfg2TestCase):
    def test_rmi_names_equal(self):
        good_cases = [('*', 'foo'),
                      ('foo', 'foo'),
                      ('foo.*', 'foo.bar'),
                      ('*.*', 'foo.bar'),
                      ('foo.bar', 'foo.bar'),
                      ('*.bar', 'foo.bar'),
                      ('foo.*.bar', 'foo.baz.bar')]
        bad_cases = [('foo', 'bar'),
                     ('*', 'foo.bar'),
                     ('*.*', 'foo'),
                     ('*.*', 'foo.bar.baz'),
                     ('foo.*', 'bar.foo'),
                     ('*.bar', 'bar.foo'),
                     ('foo.*', 'foobar')]
        for first, second in good_cases:
            self.assertTrue(rmi_names_equal(first, second),
                            "rmi_names_equal(%s, %s) unexpectedly False" %
                            (first, second))
            self.assertTrue(rmi_names_equal(second, first),
                            "rmi_names_equal(%s, %s) unexpectedly False" %
                            (second, first))
        for first, second in bad_cases:
            self.assertFalse(rmi_names_equal(first, second),
                             "rmi_names_equal(%s, %s) unexpectedly True" %
                             (first, second))
            self.assertFalse(rmi_names_equal(second, first),
                             "rmi_names_equal(%s, %s) unexpectedly True" %
                             (second, first))

    def test_ip_matches(self):
        good_cases = [
            ("192.168.1.1",
             lxml.etree.Element("test", address="192.168.1.1")),
            ("192.168.1.17",
lxml.etree.Element("test", address="192.168.1.0", netmask="255.255.255.0")), ("192.168.1.31", lxml.etree.Element("test", address="192.168.1.0", netmask="255.255.255.224")), ("192.168.1.31", lxml.etree.Element("test", address="192.168.1.0", netmask="27")), ("10.55.67.191", lxml.etree.Element("test", address="10.55.0.0", netmask="16"))] bad_cases = [ ("192.168.1.1", lxml.etree.Element("test", address="192.168.1.2")), ("192.168.2.17", lxml.etree.Element("test", address="192.168.1.0", netmask="24")), ("192.168.2.17", lxml.etree.Element("test", address="192.168.1.0", netmask="255.255.255.0")), ("192.168.1.35", lxml.etree.Element("test", address="192.168.1.0", netmask="255.255.255.224")), ("192.168.1.35", lxml.etree.Element("test", address="192.168.1.0", netmask="27")), ("10.56.67.191", lxml.etree.Element("test", address="10.55.0.0", netmask="16"))] for ip, entry in good_cases: self.assertTrue(ip_matches(ip, entry), "ip_matches(%s, %s) unexpectedly False" % (ip, lxml.etree.tostring(entry))) for ip, entry in bad_cases: self.assertFalse(ip_matches(ip, entry), "ip_matches(%s, %s) unexpectedly True" % (ip, lxml.etree.tostring(entry))) class TestIPACLFile(TestXMLFileBacked): test_obj = IPACLFile @patch("Bcfg2.Server.Plugins.ACL.ip_matches") @patch("Bcfg2.Server.Plugins.ACL.rmi_names_equal") def test_check_acl(self, mock_rmi_names_equal, mock_ip_matches): af = self.get_obj() ip = "10.0.0.8" rmi = "ACL.test" def reset(): mock_rmi_names_equal.reset_mock() mock_ip_matches.reset_mock() # test default defer with no entries af.entries = [] self.assertIsNone(af.check_acl(ip, rmi)) # test explicit allow, deny, and defer entries = dict(Allow=lxml.etree.Element("Allow", method=rmi), Deny=lxml.etree.Element("Deny", method=rmi), Defer=lxml.etree.Element("Defer", method=rmi)) af.entries = list(entries.values()) def get_ip_matches(tag): def ip_matches(ip, entry): return entry.tag == tag return ip_matches mock_rmi_names_equal.return_value = True reset() mock_ip_matches.side_effect = get_ip_matches("Allow") self.assertTrue(af.check_acl(ip, rmi)) mock_ip_matches.assert_called_with(ip, entries['Allow']) mock_rmi_names_equal.assert_called_with(rmi, rmi) reset() mock_ip_matches.side_effect = get_ip_matches("Deny") self.assertFalse(af.check_acl(ip, rmi)) mock_ip_matches.assert_called_with(ip, entries['Deny']) mock_rmi_names_equal.assert_called_with(rmi, rmi) reset() mock_ip_matches.side_effect = get_ip_matches("Defer") self.assertIsNone(af.check_acl(ip, rmi)) mock_ip_matches.assert_called_with(ip, entries['Defer']) mock_rmi_names_equal.assert_called_with(rmi, rmi) # test matching RMI names reset() mock_ip_matches.side_effect = lambda i, e: True mock_rmi_names_equal.side_effect = lambda a, b: a == b rmi = "ACL.test2" matching = lxml.etree.Element("Allow", method=rmi) af.entries.append(matching) self.assertTrue(af.check_acl(ip, rmi)) mock_ip_matches.assert_called_with(ip, matching) self.assertTrue( call('ACL.test', rmi) in mock_rmi_names_equal.call_args_list or call(rmi, 'ACL.test') in mock_rmi_names_equal.call_args_list) # test implicit allow for localhost, defer for others reset() mock_ip_matches.side_effect = lambda i, e: False self.assertIsNone(af.check_acl(ip, rmi)) reset() self.assertTrue(af.check_acl("127.0.0.1", rmi)) class TestMetadataACLFile(TestStructFile): test_obj = MetadataACLFile @patch("Bcfg2.Server.Plugins.ACL.rmi_names_equal") def test_check_acl(self, mock_rmi_names_equal): af = self.get_obj() af.Match = Mock() metadata = Mock() mock_rmi_names_equal.side_effect = lambda a, b: a == b def reset(): 
af.Match.reset_mock() mock_rmi_names_equal.reset_mock() # test default allow af.entries = [] self.assertTrue(af.check_acl(metadata, 'ACL.test')) # test explicit allow and deny reset() af.entries = [lxml.etree.Element("Allow", method='ACL.test'), lxml.etree.Element("Deny", method='ACL.test2')] af.Match.return_value = af.entries self.assertTrue(af.check_acl(metadata, 'ACL.test')) af.Match.assert_called_with(metadata) self.assertIn(call('ACL.test', 'ACL.test'), mock_rmi_names_equal.call_args_list) reset() self.assertFalse(af.check_acl(metadata, 'ACL.test2')) af.Match.assert_called_with(metadata) self.assertIn(call('ACL.test2', 'ACL.test2'), mock_rmi_names_equal.call_args_list) # test default deny for non-localhost reset() self.assertFalse(af.check_acl(metadata, 'ACL.test3')) af.Match.assert_called_with(metadata) # test default allow for localhost reset() metadata.hostname = 'localhost' self.assertTrue(af.check_acl(metadata, 'ACL.test3')) af.Match.assert_called_with(metadata) class TestACL(TestPlugin, TestClientACLs): test_obj = ACL def test_check_acl_ip(self): acl = self.get_obj() acl.ip_acls = Mock() self.assertEqual(acl.check_acl_ip(("192.168.1.10", "12345"), "ACL.test"), acl.ip_acls.check_acl.return_value) acl.ip_acls.check_acl.assert_called_with("192.168.1.10", "ACL.test") def test_check_acl_metadata(self): acl = self.get_obj() acl.metadata_acls = Mock() metadata = Mock() self.assertEqual(acl.check_acl_metadata(metadata, "ACL.test"), acl.metadata_acls.check_acl.return_value) acl.metadata_acls.check_acl.assert_called_with(metadata, "ACL.test") testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestAWSTags.py000066400000000000000000000106171303523157100252230ustar00rootroot00000000000000import os import sys import lxml.etree import Bcfg2.Server.Plugin from mock import Mock, MagicMock, patch try: from Bcfg2.Server.Plugins.AWSTags import * HAS_BOTO = True except ImportError: HAS_BOTO = False # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestPlugin, TestConnector, TestClientRunHooks config = ''' group1 group2 group3 group-$1 group-$1 ''' tags = { "empty.example.com": {}, "no-matches.example.com": {"nameonly": "foo", "Name": "no-matches", "foo": "bar"}, "foo.example.com": {"name-only": "name-only", "name-and-value": "wrong", "regex-name": "foo"}, "bar.example.com": {"name-and-value": "value", "regex-value": "bar"}} groups = { "empty.example.com": [], "no-matches.example.com": [], "foo.example.com": ["group1", "group2", "group-name"], "bar.example.com": ["group3", "group-value", "group-bar"]} def make_instance(name): rv = Mock() rv.private_dns_name = name rv.tags = tags[name] return rv instances = [make_instance(n) for n in tags.keys()] def get_all_instances(filters=None): insts = [i for i in instances if i.private_dns_name == filters['private-dns-name']] res = Mock() res.instances = insts return [res] if HAS_BOTO: class TestAWSTags(TestPlugin, TestClientRunHooks, TestConnector): test_obj = AWSTags def get_obj(self, core=None): @patchIf(not isinstance(Bcfg2.Server.Plugins.AWSTags.connect_ec2, Mock), "Bcfg2.Server.Plugins.AWSTags.connect_ec2", Mock()) @patch("lxml.etree.Element", Mock()) def inner(): obj = TestPlugin.get_obj(self, core=core) obj.config.data = config obj.config.Index() return obj 
return inner() @patch("Bcfg2.Server.Plugins.AWSTags.connect_ec2") def test_connect(self, mock_connect_ec2): """ Test connection to EC2 """ key_id = "a09sdbipasdf" access_key = "oiilb234ipwe9" def cfp_get(section, option): if option == "access_key_id": return key_id elif option == "secret_access_key": return access_key else: return Mock() core = Mock() core.setup.cfp.get = Mock(side_effect=cfp_get) awstags = self.get_obj(core=core) mock_connect_ec2.assert_called_with( aws_access_key_id=key_id, aws_secret_access_key=access_key) def test_get_additional_data(self): """ Test AWSTags.get_additional_data() """ awstags = self.get_obj() awstags._ec2.get_all_instances = \ Mock(side_effect=get_all_instances) for hostname, expected in tags.items(): metadata = Mock() metadata.hostname = hostname self.assertItemsEqual(awstags.get_additional_data(metadata), expected) def test_get_additional_groups_caching(self): """ Test AWSTags.get_additional_groups() with caching enabled """ awstags = self.get_obj() awstags._ec2.get_all_instances = \ Mock(side_effect=get_all_instances) for hostname, expected in groups.items(): metadata = Mock() metadata.hostname = hostname actual = awstags.get_additional_groups(metadata) msg = """%s has incorrect groups: actual: %s expected: %s""" % (hostname, actual, expected) self.assertItemsEqual(actual, expected, msg) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestBundler.py000066400000000000000000000142121303523157100253400ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Bundler import * from Bcfg2.version import Bcfg2VersionInfo # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestStructFile, TestPlugin, TestStructure, \ TestXMLDirectoryBacked class TestBundleFile(TestStructFile): test_obj = BundleFile path = os.path.join(datastore, "test", "test1.xml") def test_bundle_name(self): cases = [("foo.xml", "foo"), ("foo.bar.xml", "foo.bar"), ("foo-bar-baz.xml", "foo-bar-baz"), ("foo....xml", "foo..."), ("foo.genshi", "foo")] bf = self.get_obj() for fname, bname in cases: bf.name = fname self.assertEqual(bf.bundle_name, bname) class TestBundler(TestPlugin, TestStructure, TestXMLDirectoryBacked): test_obj = Bundler def get_obj(self, core=None): @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() @patch("Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent") def test_HandleEvent(self, mock_HandleEvent): b = self.get_obj() b.bundles = dict(foo=Mock(), bar=Mock()) b.entries = {"foo.xml": BundleFile("foo.xml"), "baz.xml": BundleFile("baz.xml")} event = Mock() b.HandleEvent(event) mock_HandleEvent.assert_called_with(b, event) self.assertItemsEqual(b.bundles, dict(foo=b.entries['foo.xml'], baz=b.entries['baz.xml'])) def test_BuildStructures(self): b = self.get_obj() b.bundles = dict(error=Mock(), skip=Mock(), xinclude=Mock(), has_dep=Mock(), is_dep=Mock(), indep=Mock()) expected = dict() b.bundles['error'].XMLMatch.side_effect = TemplateError(None) xinclude = lxml.etree.Element("Bundle") lxml.etree.SubElement(lxml.etree.SubElement(xinclude, "Bundle"), "Path", name="/test") 
b.bundles['xinclude'].XMLMatch.return_value = xinclude expected['xinclude'] = lxml.etree.Element("Bundle", name="xinclude") lxml.etree.SubElement(expected['xinclude'], "Path", name="/test") has_dep = lxml.etree.Element("Bundle") lxml.etree.SubElement(has_dep, "RequiredBundle", name="is_dep") lxml.etree.SubElement(has_dep, "RequiredBundle", name="is_mod_dep", inherit_modification="true") lxml.etree.SubElement(has_dep, "Package", name="foo") b.bundles['has_dep'].XMLMatch.return_value = has_dep expected['has_dep'] = lxml.etree.Element("Bundle", name="has_dep") lxml.etree.SubElement(expected['has_dep'], "Package", name="foo") lxml.etree.SubElement(expected['has_dep'], "Bundle", name="is_mod_dep") is_dep = lxml.etree.Element("Bundle") lxml.etree.SubElement(is_dep, "Package", name="bar") b.bundles['is_dep'].XMLMatch.return_value = is_dep expected['is_dep'] = lxml.etree.Element("Bundle", name="is_dep") lxml.etree.SubElement(expected['is_dep'], "Package", name="bar") indep = lxml.etree.Element("Bundle", independent="true") lxml.etree.SubElement(indep, "Service", name="baz") b.bundles['indep'].XMLMatch.return_value = indep expected['indep'] = lxml.etree.Element("Independent", name="indep") lxml.etree.SubElement(expected['indep'], "Service", name="baz") metadata = Mock() metadata.bundles = set(["error", "xinclude", "has_dep", "indep"]) metadata.version_info = Bcfg2VersionInfo('1.4.0') rv = b.BuildStructures(metadata) self.assertEqual(len(rv), 4) for bundle in rv: name = bundle.get("name") self.assertIsNotNone(name, "Bundle %s was not built" % name) self.assertIn(name, expected, "Unexpected bundle %s was built" % name) self.assertXMLEqual(bundle, expected[name], "Bundle %s was not built correctly" % name) b.bundles[name].XMLMatch.assert_called_with(metadata) b.bundles['error'].XMLMatch.assert_called_with(metadata) self.assertFalse(b.bundles['skip'].XMLMatch.called) def test_BuildStructuresOldClient(self): b = self.get_obj() b.bundles = dict(has_dep=Mock()) expected = dict() has_dep = lxml.etree.Element("Bundle") lxml.etree.SubElement(has_dep, "RequiredBundle", name="is_dep") lxml.etree.SubElement(has_dep, "RequiredBundle", name="is_mod_dep", inherit_modification="true") lxml.etree.SubElement(has_dep, "Package", name="foo") b.bundles['has_dep'].XMLMatch.return_value = has_dep expected['has_dep'] = lxml.etree.Element("Bundle", name="has_dep") lxml.etree.SubElement(expected['has_dep'], "Package", name="foo") metadata = Mock() metadata.bundles = set(["has_dep"]) metadata.version_info = Bcfg2VersionInfo('1.3.0') rv = b.BuildStructures(metadata) self.assertEqual(len(rv), len(metadata.bundles)) for bundle in rv: name = bundle.get("name") self.assertIsNotNone(name, "Bundle %s was not built" % name) self.assertIn(name, expected, "Unexpected bundle %s was built" % name) self.assertXMLEqual(bundle, expected[name], "Bundle %s was not built correctly" % name) b.bundles[name].XMLMatch.assert_called_with(metadata) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/000077500000000000000000000000001303523157100240725ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgAuthorizedKeysGenerator.py000066400000000000000000000147241303523157100325550ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator import * import Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 
path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator from TestServer.TestPlugin.Testhelpers import TestStructFile class TestCfgAuthorizedKeysGenerator(TestCfgGenerator, TestStructFile): test_obj = CfgAuthorizedKeysGenerator should_monitor = False def setUp(self): TestCfgGenerator.setUp(self) TestStructFile.setUp(self) @patch("Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.get_cfg") def get_obj(self, mock_get_cfg, name=None, core=None, fam=None): if name is None: name = self.path if core is not None: mock_get_cfg.return_value.core = core return self.test_obj(name) @patch("Bcfg2.Server.Plugins.Cfg.CfgGenerator.handle_event") @patch("Bcfg2.Server.Plugin.helpers.StructFile.HandleEvent") def test_handle_event(self, mock_HandleEvent, mock_handle_event): akg = self.get_obj() evt = Mock() akg.handle_event(evt) mock_HandleEvent.assert_called_with(akg, evt) mock_handle_event.assert_called_with(akg, evt) @patch("Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.ClientMetadata") def test_get_data(self, mock_ClientMetadata): Bcfg2.Options.setup.sshkeys_category = "category" akg = self.get_obj() akg.XMLMatch = Mock() def ClientMetadata(host, profile, groups, *args): rv = Mock() rv.hostname = host rv.profile = profile rv.groups = groups return rv mock_ClientMetadata.side_effect = ClientMetadata def build_metadata(host): rv = Mock() rv.hostname = host rv.profile = host return rv akg.core.build_metadata = Mock() akg.core.build_metadata.side_effect = build_metadata def Bind(ent, md): ent.text = "%s %s" % (md.profile, ent.get("name")) return ent akg.core.Bind = Mock() akg.core.Bind.side_effect = Bind metadata = Mock() metadata.profile = "profile" metadata.group_in_category.return_value = "profile" entry = lxml.etree.Element("Path", name="/root/.ssh/authorized_keys") def reset(): mock_ClientMetadata.reset_mock() akg.XMLMatch.reset_mock() akg.core.build_metadata.reset_mock() akg.core.Bind.reset_mock() metadata.reset_mock() pubkey = "/home/foo/.ssh/id_rsa.pub" spec = lxml.etree.Element("AuthorizedKeys") lxml.etree.SubElement(spec, "Allow", attrib={"from": pubkey}) akg.XMLMatch.return_value = spec self.assertEqual(akg.get_data(entry, metadata), "profile %s" % pubkey) akg.XMLMatch.assert_called_with(metadata) self.assertEqual(akg.core.Bind.call_args[0][0].get("name"), pubkey) self.assertEqual(akg.core.Bind.call_args[0][1], metadata) reset() group = "somegroup" spec = lxml.etree.Element("AuthorizedKeys") lxml.etree.SubElement(spec, "Allow", attrib={"from": pubkey, "group": group}) akg.XMLMatch.return_value = spec self.assertEqual(akg.get_data(entry, metadata), "%s %s" % (group, pubkey)) akg.XMLMatch.assert_called_with(metadata) self.assertItemsEqual(mock_ClientMetadata.call_args[0][2], [group]) self.assertEqual(akg.core.Bind.call_args[0][0].get("name"), pubkey) self.assertIn(group, akg.core.Bind.call_args[0][1].groups) reset() host = "baz.example.com" spec = lxml.etree.Element("AuthorizedKeys") allow = lxml.etree.SubElement(spec, "Allow", attrib={"from": pubkey, "host": host}) lxml.etree.SubElement(allow, "Option", name="foo", value="foo") lxml.etree.SubElement(allow, "Option", name="bar") lxml.etree.SubElement(allow, "Option", name="baz", value="baz=baz") akg.XMLMatch.return_value = spec params, actual_host, actual_pubkey = akg.get_data(entry, metadata).split() 
self.assertEqual(actual_host, host) self.assertEqual(actual_pubkey, pubkey) self.assertItemsEqual(params.split(","), ["foo=foo", "bar", "baz=baz=baz"]) akg.XMLMatch.assert_called_with(metadata) akg.core.build_metadata.assert_called_with(host) self.assertEqual(akg.core.Bind.call_args[0][0].get("name"), pubkey) self.assertEqual(akg.core.Bind.call_args[0][1].hostname, host) reset() spec = lxml.etree.Element("AuthorizedKeys") text = lxml.etree.SubElement(spec, "Allow") text.text = "ssh-rsa publickey /foo/bar\n" lxml.etree.SubElement(text, "Option", name="foo") akg.XMLMatch.return_value = spec self.assertEqual(akg.get_data(entry, metadata), "foo %s" % text.text.strip()) akg.XMLMatch.assert_called_with(metadata) self.assertFalse(akg.core.build_metadata.called) self.assertFalse(akg.core.Bind.called) reset() lxml.etree.SubElement(spec, "Allow", attrib={"from": pubkey}) akg.XMLMatch.return_value = spec self.assertItemsEqual(akg.get_data(entry, metadata).splitlines(), ["foo %s" % text.text.strip(), "profile %s" % pubkey]) akg.XMLMatch.assert_called_with(metadata) reset() metadata.group_in_category.return_value = '' spec = lxml.etree.Element("AuthorizedKeys") lxml.etree.SubElement(spec, "Allow", attrib={"from": pubkey}) akg.XMLMatch.return_value = spec self.assertEqual(akg.get_data(entry, metadata), '') akg.XMLMatch.assert_called_with(metadata) self.assertFalse(akg.core.build_metadata.called) self.assertFalse(akg.core.Bind.called) self.assertFalse(mock_ClientMetadata.called) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgCheetahGenerator.py000066400000000000000000000042371303523157100311420ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator class TestCfgCheetahGenerator(TestCfgGenerator): test_obj = CfgCheetahGenerator @skipUnless(HAS_CHEETAH, "Cheetah libraries not found, skipping") def setUp(self): TestCfgGenerator.setUp(self) set_setup_default("repository", datastore) @patch("Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.Template") @patch("Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.get_template_data") def test_get_data(self, mock_get_template_data, mock_Template): ccg = self.get_obj() ccg.data = "data" entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() template_vars = dict(name=entry.get("name"), metadata=metadata, path=ccg.name, source_path=ccg.name, repo=datastore) mock_get_template_data.return_value = template_vars self.assertEqual(ccg.get_data(entry, metadata), mock_Template.return_value.respond.return_value) mock_Template.assert_called_with( "data".decode(Bcfg2.Options.setup.encoding), compilerSettings=ccg.settings) tmpl = mock_Template.return_value tmpl.respond.assert_called_with() for key, val in template_vars.items(): self.assertEqual(getattr(tmpl, key), val) self.assertItemsEqual(mock_get_template_data.call_args[0], [entry, metadata, ccg.name]) self.assertIsInstance(mock_get_template_data.call_args[1]['default'], DefaultCheetahDataProvider) 
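# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the contract that
# the TestCfg*Generator test classes in this directory exercise.  A Cfg
# generator reads the monitored file into ``self.data`` when handle_event()
# fires and returns the per-client file content from get_data(entry,
# metadata).  Only the CfgGenerator base class, the two method signatures and
# the use of ``self.data`` are taken from the tests above; the example class
# itself and its transformations are hypothetical.
from Bcfg2.Server.Plugins.Cfg import CfgGenerator


class CfgExampleGenerator(CfgGenerator):
    """ Hypothetical generator that upper-cases the on-disk content. """

    def handle_event(self, event):
        # delegate to the stock generator, which stores the raw file
        # content on self.data, then post-process it
        CfgGenerator.handle_event(self, event)
        self.data = self.data.strip()

    def get_data(self, entry, metadata):
        # entry is the lxml <Path> element being bound; metadata is the
        # requesting client's metadata
        return self.data.upper()
# ---------------------------------------------------------------------------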
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedCheetahGenerator.py000066400000000000000000000027441303523157100330210ustar00rootroot00000000000000import os
import sys
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator import *

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != "/":
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)
from common import *

try:
    from TestServer.TestPlugins.TestCfg.TestCfgCheetahGenerator import \
        TestCfgCheetahGenerator
    from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import HAS_CHEETAH
except ImportError:
    TestCfgCheetahGenerator = object
    HAS_CHEETAH = False

try:
    from TestServer.TestPlugins.TestCfg.TestCfgEncryptedGenerator import \
        TestCfgEncryptedGenerator
    from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import HAS_CRYPTO
except ImportError:
    TestCfgEncryptedGenerator = object
    HAS_CRYPTO = False


class TestCfgEncryptedCheetahGenerator(TestCfgCheetahGenerator,
                                       TestCfgEncryptedGenerator):
    test_obj = CfgEncryptedCheetahGenerator

    @skipUnless(HAS_CRYPTO, "Encryption libraries not found, skipping")
    @skipUnless(HAS_CHEETAH, "Cheetah libraries not found, skipping")
    def setUp(self):
        pass

    def test_handle_event(self):
        TestCfgEncryptedGenerator.test_handle_event(self)

    def test_get_data(self):
        TestCfgCheetahGenerator.test_get_data(self)
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py000066400000000000000000000047471303523157100315420ustar00rootroot00000000000000import os
import sys
import lxml.etree
import Bcfg2.Server.Plugins.Cfg
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import *
from Bcfg2.Server.Plugin import PluginExecutionError

# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != "/":
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)
from common import *
from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator


class TestCfgEncryptedGenerator(TestCfgGenerator):
    test_obj = CfgEncryptedGenerator

    @skipUnless(HAS_CRYPTO, "M2Crypto is not available")
    def setUp(self):
        TestCfgGenerator.setUp(self)

    @patchIf(HAS_CRYPTO,
             "Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator.bruteforce_decrypt")
    def test_handle_event(self, mock_decrypt):
        @patch("Bcfg2.Server.Plugins.Cfg.CfgGenerator.handle_event")
        @patch("Bcfg2.Options.setup.lax_decryption", False)
        def inner(mock_handle_event):
            def reset():
                mock_decrypt.reset_mock()
                mock_handle_event.reset_mock()

            def get_event_data(obj, event):
                obj.data = "encrypted"

            mock_handle_event.side_effect = get_event_data
            mock_decrypt.side_effect = lambda d, **kw: "plaintext"
            event = Mock()
            ceg = self.get_obj()
            ceg.handle_event(event)
            mock_handle_event.assert_called_with(ceg, event)
            mock_decrypt.assert_called_with("encrypted")
            self.assertEqual(ceg.data, "plaintext")

            reset()
            mock_decrypt.side_effect = EVPError
            self.assertRaises(PluginExecutionError,
                              ceg.handle_event, event)

        inner()

        # to perform the tests from the parent test object, we
        # make bruteforce_decrypt just return whatever data was
        # given to it
        mock_decrypt.side_effect = lambda d, **kw: d
        TestCfgGenerator.test_handle_event(self)

    def test_get_data(self):
        ceg = self.get_obj()
ceg.data = None entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() self.assertRaises(PluginExecutionError, ceg.get_data, entry, metadata) TestCfgGenerator.test_get_data(self) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenshiGenerator.py000066400000000000000000000014701303523157100326700ustar00rootroot00000000000000import os import sys from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.TestCfgGenshiGenerator import \ TestCfgGenshiGenerator class TestCfgEncryptedGenshiGenerator(TestCfgGenshiGenerator): test_obj = CfgEncryptedGenshiGenerator @skipUnless(HAS_CRYPTO, "Encryption libraries not found, skipping") def setUp(self): TestCfgGenshiGenerator.setUp(self) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedJinja2Generator.py000066400000000000000000000027271303523157100325760ustar00rootroot00000000000000import os import sys from Bcfg2.Server.Plugins.Cfg.CfgEncryptedJinja2Generator import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * try: from TestServer.TestPlugins.TestCfg.TestCfgJinja2Generator import \ TestCfgJinja2Generator from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import HAS_JINJA2 except ImportError: TestCfgJinja2Generator = object HAS_JINJA2 = False try: from TestServer.TestPlugins.TestCfg.TestCfgEncryptedGenerator import \ TestCfgEncryptedGenerator from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import HAS_CRYPTO except ImportError: TestCfgEncryptedGenerator = object HAS_CRYPTO = False class TestCfgEncryptedJinja2Generator(TestCfgJinja2Generator, TestCfgEncryptedGenerator): test_obj = CfgEncryptedJinja2Generator @skipUnless(HAS_CRYPTO, "Encryption libraries not found, skipping") @skipUnless(HAS_JINJA2, "Jinja2 libraries not found, skipping") def setUp(self): pass def test_handle_event(self): TestCfgEncryptedGenerator.test_handle_event(self) def test_get_data(self): TestCfgJinja2Generator.test_get_data(self) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgExternalCommandVerifier.py000066400000000000000000000060531303523157100325050ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgExternalCommandVerifier import * from Bcfg2.Server.Plugin import PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgVerifier class TestCfgExternalCommandVerifier(TestCfgVerifier): test_obj = CfgExternalCommandVerifier def test_verify_entry(self): entry = lxml.etree.Element("Path", 
name="/test.txt") metadata = Mock() ecv = self.get_obj() ecv.cmd = ["/bin/bash", "-x", "foo"] ecv.exc = Mock() ecv.exc.run.return_value = Mock() ecv.exc.run.return_value.success = True ecv.verify_entry(entry, metadata, "data") ecv.exc.run.assert_called_with(ecv.cmd, inputdata="data") ecv.exc.reset_mock() ecv.exc.run.return_value.success = False self.assertRaises(CfgVerificationError, ecv.verify_entry, entry, metadata, "data") ecv.exc.run.assert_called_with(ecv.cmd, inputdata="data") ecv.exc.reset_mock() ecv.exc.reset_mock() ecv.exc.run.side_effect = OSError self.assertRaises(CfgVerificationError, ecv.verify_entry, entry, metadata, "data") ecv.exc.run.assert_called_with(ecv.cmd, inputdata="data") @patch("os.access") def test_handle_event(self, mock_access): @patch("Bcfg2.Server.Plugins.Cfg.CfgVerifier.handle_event") def inner(mock_handle_event): ecv = self.get_obj() event = Mock() mock_access.return_value = False ecv.data = "data" self.assertRaises(PluginExecutionError, ecv.handle_event, event) mock_handle_event.assert_called_with(ecv, event) mock_access.assert_called_with(ecv.name, os.X_OK) self.assertItemsEqual(ecv.cmd, []) mock_access.reset_mock() mock_handle_event.reset_mock() ecv.data = "#! /bin/bash -x\ntrue" ecv.handle_event(event) mock_handle_event.assert_called_with(ecv, event) mock_access.assert_called_with(ecv.name, os.X_OK) self.assertEqual(ecv.cmd, ["/bin/bash", "-x", ecv.name]) mock_access.reset_mock() mock_handle_event.reset_mock() mock_access.return_value = True ecv.data = "true" ecv.handle_event(event) mock_handle_event.assert_called_with(ecv, event) mock_access.assert_called_with(ecv.name, os.X_OK) self.assertItemsEqual(ecv.cmd, [ecv.name]) inner() mock_access.return_value = True TestCfgVerifier.test_handle_event(self) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgGenshiGenerator.py000066400000000000000000000132721303523157100310150ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch import Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import * from Bcfg2.Server.Plugin import PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator class TestCfgGenshiGenerator(TestCfgGenerator): test_obj = CfgGenshiGenerator def setUp(self): TestCfgGenerator.setUp(self) set_setup_default("repository", datastore) def test__init(self): TestCfgGenerator.test__init(self) cgg = self.get_obj() self.assertIsInstance(cgg.loader, cgg.__loader_cls__) @patch("Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator.get_template_data") def test_get_data(self, mock_get_template_data): cgg = self.get_obj() cgg._handle_genshi_exception = Mock() cgg.template = Mock() fltr = Mock() cgg.template.generate.return_value = fltr stream = Mock() fltr.filter.return_value = stream entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() def reset(): cgg.template.reset_mock() cgg._handle_genshi_exception.reset_mock() mock_get_template_data.reset_mock() template_vars = dict(name=entry.get("name"), metadata=metadata, path=cgg.name, source_path=cgg.name, repo=datastore) mock_get_template_data.return_value = template_vars 
self.assertEqual(cgg.get_data(entry, metadata), stream.render.return_value) cgg.template.generate.assert_called_with(**template_vars) self.assertItemsEqual(mock_get_template_data.call_args[0], [entry, metadata, cgg.name]) self.assertIsInstance(mock_get_template_data.call_args[1]['default'], DefaultGenshiDataProvider) fltr.filter.assert_called_with(removecomment) stream.render.assert_called_with( "text", encoding=Bcfg2.Options.setup.encoding, strip_whitespace=False) reset() def render(fmt, **kwargs): stream.render.side_effect = None raise TypeError stream.render.side_effect = render self.assertEqual(cgg.get_data(entry, metadata), stream.render.return_value) cgg.template.generate.assert_called_with(**template_vars) self.assertItemsEqual(mock_get_template_data.call_args[0], [entry, metadata, cgg.name]) self.assertIsInstance(mock_get_template_data.call_args[1]['default'], DefaultGenshiDataProvider) fltr.filter.assert_called_with(removecomment) self.assertEqual(stream.render.call_args_list, [call("text", encoding=Bcfg2.Options.setup.encoding, strip_whitespace=False), call("text", encoding=Bcfg2.Options.setup.encoding)]) reset() stream.render.side_effect = UndefinedError("test") self.assertRaises(UndefinedError, cgg.get_data, entry, metadata) cgg.template.generate.assert_called_with(**template_vars) self.assertItemsEqual(mock_get_template_data.call_args[0], [entry, metadata, cgg.name]) self.assertIsInstance(mock_get_template_data.call_args[1]['default'], DefaultGenshiDataProvider) fltr.filter.assert_called_with(removecomment) stream.render.assert_called_with("text", encoding=Bcfg2.Options.setup.encoding, strip_whitespace=False) reset() stream.render.side_effect = ValueError cgg._handle_genshi_exception.side_effect = ValueError self.assertRaises(ValueError, cgg.get_data, entry, metadata) cgg.template.generate.assert_called_with(**template_vars) self.assertItemsEqual(mock_get_template_data.call_args[0], [entry, metadata, cgg.name]) self.assertIsInstance(mock_get_template_data.call_args[1]['default'], DefaultGenshiDataProvider) fltr.filter.assert_called_with(removecomment) stream.render.assert_called_with("text", encoding=Bcfg2.Options.setup.encoding, strip_whitespace=False) self.assertTrue(cgg._handle_genshi_exception.called) def test_handle_event(self): cgg = self.get_obj() cgg.loader = Mock() event = Mock() cgg.handle_event(event) cgg.loader.load.assert_called_with( cgg.name, cls=NewTextTemplate, encoding=Bcfg2.Options.setup.encoding) cgg.loader.reset_mock() cgg.loader.load.side_effect = TemplateError("test") self.assertRaises(PluginExecutionError, cgg.handle_event, event) cgg.loader.load.assert_called_with( cgg.name, cls=NewTextTemplate, encoding=Bcfg2.Options.setup.encoding) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgInfoXML.py000066400000000000000000000025131303523157100272010ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import * from Bcfg2.Server.Plugin import InfoXML, PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgInfo class TestCfgInfoXML(TestCfgInfo): test_obj = CfgInfoXML def setUp(self): TestCfgInfo.setUp(self) 
set_setup_default("filemonitor", MagicMock()) def test__init(self): TestCfgInfo.test__init(self) ci = self.get_obj() self.assertIsInstance(ci.infoxml, InfoXML) def test_bind_info_to_entry(self): ci = self.get_obj() ci.infoxml = Mock() entry = Mock() metadata = Mock() ci.bind_info_to_entry(entry, metadata) ci.infoxml.BindEntry.assert_called_with(entry, metadata) def test_handle_event(self): ci = self.get_obj() ci.infoxml = Mock() ci.handle_event(Mock) ci.infoxml.HandleEvent.assert_called_with() testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgJinja2Generator.py000066400000000000000000000051421303523157100307120ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator class TestCfgJinja2Generator(TestCfgGenerator): test_obj = CfgJinja2Generator @skipUnless(HAS_JINJA2, "Jinja2 libraries not found, skipping") def setUp(self): TestCfgGenerator.setUp(self) set_setup_default("repository", datastore) def test__init(self): TestCfgGenerator.test__init(self) cgg = self.get_obj() self.assertIsInstance(cgg.loader, cgg.__loader_cls__) self.assertIsInstance(cgg.environment, cgg.__environment_cls__) @patch("Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator.Environment") @patch("Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator.get_template_data") def test_get_data(self, mock_get_template_data, mock_Environment): cgg = self.get_obj() entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() # self.template is currently None self.assertRaises(PluginExecutionError, cgg.get_data, entry, metadata) cgg.template = mock_Environment.return_value.get_template.return_value template_vars = dict(name=entry.get("name"), metadata=metadata, path=cgg.name, source_path=cgg.name, repo=datastore) mock_get_template_data.return_value = template_vars tmpl = mock_Environment.return_value.get_template.return_value self.assertEqual(cgg.get_data(entry, metadata), tmpl.render.return_value) tmpl.render.assert_called_with(template_vars) def test_handle_event(self): cgg = self.get_obj() cgg.environment = Mock() event = Mock() cgg.handle_event(event) cgg.environment.get_template.assert_called_with( cgg.name) cgg.environment.reset_mock() cgg.environment.get_template.side_effect = OSError self.assertRaises(PluginExecutionError, cgg.handle_event, event) cgg.environment.get_template.assert_called_with( cgg.name) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPlaintextGenerator.py000066400000000000000000000011001303523157100315330ustar00rootroot00000000000000import os import sys from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator class TestCfgPlaintextGenerator(TestCfgGenerator): test_obj = CfgPlaintextGenerator 
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py000066400000000000000000000124361303523157100313350ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Compat import StringIO from Bcfg2.Server.Plugins.Cfg import CfgCreationError from Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator import * from Bcfg2.Server.Plugin import PluginExecutionError import Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator try: from Bcfg2.Server.Encryption import EVPError HAS_CRYPTO = True except: HAS_CRYPTO = False # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestXMLCfgCreator class TestCfgPrivateKeyCreator(TestXMLCfgCreator): test_obj = CfgPrivateKeyCreator should_monitor = False def setUp(self): TestXMLCfgCreator.setUp(self) set_setup_default("cfg_category", "category") @patch("Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.get_cfg", Mock()) def get_obj(self, name=None, fam=None): return TestXMLCfgCreator.get_obj(self, name=name) @patch("shutil.rmtree") def _gen_keypair(self, mock_mkdtemp, mock_rmtree): pkc = self.get_obj() pkc.cmd = Mock() pkc.XMLMatch = Mock() metadata = Mock() exc = Mock() exc.success = True pkc.cmd.run.return_value = exc pkc.XMLMatch.return_value = spec def reset(): pkc.XMLMatch.reset_mock() pkc.cmd.reset_mock() mock_mkdtemp.reset_mock() mock_rmtree.reset_mock() self.assertEqual(pkc._gen_keypair(metadata), os.path.join(datastore, "privkey")) pkc.XMLMatch.assert_called_with(metadata) mock_mkdtemp.assert_called_with() pkc.cmd.run.assert_called_with(["ssh-keygen", "-f", os.path.join(datastore, "privkey"), "-t", "rsa", "-N", ""]) reset() lxml.etree.SubElement(spec, "Params", bits="768", type="dsa") passphrase = lxml.etree.SubElement(spec, "Passphrase") passphrase.text = "foo" self.assertEqual(pkc._gen_keypair(metadata), os.path.join(datastore, "privkey")) pkc.XMLMatch.assert_called_with(metadata) mock_mkdtemp.assert_called_with() pkc.cmd.run.assert_called_with(["ssh-keygen", "-f", os.path.join(datastore, "privkey"), "-t", "dsa", "-b", "768", "-N", "foo"]) reset() pkc.cmd.run.return_value.success = False self.assertRaises(CfgCreationError, pkc._gen_keypair, metadata) mock_rmtree.assert_called_with(datastore) @patch("shutil.rmtree") @patch("tempfile.mkdtemp") @patch("%s.open" % builtins) def _create_private_key(self, expected, mock_open, mock_mkdtemp, mock_rmtree, spec=None): pkc = self.get_obj(name="/home/foo/.ssh/id_rsa/privkey.xml") pkc.cmd = MockExecutor() pkc.pubkey_creator.write_data = Mock() pkc.write_data = Mock() mock_mkdtemp.return_value = datastore if spec is None: pkc.xdata = lxml.etree.Element("PrivateKey") else: pkc.xdata = spec privkey_filename = os.path.join(datastore, "privkey") pubkey_filename = os.path.join(datastore, "privkey.pub") entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa") metadata = Mock() metadata.group_in_category.return_value = "foo" def open_key(fname): if fname == privkey_filename: return StringIO("privatekey") elif fname == pubkey_filename: return StringIO("ssh-rsa publickey foo@bar.com") else: self.fail("Unexpected open call: %s" % fname) mock_open.side_effect = open_key self.assertEqual(pkc.create_data(entry, metadata), 
"privatekey") self.assertItemsEqual(mock_open.call_args_list, [call(pubkey_filename), call(privkey_filename)]) self.assertItemsEqual( pkc.cmd.calls[0]['command'], ['ssh-keygen', '-f', privkey_filename] + expected) metadata.group_in_category.assert_called_with("category") pkc.pubkey_creator.write_data.assert_called_with( "ssh-rsa publickey /home/foo/.ssh/id_rsa.pub/id_rsa.pub.G50_foo\n", group="foo", prio=50) pkc.write_data.assert_called_with("privatekey", group="foo", prio=50) mock_rmtree.assert_called_with(datastore) def test_create_data(self): pass def test_create_private_key_defaults(self): self._create_private_key(['-t', 'rsa', '-N', '']) def test_create_private_key_spec(self): spec = lxml.etree.Element("PrivateKey") lxml.etree.SubElement(spec, "Params", bits="768", type="dsa") passphrase = lxml.etree.SubElement(spec, "Passphrase") passphrase.text = "foo" self._create_private_key(['-t', 'dsa', '-b', '768', '-N', 'foo'], spec=spec) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py000066400000000000000000000144031303523157100311350ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg import CfgCreationError, CfgCreator from Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator import * import Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator from Bcfg2.Server.Plugin import PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestServer.TestPlugins.TestCfg.Test_init import TestCfgCreator from TestServer.TestPlugin.Testhelpers import TestStructFile class TestCfgPublicKeyCreator(TestCfgCreator, TestStructFile): test_obj = CfgPublicKeyCreator should_monitor = False @patch("Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.get_cfg", Mock()) def get_obj(self, name=None, fam=None): return TestCfgCreator.get_obj(self, name=name) @patch("Bcfg2.Server.Plugins.Cfg.CfgCreator.handle_event") @patch("Bcfg2.Server.Plugin.helpers.StructFile.HandleEvent") def test_handle_event(self, mock_HandleEvent, mock_handle_event): pkc = self.get_obj() evt = Mock() pkc.handle_event(evt) mock_HandleEvent.assert_called_with(pkc, evt) mock_handle_event.assert_called_with(pkc, evt) @patch("os.unlink") @patch("os.path.exists") @patch("tempfile.mkstemp") @patch("os.fdopen", Mock()) @patch("%s.open" % builtins) def test_create_data(self, mock_open, mock_mkstemp, mock_exists, mock_unlink): metadata = Mock() pkc = self.get_obj() pkc.cfg = Mock() pkc.core = Mock() pkc.cmd = Mock() pkc.write_data = Mock() pubkey = "public key data" privkey_entryset = Mock() privkey_creator = Mock() privkey_creator.get_specificity = Mock() privkey_creator.get_specificity.return_value = dict() fileloc = pkc.get_filename() pkc.cfg.entries = {"/home/foo/.ssh/id_rsa": privkey_entryset} def reset(): privkey_creator.reset_mock() pkc.cmd.reset_mock() pkc.core.reset_mock() pkc.write_data.reset_mock() mock_exists.reset_mock() mock_unlink.reset_mock() mock_mkstemp.reset_mock() mock_open.reset_mock() # public key doesn't end in .pub entry = lxml.etree.Element("Path", name="/home/bar/.ssh/bogus") self.assertRaises(CfgCreationError, pkc.create_data, entry, metadata) self.assertFalse(pkc.write_data.called) # cannot bind private key reset() pkc.core.Bind.side_effect = 
PluginExecutionError entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub") self.assertRaises(CfgCreationError, pkc.create_data, entry, metadata) self.assertFalse(pkc.write_data.called) # private key not in cfg.entries reset() pkc.core.Bind.side_effect = None pkc.core.Bind.return_value = "private key data" entry = lxml.etree.Element("Path", name="/home/bar/.ssh/id_rsa.pub") self.assertRaises(CfgCreationError, pkc.create_data, entry, metadata) self.assertFalse(pkc.write_data.called) # no privkey.xml defined reset() privkey_entryset.best_matching.side_effect = PluginExecutionError entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub") self.assertRaises(CfgCreationError, pkc.create_data, entry, metadata) self.assertFalse(pkc.write_data.called) # successful operation, create new key reset() pkc.cmd.run.return_value = Mock() pkc.cmd.run.return_value.success = True pkc.cmd.run.return_value.stdout = pubkey mock_mkstemp.return_value = (Mock(), str(Mock())) mock_exists.return_value = False privkey_entryset.best_matching.side_effect = None privkey_entryset.best_matching.return_value = privkey_creator entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub") self.assertEqual(pkc.create_data(entry, metadata), pubkey) self.assertTrue(pkc.core.Bind.called) (privkey_entry, md) = pkc.core.Bind.call_args[0] self.assertXMLEqual(privkey_entry, lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa")) self.assertEqual(md, metadata) privkey_entryset.get_handlers.assert_called_with(metadata, CfgCreator) privkey_entryset.best_matching.assert_called_with( metadata, privkey_entryset.get_handlers.return_value) mock_exists.assert_called_with(fileloc) pkc.cmd.run.assert_called_with(["ssh-keygen", "-y", "-f", mock_mkstemp.return_value[1]]) self.assertEqual(pkc.write_data.call_args[0][0], pubkey) mock_unlink.assert_called_with(mock_mkstemp.return_value[1]) self.assertFalse(mock_open.called) # successful operation, no need to create new key reset() mock_exists.return_value = True mock_open.return_value = Mock() mock_open.return_value.read.return_value = pubkey pkc.cmd.run.return_value.stdout = None self.assertEqual(pkc.create_data(entry, metadata), pubkey) self.assertTrue(pkc.core.Bind.called) (privkey_entry, md) = pkc.core.Bind.call_args[0] self.assertXMLEqual(privkey_entry, lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa")) self.assertEqual(md, metadata) privkey_entryset.get_handlers.assert_called_with(metadata, CfgCreator) privkey_entryset.best_matching.assert_called_with( metadata, privkey_entryset.get_handlers.return_value) mock_exists.assert_called_with(fileloc) mock_open.assert_called_with(fileloc) self.assertFalse(mock_mkstemp.called) self.assertFalse(pkc.write_data.called) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py000066400000000000000000001011441303523157100264070ustar00rootroot00000000000000import os import sys import errno import lxml.etree import Bcfg2.Options from Bcfg2.Compat import walk_packages, ConfigParser from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Cfg import * from Bcfg2.Server.Plugin import PluginExecutionError, Specificity # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestSpecificData, TestEntrySet, 
TestGroupSpool, \ TestPullTarget, TestStructFile class TestCfgBaseFileMatcher(TestSpecificData): test_obj = CfgBaseFileMatcher path = os.path.join(datastore, "test+test.txt") def test_get_regex(self): if self.test_obj.__basenames__: basenames = self.test_obj.__basenames__ else: basenames = [os.path.basename(self.path)] if self.test_obj.__extensions__: extensions = self.test_obj.__extensions__ else: extensions = [''] for extension in extensions: regex = self.test_obj.get_regex(basenames) for basename in basenames: def test_match(spec): mstr = basename if spec: mstr += "." + spec if extension: mstr += "." + extension return regex.match(mstr) self.assertTrue(test_match('')) self.assertFalse(regex.match("bogus")) if self.test_obj.__specific__: if extension: self.assertFalse(regex.match("bogus." + extension)) self.assertTrue(test_match("G20_foo")) self.assertTrue(test_match("G1_foo")) self.assertTrue(test_match("G32768_foo")) # a group named '_' self.assertTrue(test_match("G10__")) self.assertTrue(test_match("H_hostname")) self.assertTrue(test_match("H_fqdn.subdomain.example.com")) self.assertTrue(test_match("G20_group_with_underscores")) self.assertFalse(test_match("G20_group with spaces")) self.assertFalse(test_match("G_foo")) self.assertFalse(test_match("G_")) self.assertFalse(test_match("G20_")) self.assertFalse(test_match("H_")) else: self.assertFalse(test_match("G20_foo")) self.assertFalse(test_match("H_hostname")) @patch("Bcfg2.Server.Plugins.Cfg.CfgBaseFileMatcher.get_regex") def test_handles(self, mock_get_regex): match = Mock() mock_get_regex.return_value = Mock() mock_get_regex.return_value.match = match evt = Mock() evt.filename = "event.txt" if self.test_obj.__basenames__: match.return_value = False self.assertFalse(self.test_obj.handles(evt)) mock_get_regex.assert_called_with( [b for b in self.test_obj.__basenames__]) print("match calls: %s" % match.call_args_list) print("expected: %s" % [call(evt.filename) for b in self.test_obj.__basenames__]) match.assert_called_with(evt.filename) mock_get_regex.reset_mock() match.reset_mock() match.return_value = True self.assertTrue(self.test_obj.handles(evt)) match.assert_called_with(evt.filename) else: match.return_value = False self.assertFalse(self.test_obj.handles(evt, basename=os.path.basename(self.path))) mock_get_regex.assert_called_with([os.path.basename(self.path)]) match.assert_called_with(evt.filename) mock_get_regex.reset_mock() match.reset_mock() match.return_value = True self.assertTrue(self.test_obj.handles(evt, basename=os.path.basename(self.path))) mock_get_regex.assert_called_with([os.path.basename(self.path)]) match.assert_called_with(evt.filename) def test_ignore(self): evt = Mock() evt.filename = "event.txt" if not self.test_obj.__ignore__: self.assertFalse(self.test_obj.ignore(evt)) else: self.assertFalse(self.test_obj.ignore(evt)) for extension in self.test_obj.__ignore__: for name in ["event.txt", "....", extension, "." + extension]: for filler in ['', '.blah', '......', '.' + extension]: evt.filename = name + filler + '.' 
+ extension self.assertTrue(self.test_obj.ignore(evt)) class TestCfgGenerator(TestCfgBaseFileMatcher): test_obj = CfgGenerator def test_get_data(self): cg = self.get_obj() cg.data = "foo bar baz" self.assertEqual(cg.data, cg.get_data(Mock(), Mock())) class TestCfgFilter(TestCfgBaseFileMatcher): test_obj = CfgFilter def test_modify_data(self): cf = self.get_obj() self.assertRaises(NotImplementedError, cf.modify_data, Mock(), Mock(), Mock()) class TestCfgInfo(TestCfgBaseFileMatcher): test_obj = CfgInfo def get_obj(self, name=None): if name is None: name = self.path return self.test_obj(name) @patch("Bcfg2.Server.Plugins.Cfg.CfgBaseFileMatcher.__init__") def test__init(self, mock__init): ci = self.get_obj("test.txt") mock__init.assert_called_with(ci, "test.txt", None) def test_bind_info_to_entry(self): ci = self.get_obj() self.assertRaises(NotImplementedError, ci.bind_info_to_entry, Mock(), Mock()) class TestCfgVerifier(TestCfgBaseFileMatcher): test_obj = CfgVerifier def test_verify_entry(self): cf = self.get_obj() self.assertRaises(NotImplementedError, cf.verify_entry, Mock(), Mock(), Mock()) class TestCfgCreator(TestCfgBaseFileMatcher): test_obj = CfgCreator path = "/foo/bar/test.txt" should_monitor = False def setUp(self): TestCfgBaseFileMatcher.setUp(self) set_setup_default("filemonitor", MagicMock()) set_setup_default("cfg_passphrase", None) def get_obj(self, name=None): if name is None: name = self.path return self.test_obj(name) def test_create_data(self): cc = self.get_obj() self.assertRaises(NotImplementedError, cc.create_data, Mock(), Mock()) def test_get_filename(self): cc = self.get_obj() # tuples of (args to get_filename(), expected result) cases = [(dict(), "/foo/bar/bar"), (dict(prio=50), "/foo/bar/bar"), (dict(ext=".crypt"), "/foo/bar/bar.crypt"), (dict(ext="bar"), "/foo/bar/barbar"), (dict(host="foo.bar.example.com"), "/foo/bar/bar.H_foo.bar.example.com"), (dict(host="foo.bar.example.com", prio=50, ext=".crypt"), "/foo/bar/bar.H_foo.bar.example.com.crypt"), (dict(group="group", prio=1), "/foo/bar/bar.G01_group"), (dict(group="group", prio=50), "/foo/bar/bar.G50_group"), (dict(group="group", prio=50, ext=".crypt"), "/foo/bar/bar.G50_group.crypt")] for args, expected in cases: self.assertEqual(cc.get_filename(**args), expected) @patch("os.makedirs") @patch("%s.open" % builtins) def test_write_data(self, mock_open, mock_makedirs): cc = self.get_obj() data = "test\ntest" parent = os.path.dirname(self.path) def reset(): mock_open.reset_mock() mock_makedirs.reset_mock() # test writing file reset() spec = dict(group="foogroup", prio=9) cc.write_data(data, **spec) mock_makedirs.assert_called_with(parent) mock_open.assert_called_with(cc.get_filename(**spec), "wb") mock_open.return_value.write.assert_called_with(data) # test already-exists error from makedirs reset() mock_makedirs.side_effect = OSError(errno.EEXIST, self.path) cc.write_data(data) mock_makedirs.assert_called_with(parent) mock_open.assert_called_with(cc.get_filename(), "wb") mock_open.return_value.write.assert_called_with(data) # test error from open reset() mock_open.side_effect = IOError self.assertRaises(CfgCreationError, cc.write_data, data) # test real error from makedirs reset() mock_makedirs.side_effect = OSError self.assertRaises(CfgCreationError, cc.write_data, data) class TestXMLCfgCreator(TestCfgCreator, TestStructFile): test_obj = XMLCfgCreator def setUp(self): TestCfgCreator.setUp(self) TestStructFile.setUp(self) @patch("Bcfg2.Server.Plugins.Cfg.CfgCreator.handle_event") 
@patch("Bcfg2.Server.Plugin.helpers.StructFile.HandleEvent") def test_handle_event(self, mock_HandleEvent, mock_handle_event): cc = self.get_obj() evt = Mock() cc.handle_event(evt) mock_HandleEvent.assert_called_with(cc, evt) mock_handle_event.assert_called_with(cc, evt) def test_get_specificity(self): cc = self.get_obj() metadata = Mock() def reset(): metadata.group_in_category.reset_mock() category = "%s.%s.category" % (self.test_obj.__module__, self.test_obj.__name__) @patch(category, None) def inner(): cc.xdata = lxml.etree.Element("PrivateKey") self.assertItemsEqual(cc.get_specificity(metadata), dict(host=metadata.hostname)) inner() @patch(category, "foo") def inner2(): cc.xdata = lxml.etree.Element("PrivateKey") self.assertItemsEqual(cc.get_specificity(metadata), dict(group=metadata.group_in_category.return_value, prio=50)) metadata.group_in_category.assert_called_with("foo") reset() cc.xdata = lxml.etree.Element("PrivateKey", perhost="true") self.assertItemsEqual(cc.get_specificity(metadata), dict(host=metadata.hostname)) reset() cc.xdata = lxml.etree.Element("PrivateKey", category="bar") self.assertItemsEqual(cc.get_specificity(metadata), dict(group=metadata.group_in_category.return_value, prio=50)) metadata.group_in_category.assert_called_with("bar") reset() cc.xdata = lxml.etree.Element("PrivateKey", prio="10") self.assertItemsEqual(cc.get_specificity(metadata), dict(group=metadata.group_in_category.return_value, prio=10)) metadata.group_in_category.assert_called_with("foo") reset() cc.xdata = lxml.etree.Element("PrivateKey") metadata.group_in_category.return_value = '' self.assertItemsEqual(cc.get_specificity(metadata), dict(host=metadata.hostname)) metadata.group_in_category.assert_called_with("foo") inner2() class TestCfgDefaultInfo(TestCfgInfo): test_obj = CfgDefaultInfo def get_obj(self, *_): return self.test_obj() def test__init(self): pass def test_handle_event(self): # this CfgInfo handler doesn't handle any events -- it's not # file-driven, but based on the built-in defaults pass @patch("Bcfg2.Server.Plugin.default_path_metadata") def test_bind_info_to_entry(self, mock_default_path_metadata): cdi = self.get_obj() entry = lxml.etree.Element("Test", name="test") mock_default_path_metadata.return_value = \ dict(owner="root", mode="0600") cdi.bind_info_to_entry(entry, Mock()) self.assertItemsEqual(entry.attrib, dict(owner="root", mode="0600", name="test")) class TestCfgEntrySet(TestEntrySet): test_obj = CfgEntrySet def setUp(self): TestEntrySet.setUp(self) set_setup_default("cfg_validation", False) set_setup_default("cfg_handlers", []) def test__init(self): pass def test_handle_event(self): eset = self.get_obj() eset.entry_init = Mock() Bcfg2.Options.setup.cfg_handlers = [Mock(), Mock(), Mock()] for hdlr in Bcfg2.Options.setup.cfg_handlers: hdlr.__name__ = "handler" eset.entries = dict() def reset(): eset.entry_init.reset_mock() for hdlr in Bcfg2.Options.setup.cfg_handlers: hdlr.reset_mock() # test that a bogus deleted event is discarded evt = Mock() evt.code2str.return_value = "deleted" evt.filename = os.path.join(datastore, "test.txt") eset.handle_event(evt) self.assertFalse(eset.entry_init.called) self.assertItemsEqual(eset.entries, dict()) for hdlr in Bcfg2.Options.setup.cfg_handlers: self.assertFalse(hdlr.handles.called) self.assertFalse(hdlr.ignore.called) # test creation of a new file for action in ["exists", "created", "changed"]: print("Testing handling of %s events" % action) evt = Mock() evt.code2str.return_value = action evt.filename = os.path.join(datastore, 
"test.txt") # test with no handler that handles for hdlr in Bcfg2.Options.setup.cfg_handlers: hdlr.handles.return_value = False hdlr.ignore.return_value = False reset() eset.handle_event(evt) self.assertFalse(eset.entry_init.called) self.assertItemsEqual(eset.entries, dict()) for hdlr in Bcfg2.Options.setup.cfg_handlers: hdlr.handles.assert_called_with(evt, basename=eset.path) hdlr.ignore.assert_called_with(evt, basename=eset.path) # test with a handler that handles the entry reset() Bcfg2.Options.setup.cfg_handlers[-1].handles.return_value = True eset.handle_event(evt) eset.entry_init.assert_called_with(evt, Bcfg2.Options.setup.cfg_handlers[-1]) for hdlr in Bcfg2.Options.setup.cfg_handlers: hdlr.handles.assert_called_with(evt, basename=eset.path) if not hdlr.return_value: hdlr.ignore.assert_called_with(evt, basename=eset.path) # test with a handler that ignores the entry before one # that handles it reset() Bcfg2.Options.setup.cfg_handlers[0].ignore.return_value = True eset.handle_event(evt) self.assertFalse(eset.entry_init.called) Bcfg2.Options.setup.cfg_handlers[0].handles.assert_called_with( evt, basename=eset.path) Bcfg2.Options.setup.cfg_handlers[0].ignore.assert_called_with( evt, basename=eset.path) for hdlr in Bcfg2.Options.setup.cfg_handlers[1:]: self.assertFalse(hdlr.handles.called) self.assertFalse(hdlr.ignore.called) # test changed event with an entry that already exists reset() evt = Mock() evt.code2str.return_value = "changed" evt.filename = os.path.join(datastore, "test.txt") eset.entries[evt.filename] = Mock() eset.handle_event(evt) self.assertFalse(eset.entry_init.called) for hdlr in Bcfg2.Options.setup.cfg_handlers: self.assertFalse(hdlr.handles.called) self.assertFalse(hdlr.ignore.called) eset.entries[evt.filename].handle_event.assert_called_with(evt) # test deleted event with an entry that already exists reset() evt.code2str.return_value = "deleted" eset.handle_event(evt) self.assertFalse(eset.entry_init.called) for hdlr in Bcfg2.Options.setup.cfg_handlers: self.assertFalse(hdlr.handles.called) self.assertFalse(hdlr.ignore.called) self.assertItemsEqual(eset.entries, dict()) def test_get_matching(self): eset = self.get_obj() eset.get_handlers = Mock() metadata = Mock() self.assertEqual(eset.get_matching(metadata), eset.get_handlers.return_value) eset.get_handlers.assert_called_with(metadata, CfgGenerator) @patch("Bcfg2.Server.Plugin.EntrySet.entry_init") def test_entry_init(self, mock_entry_init): eset = self.get_obj() eset.entries = dict() evt = Mock() evt.filename = "test.txt" handler = Mock() handler.__basenames__ = [] handler.__extensions__ = [] handler.deprecated = False handler.experimental = False handler.__specific__ = True # test handling an event with the parent entry_init eset.entry_init(evt, handler) mock_entry_init.assert_called_with(eset, evt, entry_type=handler, specific=handler.get_regex.return_value) self.assertItemsEqual(eset.entries, dict()) # test handling the event with a Cfg handler handler.__specific__ = False eset.entry_init(evt, handler) handler.assert_called_with(os.path.join(eset.path, evt.filename)) self.assertItemsEqual(eset.entries, {evt.filename: handler.return_value}) handler.return_value.handle_event.assert_called_with(evt) # test handling an event for an entry that already exists with # a Cfg handler handler.reset_mock() eset.entry_init(evt, handler) self.assertFalse(handler.called) self.assertItemsEqual(eset.entries, {evt.filename: handler.return_value}) eset.entries[evt.filename].handle_event.assert_called_with(evt) 
@patch("Bcfg2.Server.Plugins.Cfg.u_str") @patch("Bcfg2.Server.Plugins.Cfg.b64encode") def test_bind_entry(self, mock_b64encode, mock_u_str): mock_u_str.side_effect = lambda x: x Bcfg2.Options.setup.cfg_validation = False eset = self.get_obj() eset.bind_info_to_entry = Mock() eset._generate_data = Mock() eset.get_handlers = Mock() eset._validate_data = Mock() eset.setup = dict(validate=False) def reset(): mock_b64encode.reset_mock() mock_u_str.reset_mock() eset.bind_info_to_entry.reset_mock() eset._generate_data.reset_mock() eset.get_handlers.reset_mock() eset._validate_data.reset_mock() return lxml.etree.Element("Path", name="/test.txt") entry = reset() metadata = Mock() # test basic entry, no validation, no filters, etc. eset._generate_data.return_value = ("data", None) eset.get_handlers.return_value = [] bound = eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) self.assertFalse(eset._validate_data.called) expected = lxml.etree.Element("Path", name="/test.txt") expected.text = "data" self.assertXMLEqual(bound, expected) self.assertEqual(bound, entry) # test empty entry entry = reset() eset._generate_data.return_value = ("", None) bound = eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) self.assertFalse(eset._validate_data.called) expected = lxml.etree.Element("Path", name="/test.txt", empty="true") self.assertXMLEqual(bound, expected) self.assertEqual(bound, entry) # test filters entry = reset() generator = Mock() generator.specific = Specificity(all=True) eset._generate_data.return_value = ("initial data", generator) filters = [Mock(), Mock()] filters[0].modify_data.return_value = "modified data" filters[1].modify_data.return_value = "final data" eset.get_handlers.return_value = filters bound = eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) filters[0].modify_data.assert_called_with(entry, metadata, "initial data") filters[1].modify_data.assert_called_with(entry, metadata, "modified data") self.assertFalse(eset._validate_data.called) expected = lxml.etree.Element("Path", name="/test.txt") expected.text = "final data" self.assertXMLEqual(bound, expected) # test base64 encoding entry = reset() entry.set("encoding", "base64") mock_b64encode.return_value = "base64 data" eset.get_handlers.return_value = [] eset._generate_data.return_value = ("data", None) bound = eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) self.assertFalse(eset._validate_data.called) mock_b64encode.assert_called_with("data") self.assertFalse(mock_u_str.called) expected = lxml.etree.Element("Path", name="/test.txt", encoding="base64") expected.text = "base64 data" self.assertXMLEqual(bound, expected) self.assertEqual(bound, entry) # test successful validation entry = reset() Bcfg2.Options.setup.cfg_validation = True bound = eset.bind_entry(entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) eset._validate_data.assert_called_with(entry, metadata, "data") expected = lxml.etree.Element("Path", name="/test.txt") expected.text = "data" self.assertXMLEqual(bound, expected) self.assertEqual(bound, entry) # test failed validation entry = reset() 
eset._validate_data.side_effect = CfgVerificationError self.assertRaises(PluginExecutionError, eset.bind_entry, entry, metadata) eset.bind_info_to_entry.assert_called_with(entry, metadata) eset._generate_data.assert_called_with(entry, metadata) eset._validate_data.assert_called_with(entry, metadata, "data") def test_get_handlers(self): eset = self.get_obj() eset.entries['test1.txt'] = CfgInfo("test1.txt") eset.entries['test2.txt'] = CfgGenerator("test2.txt", Mock()) eset.entries['test2.txt'].specific.matches.return_value = True eset.entries['test3.txt'] = CfgInfo("test3.txt") eset.entries['test4.txt'] = CfgGenerator("test4.txt", Mock()) eset.entries['test4.txt'].specific.matches.return_value = False eset.entries['test5.txt'] = CfgGenerator("test5.txt", Mock()) eset.entries['test5.txt'].specific.matches.return_value = True eset.entries['test6.txt'] = CfgVerifier("test6.txt", Mock()) eset.entries['test6.txt'].specific.matches.return_value = True eset.entries['test7.txt'] = CfgFilter("test7.txt", Mock()) eset.entries['test7.txt'].specific.matches.return_value = False def reset(): for e in eset.entries.values(): if hasattr(e.specific, "reset_mock"): e.specific.reset_mock() metadata = Mock() self.assertItemsEqual(eset.get_handlers(metadata, CfgGenerator), [eset.entries['test2.txt'], eset.entries['test5.txt']]) for ename in ['test2.txt', 'test4.txt', 'test5.txt']: eset.entries[ename].specific.matches.assert_called_with(metadata) for ename in ['test6.txt', 'test7.txt']: self.assertFalse(eset.entries[ename].specific.matches.called) reset() self.assertItemsEqual(eset.get_handlers(metadata, CfgInfo), [eset.entries['test1.txt'], eset.entries['test3.txt']]) for entry in eset.entries.values(): if hasattr(entry.specific.matches, "called"): self.assertFalse(entry.specific.matches.called) reset() self.assertItemsEqual(eset.get_handlers(metadata, CfgVerifier), [eset.entries['test6.txt']]) eset.entries['test6.txt'].specific.matches.assert_called_with(metadata) for ename, entry in eset.entries.items(): if (ename != 'test6.txt' and hasattr(entry.specific.matches, "called")): self.assertFalse(entry.specific.matches.called) reset() self.assertItemsEqual(eset.get_handlers(metadata, CfgFilter), []) eset.entries['test7.txt'].specific.matches.assert_called_with(metadata) for ename, entry in eset.entries.items(): if (ename != 'test7.txt' and hasattr(entry.specific.matches, "called")): self.assertFalse(entry.specific.matches.called) reset() self.assertItemsEqual(eset.get_handlers(metadata, Mock), []) for ename, entry in eset.entries.items(): if hasattr(entry.specific.matches, "called"): self.assertFalse(entry.specific.matches.called) @patch("Bcfg2.Server.Plugins.Cfg.CfgDefaultInfo") def test_bind_info_to_entry(self, mock_DefaultInfo): eset = self.get_obj() eset.get_handlers = Mock() eset.get_handlers.return_value = [] metadata = Mock() def reset(): eset.get_handlers.reset_mock() mock_DefaultInfo.reset_mock() return lxml.etree.Element("Path", name="/test.txt") # test with no info handlers entry = reset() eset.bind_info_to_entry(entry, metadata) eset.get_handlers.assert_called_with(metadata, CfgInfo) mock_DefaultInfo.return_value.bind_info_to_entry.assert_called_with( entry, metadata) self.assertEqual(entry.get("type"), "file") # test with one info handler entry = reset() handler = Mock() eset.get_handlers.return_value = [handler] eset.bind_info_to_entry(entry, metadata) eset.get_handlers.assert_called_with(metadata, CfgInfo) mock_DefaultInfo.return_value.bind_info_to_entry.assert_called_with( entry, metadata) 
handler.bind_info_to_entry.assert_called_with(entry, metadata) self.assertEqual(entry.get("type"), "file") # test with more than one info handler entry = reset() handlers = [Mock(), Mock()] eset.get_handlers.return_value = handlers eset.bind_info_to_entry(entry, metadata) eset.get_handlers.assert_called_with(metadata, CfgInfo) mock_DefaultInfo.return_value.bind_info_to_entry.assert_called_with( entry, metadata) # we don't care which handler gets called as long as exactly # one of them does called = 0 for handler in handlers: if handler.bind_info_to_entry.called: handler.bind_info_to_entry.assert_called_with(entry, metadata) called += 1 self.assertEqual(called, 1) self.assertEqual(entry.get("type"), "file") def test_create_data(self): eset = self.get_obj() eset.best_matching = Mock() creator = Mock() creator.create_data.return_value = "data" eset.best_matching.return_value = creator eset.get_handlers = Mock() entry = lxml.etree.Element("Path", name="/test.txt", mode="0640") metadata = Mock() def reset(): eset.best_matching.reset_mock() eset.get_handlers.reset_mock() # test success self.assertEqual(eset._create_data(entry, metadata), "data") eset.get_handlers.assert_called_with(metadata, CfgCreator) eset.best_matching.assert_called_with(metadata, eset.get_handlers.return_value) # test failure to create data reset() creator.create_data.side_effect = CfgCreationError self.assertRaises(PluginExecutionError, eset._create_data, entry, metadata) def test_generate_data(self): eset = self.get_obj() eset.best_matching = Mock() eset._create_data = Mock() generator = Mock() generator.get_data.return_value = "data" eset.best_matching.return_value = generator eset.get_handlers = Mock() entry = lxml.etree.Element("Path", name="/test.txt", mode="0640") metadata = Mock() def reset(): eset.best_matching.reset_mock() eset.get_handlers.reset_mock() eset._create_data.reset_mock() # test success self.assertEqual(eset._generate_data(entry, metadata)[0], "data") eset.get_handlers.assert_called_with(metadata, CfgGenerator) eset.best_matching.assert_called_with(metadata, eset.get_handlers.return_value) self.assertFalse(eset._create_data.called) # test failure to generate data reset() generator.get_data.side_effect = OSError self.assertRaises(PluginExecutionError, eset._generate_data, entry, metadata) # test no generator found reset() eset.best_matching.side_effect = PluginExecutionError self.assertEqual(eset._generate_data(entry, metadata), (eset._create_data.return_value, None)) eset.get_handlers.assert_called_with(metadata, CfgGenerator) eset.best_matching.assert_called_with(metadata, eset.get_handlers.return_value) eset._create_data.assert_called_with(entry, metadata) def test_validate_data(self): class MockChild1(Mock): pass class MockChild2(Mock): pass eset = self.get_obj() eset.get_handlers = Mock() handlers1 = [MockChild1(), MockChild1()] handlers2 = [MockChild2()] eset.get_handlers.return_value = [handlers1[0], handlers2[0], handlers1[1]] eset.best_matching = Mock() eset.best_matching.side_effect = lambda m, v: v[0] entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() data = "data" eset._validate_data(entry, metadata, data) eset.get_handlers.assert_called_with(metadata, CfgVerifier) self.assertItemsEqual(eset.best_matching.call_args_list, [call(metadata, handlers1), call(metadata, handlers2)]) handlers1[0].verify_entry.assert_called_with(entry, metadata, data) handlers2[0].verify_entry.assert_called_with(entry, metadata, data) def test_specificity_from_filename(self): pass class 
TestCfg(TestGroupSpool, TestPullTarget): test_obj = Cfg def setUp(self): TestGroupSpool.setUp(self) TestPullTarget.setUp(self) set_setup_default("cfg_handlers", []) def get_obj(self, core=None): if core is None: core = Mock() return TestGroupSpool.get_obj(self, core=core) def test_has_generator(self): cfg = self.get_obj() cfg.entries = dict() entry = lxml.etree.Element("Path", name="/test.txt") metadata = Mock() self.assertFalse(cfg.has_generator(entry, metadata)) eset = Mock() eset.get_handlers.return_value = [] cfg.entries[entry.get("name")] = eset self.assertFalse(cfg.has_generator(entry, metadata)) eset.get_handlers.assert_called_with(metadata, CfgGenerator) eset.get_handlers.reset_mock() eset.get_handlers.return_value = [Mock()] self.assertTrue(cfg.has_generator(entry, metadata)) eset.get_handlers.assert_called_with(metadata, CfgGenerator) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/__init__.py000066400000000000000000000000001303523157100261710ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestDecisions.py000066400000000000000000000040051303523157100256640ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Decisions import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestStructFile, TestPlugin, TestDecision class TestDecisionFile(TestStructFile): test_obj = DecisionFile def test_get_decisions(self): df = self.get_obj() metadata = Mock() df.xdata = None self.assertItemsEqual(df.get_decisions(metadata), []) df.xdata = lxml.etree.Element("Decisions") df.XMLMatch = Mock() df.XMLMatch.return_value = lxml.etree.Element("Decisions") lxml.etree.SubElement(df.XMLMatch.return_value, "Decision", type="Service", name='*') lxml.etree.SubElement(df.XMLMatch.return_value, "Decision", type="Path", name='/etc/apt/apt.conf') self.assertItemsEqual(df.get_decisions(metadata), [("Service", '*'), ("Path", '/etc/apt/apt.conf')]) df.XMLMatch.assert_called_with(metadata) class TestDecisions(TestPlugin, TestDecision): test_obj = Decisions def test_GetDecisions(self): d = self.get_obj() d.whitelist = Mock() d.blacklist = Mock() metadata = Mock() self.assertEqual(d.GetDecisions(metadata, "whitelist"), d.whitelist.get_decisions.return_value) d.whitelist.get_decisions.assert_called_with(metadata) self.assertEqual(d.GetDecisions(metadata, "blacklist"), d.blacklist.get_decisions.return_value) d.blacklist.get_decisions.assert_called_with(metadata) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestDefaults.py000066400000000000000000000065231303523157100255220ustar00rootroot00000000000000import os import sys import copy import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Defaults import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestRules import TestRules from Testinterfaces import TestGoalValidator class TestDefaults(TestRules, TestGoalValidator): test_obj = Defaults def 
setUp(self): TestRules.setUp(self) set_setup_default("defaults_replace_name", True) def get_obj(self, *args, **kwargs): return TestRules.get_obj(self, *args, **kwargs) def test_HandlesEntry(self): d = self.get_obj() self.assertFalse(d.HandlesEntry(Mock(), Mock())) @patch("Bcfg2.Server.Plugin.helpers.XMLDirectoryBacked.HandleEvent") def test_HandleEvent(self, mock_HandleEvent): d = self.get_obj() evt = Mock() d.HandleEvent(evt) mock_HandleEvent.assert_called_with(d, evt) def test_validate_goals(self): d = self.get_obj() d.BindEntry = Mock() metadata = Mock() entries = [] config = lxml.etree.Element("Configuration") b1 = lxml.etree.SubElement(config, "Bundle") entries.append(lxml.etree.SubElement(b1, "Path", name="/foo")) entries.append(lxml.etree.SubElement(b1, "Path", name="/bar")) b2 = lxml.etree.SubElement(config, "Bundle") entries.append(lxml.etree.SubElement(b2, "Package", name="quux")) d.validate_goals(metadata, config) self.assertItemsEqual(d.BindEntry.call_args_list, [call(e, metadata) for e in entries]) def test__matches_regex_disabled(self): """ cannot disable regex in Defaults plugin """ pass def set_regex_enabled(self, rules_obj, state): pass def test__regex_enabled(self): r = self.get_obj() self.assertTrue(r._regex_enabled) def _do_test(self, name, groups=None): if groups is None: groups = [] d = self.get_obj() metadata = Mock(groups=groups) config = lxml.etree.Element("Configuration") struct = lxml.etree.SubElement(config, "Bundle", name=name) entry = copy.deepcopy(self.abstract[name]) struct.append(entry) d.validate_goals(metadata, config) self.assertXMLEqual(entry, self.concrete[name]) def _do_test_failure(self, name, groups=None, handles=None): if groups is None: groups = [] d = self.get_obj() metadata = Mock(groups=groups) config = lxml.etree.Element("Configuration") struct = lxml.etree.SubElement(config, "Bundle", name=name) orig = copy.deepcopy(self.abstract[name]) entry = copy.deepcopy(self.abstract[name]) struct.append(entry) d.validate_goals(metadata, config) self.assertXMLEqual(entry, orig) def test_regex(self): self._do_test('regex') def test_replace_name(self): Bcfg2.Options.setup.defaults_replace_name = True self._do_test('replace_name') Bcfg2.Options.setup.defaults_replace_name = False testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestGroupPatterns.py000066400000000000000000000130641303523157100265660ustar00rootroot00000000000000import os import sys import lxml.etree import Bcfg2.Server.Plugin from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.GroupPatterns import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestXMLFileBacked, TestPlugin, TestConnector class TestPatternMap(Bcfg2TestCase): def test_ranges(self): """ test processing NameRange patterns """ tests = [("foo[[1-5]]", ["foo1", "foo2", "foo5"], ["foo", "foo0", "foo10"]), ("[[10-99]]foo", ["10foo", "99foo", "25foo"], ["foo", "1foo", "999foo", "110foo"]), ("foo[[1,3,5-10]]bar", ["foo1bar", "foo7bar", "foo10bar"], ["foo2bar", "foobar", "foo3", "5bar"]), ("[[9-15]]foo[[16-20]]", ["9foo18", "13foo17"], ["8foo21", "12foo21", "8foo18", "16foo16", "15foo15", "29foo20", "9foo200", "29foo200"])] groups = MagicMock() for rng, inc, exc in tests: pmap = PatternMap(None, rng, groups) for test in inc: 
self.assertEqual(pmap.process(test), groups) for test in exc: self.assertIsNone(pmap.process(test)) def test_simple_patterns(self): """ test processing NamePatterns without backreferences """ tests = [("foo.*", ["foo", "foobar", "barfoo", "barfoobar"], ["bar", "fo0"]), ("^[A-z]fooo?$", ["Afoo", "bfooo"], ["foo", "fooo", "AAfoo", "Afoooo"])] groups = ["a", "b", "c"] for rng, inc, exc in tests: pmap = PatternMap(rng, None, groups) for test in inc: self.assertItemsEqual(pmap.process(test), groups) for test in exc: self.assertIsNone(pmap.process(test)) def test_backref_patterns(self): """ test NamePatterns with backreferences """ tests = [("foo(.*)", ['a', 'a$1', '$1a', '$$', '$a', '$1'], {"foo": ['a', 'a', 'a', '$$', '$a', ''], "foooOOo": ['a', 'aoOOo', 'oOOoa', '$$', '$a', 'oOOo'], "barfoo$1": ['a', 'a$1', '$1a', '$$', '$a', '$1']}), ("^([a-z])foo(.+)", ['a', 'a$1', '$1a$2', '$1$$2', '$2'], {"foo": None, "afooa": ['a', 'aa', 'aaa', 'a$a', 'a'], "bfoobar": ['a', 'ab', 'babar', 'b$bar', 'bar']})] for rng, groups, cases in tests: pmap = PatternMap(rng, None, groups) for name, ret in cases.items(): if ret is None: self.assertIsNone(pmap.process(name)) else: self.assertItemsEqual(pmap.process(name), ret) class TestPatternFile(TestXMLFileBacked): test_obj = PatternFile should_monitor = True def get_obj(self, path=None, fam=None, core=None, should_monitor=True): if path is None: path = self.path if fam and not core: core = Mock() core.fam = fam elif not core: core = Mock() @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()) def inner(): return self.test_obj(path, core=core) return inner() @patch("Bcfg2.Server.Plugins.GroupPatterns.PatternMap") def test_Index(self, mock_PatternMap): TestXMLFileBacked.test_Index(self) core = Mock() pf = self.get_obj(core=core) pf.data = """ foo.* test1 test2 foo[[1-5]] test3 """ core.metadata_cache_mode = 'aggressive' pf.Index() core.metadata_cache.expire.assert_called_with() self.assertItemsEqual(mock_PatternMap.call_args_list, [call("foo.*", None, ["test1", "test2"]), call(None, "foo[[1-5]]", ["test3"])]) def test_process_patterns(self): pf = self.get_obj() pf.patterns = [Mock(), Mock(), Mock()] pf.patterns[0].process.return_value = ["a", "b"] pf.patterns[1].process.return_value = None pf.patterns[2].process.return_value = ["b", "c"] self.assertItemsEqual(pf.process_patterns("foo.example.com"), ["a", "b", "b", "c"]) for pat in pf.patterns: pat.process.assert_called_with("foo.example.com") class TestGroupPatterns(TestPlugin, TestConnector): test_obj = GroupPatterns def get_obj(self, core=None): @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() def test_get_additional_groups(self): gp = self.get_obj() gp.config = Mock() metadata = Mock() self.assertEqual(gp.get_additional_groups(metadata), gp.config.process_patterns.return_value) gp.config.process_patterns.assert_called_with(metadata.hostname) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py000066400000000000000000002045071303523157100254750ustar00rootroot00000000000000import os import sys import copy import time import socket import lxml.etree import Bcfg2.Server import Bcfg2.Server.Plugin from mock import Mock, MagicMock, patch # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if 
os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from Bcfg2.Server.Plugins.Metadata import load_django_models from TestPlugin import TestXMLFileBacked, TestMetadata as _TestMetadata, \ TestClientRunHooks, TestDatabaseBacked load_django_models() from Bcfg2.Server.Plugins.Metadata import * def get_clients_test_tree(): return lxml.etree.XML(''' ''').getroottree() def get_groups_test_tree(): return lxml.etree.XML(''' ''').getroottree() def get_metadata_object(core=None): if core is None: core = Mock() core.metadata_cache = MagicMock() set_setup_default("password") @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()) def inner(): return Metadata(core) return inner() class TestMetadataDB(DBModelTestCase): if HAS_DJANGO: models = [MetadataClientModel] class TestClientVersions(TestDatabaseBacked): test_clients = dict(client1="1.2.0", client2="1.2.2", client3="1.3.0pre1", client4="1.1.0", client5=None, client6=None) @skipUnless(HAS_DJANGO, "Django not found") def setUp(self): TestDatabaseBacked.setUp(self) self.test_obj = ClientVersions self.syncdb(TestMetadataDB) for client, version in self.test_clients.items(): MetadataClientModel(hostname=client, version=version).save() def test__contains(self): v = self.get_obj() self.assertIn("client1", v) self.assertIn("client5", v) self.assertNotIn("client__contains", v) def test_keys(self): v = self.get_obj() self.assertItemsEqual(self.test_clients.keys(), v.keys()) def test__setitem(self): v = self.get_obj() # test setting version of existing client v["client1"] = "1.2.3" self.assertIn("client1", v) self.assertEqual(v['client1'], "1.2.3") client = MetadataClientModel.objects.get(hostname="client1") self.assertEqual(client.version, "1.2.3") # test adding new client new = "client__setitem" v[new] = "1.3.0" self.assertIn(new, v) self.assertEqual(v[new], "1.3.0") client = MetadataClientModel.objects.get(hostname=new) self.assertEqual(client.version, "1.3.0") # test adding new client with no version new2 = "client__setitem_2" v[new2] = None self.assertIn(new2, v) self.assertEqual(v[new2], None) client = MetadataClientModel.objects.get(hostname=new2) self.assertEqual(client.version, None) def test__getitem(self): v = self.get_obj() # test getting existing client self.assertEqual(v['client2'], "1.2.2") self.assertIsNone(v['client5']) # test exception on nonexistent client expected = KeyError try: v['clients__getitem'] except expected: pass except: err = sys.exc_info()[1] self.assertFalse(True, "%s raised instead of %s" % (err.__class__.__name__, expected.__class__.__name__)) else: self.assertFalse(True, "%s not raised" % expected.__class__.__name__) def test__len(self): v = self.get_obj() self.assertEqual(len(v), MetadataClientModel.objects.count()) def test__iter(self): v = self.get_obj() self.assertItemsEqual([h for h in iter(v)], v.keys()) def test__delitem(self): v = self.get_obj() # test adding new client new = "client__delitem" v[new] = "1.3.0" del v[new] self.assertIn(new, v) self.assertIsNone(v[new]) class TestXMLMetadataConfig(TestXMLFileBacked): test_obj = XMLMetadataConfig path = os.path.join(datastore, 'Metadata', 'clients.xml') def get_obj(self, basefile="clients.xml", core=None): self.metadata = get_metadata_object(core=core) @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()) def inner(): return XMLMetadataConfig(self.metadata, basefile) return inner() 
@patch("Bcfg2.Server.FileMonitor.get_fam", Mock()) def test__init(self): xmc = self.get_obj() self.assertNotIn(call(xmc.basefile), xmc.fam.AddMonitor.call_args_list) def test_xdata(self): config = self.get_obj() expected = Bcfg2.Server.Plugin.MetadataRuntimeError try: config.xdata except expected: pass except: err = sys.exc_info()[1] self.assertFalse(True, "%s raised instead of %s" % (err.__class__.__name__, expected.__class__.__name__)) else: self.assertFalse(True, "%s not raised" % expected.__class__.__name__) pass config.data = "" self.assertEqual(config.xdata, "") def test_base_xdata(self): config = self.get_obj() # we can't use assertRaises here because base_xdata is a property expected = Bcfg2.Server.Plugin.MetadataRuntimeError try: config.base_xdata except expected: pass except: err = sys.exc_info()[1] self.assertFalse(True, "%s raised instead of %s" % (err.__class__.__name__, expected.__class__.__name__)) else: self.assertFalse(True, "%s not raised" % expected.__class__.__name__) pass config.basedata = "" self.assertEqual(config.base_xdata, "") def test_add_monitor(self): config = self.get_obj() config.fam = Mock() fname = "test.xml" fpath = os.path.join(self.metadata.data, fname) config.extras = [] config.add_monitor(fpath) config.fam.AddMonitor.assert_called_with(fpath, config.metadata) self.assertItemsEqual(config.extras, [fpath]) def test_Index(self): # Index() isn't used on XMLMetadataConfig objects pass @patch("lxml.etree.parse") @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig._follow_xincludes") def test_load_xml(self, mock_follow, mock_parse): config = self.get_obj("clients.xml") def reset(): mock_parse.reset_mock() mock_follow.reset_mock() config.data = None config.basedata = None reset() config.load_xml() mock_follow.assert_called_with(xdata=mock_parse.return_value) mock_parse.assert_called_with(os.path.join(config.basedir, "clients.xml"), parser=Bcfg2.Server.XMLParser) self.assertFalse(mock_parse.return_value.xinclude.called) self.assertEqual(config.data, mock_parse.return_value) self.assertIsNotNone(config.basedata) reset() mock_parse.side_effect = lxml.etree.XMLSyntaxError(None, 0, 0, 0) config.load_xml() mock_parse.assert_called_with(os.path.join(config.basedir, "clients.xml"), parser=Bcfg2.Server.XMLParser) self.assertIsNone(config.data) self.assertIsNone(config.basedata) reset() mock_parse.side_effect = None def follow_xincludes(xdata=None): config.extras = [Mock(), Mock()] mock_follow.side_effect = follow_xincludes config.load_xml() mock_follow.assert_called_with(xdata=mock_parse.return_value) mock_parse.assert_called_with(os.path.join(config.basedir, "clients.xml"), parser=Bcfg2.Server.XMLParser) mock_parse.return_value.xinclude.assert_any_call() self.assertEqual(config.data, mock_parse.return_value) self.assertIsNotNone(config.basedata) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml") def test_write(self, mock_write_xml): config = self.get_obj("clients.xml") config.basedata = "" config.write() mock_write_xml.assert_called_with(os.path.join(self.metadata.data, "clients.xml"), "") @patch('Bcfg2.Utils.locked', Mock(return_value=False)) @patch('fcntl.lockf', Mock()) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml") @patch('os.open') @patch('os.fdopen') @patch('os.unlink') @patch('os.rename') @patch('os.path.islink') @patch('os.readlink') def test_write_xml(self, mock_readlink, mock_islink, mock_rename, mock_unlink, mock_fdopen, mock_open, mock_load_xml): fname = "clients.xml" config = self.get_obj(fname) fpath = 
os.path.join(self.metadata.data, fname) tmpfile = "%s.new" % fpath linkdest = os.path.join(self.metadata.data, "client-link.xml") def reset(): mock_readlink.reset_mock() mock_islink.reset_mock() mock_rename.reset_mock() mock_unlink.reset_mock() mock_fdopen.reset_mock() mock_open.reset_mock() mock_load_xml.reset_mock() mock_islink.return_value = False # basic test - everything works config.write_xml(fpath, get_clients_test_tree()) mock_open.assert_called_with(tmpfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY) mock_fdopen.assert_called_with(mock_open.return_value, 'w') self.assertTrue(mock_fdopen.return_value.write.called) mock_islink.assert_called_with(fpath) mock_rename.assert_called_with(tmpfile, fpath) mock_load_xml.assert_called_with() # test: clients.xml.new is locked the first time we write it def rv(fname, mode): mock_open.side_effect = None raise OSError(17, fname) reset() mock_open.side_effect = rv config.write_xml(fpath, get_clients_test_tree()) self.assertItemsEqual(mock_open.call_args_list, [call(tmpfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY), call(tmpfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY)]) mock_fdopen.assert_called_with(mock_open.return_value, 'w') self.assertTrue(mock_fdopen.return_value.write.called) mock_islink.assert_called_with(fpath) mock_rename.assert_called_with(tmpfile, fpath) mock_load_xml.assert_called_with() # test writing a symlinked clients.xml reset() mock_open.side_effect = None mock_islink.return_value = True mock_readlink.return_value = linkdest config.write_xml(fpath, get_clients_test_tree()) mock_rename.assert_called_with(tmpfile, linkdest) mock_load_xml.assert_called_with() # test failure of os.rename() reset() mock_rename.side_effect = OSError self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, config.write_xml, fpath, get_clients_test_tree()) mock_unlink.assert_called_with(tmpfile) # test failure of file.write() reset() mock_open.return_value.write.side_effect = IOError self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, config.write_xml, fpath, get_clients_test_tree()) mock_unlink.assert_called_with(tmpfile) # test failure of os.open() (other than EEXIST) reset() mock_open.side_effect = OSError self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, config.write_xml, fpath, get_clients_test_tree()) # test failure of os.fdopen() reset() mock_fdopen.side_effect = OSError self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, config.write_xml, fpath, get_clients_test_tree()) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch('lxml.etree.parse') def test_find_xml_for_xpath(self, mock_parse): config = self.get_obj("groups.xml") config.basedata = get_groups_test_tree() xpath = "//Group[@name='group1']" self.assertItemsEqual(config.find_xml_for_xpath(xpath), dict(filename=os.path.join(self.metadata.data, "groups.xml"), xmltree=get_groups_test_tree(), xquery=get_groups_test_tree().xpath(xpath))) self.assertEqual(config.find_xml_for_xpath("//boguselement"), dict()) config.extras = [os.path.join(self.metadata.data, p) for p in ["foo.xml", "bar.xml", "clients.xml"]] def parse_side_effect(fname, parser=Bcfg2.Server.XMLParser): if fname == os.path.join(self.metadata.data, "clients.xml"): return get_clients_test_tree() else: return lxml.etree.XML("").getroottree() mock_parse.side_effect = parse_side_effect xpath = "//Client[@secure='true']" self.assertItemsEqual(config.find_xml_for_xpath(xpath), dict(filename=os.path.join(self.metadata.data, "clients.xml"), xmltree=get_clients_test_tree(), 
xquery=get_clients_test_tree().xpath(xpath))) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml") def test_HandleEvent(self, mock_load_xml): config = self.get_obj("groups.xml") evt = Mock() evt.filename = os.path.join(self.metadata.data, "groups.xml") evt.code2str = Mock(return_value="changed") self.assertTrue(config.HandleEvent(evt)) mock_load_xml.assert_called_with() class TestClientMetadata(Bcfg2TestCase): def test_inGroup(self): cm = ClientMetadata("client1", "group1", ["group1", "group2"], ["bundle1"], [], [], [], None, None, None, None) self.assertTrue(cm.inGroup("group1")) self.assertFalse(cm.inGroup("group3")) class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked): test_obj = Metadata def setUp(self): _TestMetadata.setUp(self) TestClientRunHooks.setUp(self) TestDatabaseBacked.setUp(self) Bcfg2.Options.setup.metadata_db = False Bcfg2.Options.setup.authentication = "cert+password" def get_obj(self, core=None): return get_metadata_object(core=core) @skipUnless(HAS_DJANGO, "Django not found") def test__use_db(self): # with the way we've set up our metadata tests, it's unweildy # to test _use_db. however, given the way get_obj works, if # there was a bug in _use_db it'd be almost certain to shake # out in the rest of the testing. pass def get_nonexistent_client(self, metadata, prefix="newclient"): if metadata is None: metadata = self.load_clients_data() i = 0 client_name = "%s%s" % (prefix, i) while client_name in metadata.clients: i += 1 client_name = "%s%s" % (prefix, i) return client_name @patch("Bcfg2.Server.FileMonitor.get_fam") def test__init(self, mock_get_fam): core = MagicMock() metadata = self.get_obj(core=core) self.assertEqual(len(metadata.states), 2) mock_get_fam.return_value.AddMonitor.assert_any_call( os.path.join(metadata.data, "groups.xml"), metadata) mock_get_fam.return_value.AddMonitor.assert_any_call( os.path.join(metadata.data, "clients.xml"), metadata) @patch('os.makedirs', Mock()) @patch('%s.open' % builtins) def test_init_repo(self, mock_open): Metadata.init_repo(datastore, groups_xml="groups", clients_xml="clients") mock_open.assert_any_call(os.path.join(datastore, "Metadata", "groups.xml"), "w") mock_open.assert_any_call(os.path.join(datastore, "Metadata", "clients.xml"), "w") def test_search_xdata(self): # test finding a node with the proper name metadata = self.get_obj() tree = get_groups_test_tree() res = metadata._search_xdata("Group", "group1", tree) self.assertIsInstance(res, lxml.etree._Element) self.assertEqual(res.get("name"), "group1") # test finding a node with the wrong name but correct alias metadata = self.get_obj() tree = get_clients_test_tree() res = metadata._search_xdata("Client", "alias3", tree, alias=True) self.assertIsInstance(res, lxml.etree._Element) self.assertNotEqual(res.get("name"), "alias3") # test failure finding a node metadata = self.get_obj() tree = get_clients_test_tree() res = metadata._search_xdata("Client", self.get_nonexistent_client(metadata), tree, alias=True) self.assertIsNone(res) def search_xdata(self, tag, name, tree, alias=False): metadata = self.get_obj() res = metadata._search_xdata(tag, name, tree, alias=alias) self.assertIsInstance(res, lxml.etree._Element) if not alias: self.assertEqual(res.get("name"), name) def test_search_group(self): # test finding a group with the proper name tree = get_groups_test_tree() self.search_xdata("Group", "group1", tree) def test_search_bundle(self): # test finding a bundle with the proper name tree = get_groups_test_tree() 
self.search_xdata("Bundle", "bundle1", tree) def test_search_client(self): # test finding a client with the proper name tree = get_clients_test_tree() self.search_xdata("Client", "client1", tree, alias=True) self.search_xdata("Client", "alias1", tree, alias=True) def test_add_group(self): metadata = self.get_obj() metadata.groups_xml.write = Mock() metadata.groups_xml.load_xml = Mock() metadata.groups_xml.data = lxml.etree.XML('').getroottree() metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.add_group("test1", dict()) metadata.groups_xml.write.assert_any_call() grp = metadata.search_group("test1", metadata.groups_xml.base_xdata) self.assertIsNotNone(grp) self.assertEqual(grp.attrib, dict(name='test1')) # have to call this explicitly -- usually load_xml does this # on FAM events metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.add_group("test2", dict(foo='bar')) metadata.groups_xml.write.assert_any_call() grp = metadata.search_group("test2", metadata.groups_xml.base_xdata) self.assertIsNotNone(grp) self.assertEqual(grp.attrib, dict(name='test2', foo='bar')) # have to call this explicitly -- usually load_xml does this # on FAM events metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.groups_xml.write.reset_mock() self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.add_group, "test1", dict()) self.assertFalse(metadata.groups_xml.write.called) def test_update_group(self): metadata = self.get_obj() metadata.groups_xml.write_xml = Mock() metadata.groups_xml.load_xml = Mock() metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree()) metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.update_group("group1", dict(foo="bar")) grp = metadata.search_group("group1", metadata.groups_xml.base_xdata) self.assertIsNotNone(grp) self.assertIn("foo", grp.attrib) self.assertEqual(grp.get("foo"), "bar") self.assertTrue(metadata.groups_xml.write_xml.called) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.update_group, "bogus_group", dict()) def test_remove_group(self): metadata = self.get_obj() metadata.groups_xml.write_xml = Mock() metadata.groups_xml.load_xml = Mock() metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree()) metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.remove_group("group5") grp = metadata.search_group("group5", metadata.groups_xml.base_xdata) self.assertIsNone(grp) self.assertTrue(metadata.groups_xml.write_xml.called) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.remove_group, "bogus_group") def test_add_bundle(self): metadata = self.get_obj() metadata.groups_xml.write = Mock() metadata.groups_xml.load_xml = Mock() metadata.groups_xml.data = lxml.etree.XML('').getroottree() metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.add_bundle("bundle1") metadata.groups_xml.write.assert_any_call() bundle = metadata.search_bundle("bundle1", metadata.groups_xml.base_xdata) self.assertIsNotNone(bundle) self.assertEqual(bundle.attrib, dict(name='bundle1')) # have to call this explicitly -- usually load_xml does this # on FAM events metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.groups_xml.write.reset_mock() self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.add_bundle, "bundle1") self.assertFalse(metadata.groups_xml.write.called) def test_remove_bundle(self): metadata = self.get_obj() 
metadata.groups_xml.write_xml = Mock() metadata.groups_xml.load_xml = Mock() metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree()) metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) metadata.remove_bundle("bundle1") grp = metadata.search_bundle("bundle1", metadata.groups_xml.base_xdata) self.assertIsNone(grp) self.assertTrue(metadata.groups_xml.write_xml.called) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.remove_bundle, "bogus_bundle") def test_add_client(self): metadata = self.get_obj() metadata.clients_xml.write = Mock() metadata.clients_xml.load_xml = Mock() metadata.clients_xml.data = lxml.etree.XML('').getroottree() metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data) new1 = self.get_nonexistent_client(metadata) new1_client = metadata.add_client(new1, dict()) metadata.clients_xml.write.assert_any_call() grp = metadata.search_client(new1, metadata.clients_xml.base_xdata) self.assertIsNotNone(grp) self.assertEqual(grp.attrib, dict(name=new1)) # have to call this explicitly -- usually load_xml does this # on FAM events metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data) metadata._handle_clients_xml_event(Mock()) new2 = self.get_nonexistent_client(metadata) metadata.add_client(new2, dict(foo='bar')) metadata.clients_xml.write.assert_any_call() grp = metadata.search_client(new2, metadata.clients_xml.base_xdata) self.assertIsNotNone(grp) self.assertEqual(grp.attrib, dict(name=new2, foo='bar')) # have to call this explicitly -- usually load_xml does this # on FAM events metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data) metadata.clients_xml.write.reset_mock() self.assertXMLEqual(metadata.add_client(new1, dict()), new1_client) self.assertFalse(metadata.clients_xml.write.called) def test_update_client(self): metadata = self.get_obj() metadata.clients_xml.write_xml = Mock() metadata.clients_xml.load_xml = Mock() metadata.clients_xml.data = copy.deepcopy(get_clients_test_tree()) metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data) metadata.update_client("client1", dict(foo="bar")) grp = metadata.search_client("client1", metadata.clients_xml.base_xdata) self.assertIsNotNone(grp) self.assertIn("foo", grp.attrib) self.assertEqual(grp.get("foo"), "bar") self.assertTrue(metadata.clients_xml.write_xml.called) new = self.get_nonexistent_client(metadata) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.update_client, new, dict()) def load_clients_data(self, metadata=None, xdata=None): if metadata is None: metadata = self.get_obj() metadata.clients_xml.data = \ xdata or copy.deepcopy(get_clients_test_tree()) metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data) evt = Mock() evt.filename = os.path.join(datastore, "Metadata", "clients.xml") evt.code2str = Mock(return_value="changed") metadata.HandleEvent(evt) return metadata def test_handle_clients_xml_event(self): metadata = self.get_obj() metadata.profiles = ["group1", "group2"] metadata.clients_xml = Mock() metadata.clients_xml.xdata = copy.deepcopy(get_clients_test_tree()) metadata._handle_clients_xml_event(Mock()) if not Bcfg2.Options.setup.metadata_db: self.assertItemsEqual(metadata.clients, dict([(c.get("name"), c.get("profile")) for c in get_clients_test_tree().findall("//Client")])) aliases = dict([(a.get("name"), a.getparent().get("name")) for a in get_clients_test_tree().findall("//Alias")]) self.assertItemsEqual(metadata.aliases, aliases) raliases = dict([(c.get("name"), set()) for c 
in get_clients_test_tree().findall("//Client")]) for alias in get_clients_test_tree().findall("//Alias"): raliases[alias.getparent().get("name")].add(alias.get("name")) self.assertItemsEqual(metadata.raliases, raliases) self.assertEqual(metadata.secure, [c.get("name") for c in get_clients_test_tree().findall("//Client[@secure='true']")]) self.assertEqual(metadata.floating, ["client1", "client10"]) addresses = dict([(c.get("address"), []) for c in get_clients_test_tree().findall("//*[@address]")]) raddresses = dict() for client in get_clients_test_tree().findall("//Client[@address]"): addresses[client.get("address")].append(client.get("name")) try: raddresses[client.get("name")].append(client.get("address")) except KeyError: raddresses[client.get("name")] = [client.get("address")] for alias in get_clients_test_tree().findall("//Alias[@address]"): addresses[alias.get("address")].append(alias.getparent().get("name")) try: raddresses[alias.getparent().get("name")].append(alias.get("address")) except KeyError: raddresses[alias.getparent().get("name")] = alias.get("address") self.assertItemsEqual(metadata.addresses, addresses) self.assertItemsEqual(metadata.raddresses, raddresses) self.assertTrue(metadata.states['clients.xml']) def load_groups_data(self, metadata=None, xdata=None): if metadata is None: metadata = self.get_obj() metadata.groups_xml.data = \ xdata or copy.deepcopy(get_groups_test_tree()) metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) evt = Mock() evt.filename = os.path.join(datastore, "Metadata", "groups.xml") evt.code2str = Mock(return_value="changed") metadata.HandleEvent(evt) return metadata def test_handle_groups_xml_event(self): metadata = self.get_obj() metadata.groups_xml = Mock() metadata.groups_xml.xdata = get_groups_test_tree() metadata._handle_groups_xml_event(Mock()) self.assertTrue(metadata.states['groups.xml']) self.assertTrue(metadata.groups['group1'].is_public) self.assertTrue(metadata.groups['group2'].is_public) self.assertFalse(metadata.groups['group3'].is_public) self.assertTrue(metadata.groups['group1'].is_profile) self.assertTrue(metadata.groups['group2'].is_profile) self.assertFalse(metadata.groups['group3'].is_profile) self.assertItemsEqual(metadata.groups.keys(), set(g.get("name") for g in get_groups_test_tree().findall("//Group"))) self.assertEqual(metadata.groups['group1'].category, 'category1') self.assertEqual(metadata.groups['group2'].category, 'category1') self.assertEqual(metadata.groups['group3'].category, 'category2') self.assertEqual(metadata.groups['group4'].category, 'category1') self.assertEqual(metadata.default, "group1") all_groups = set() negated_groups = set() for group in get_groups_test_tree().xpath("//Groups/Client//*") + \ get_groups_test_tree().xpath("//Groups/Group//*"): if group.tag == 'Group' and not group.getchildren(): if group.get("negate", "false").lower() == 'true': negated_groups.add(group.get("name")) else: all_groups.add(group.get("name")) self.assertItemsEqual(metadata.ordered_groups, all_groups) self.assertItemsEqual(metadata.group_membership.keys(), all_groups) self.assertItemsEqual(metadata.negated_groups.keys(), negated_groups) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_set_profile(self): metadata = self.get_obj() if 'clients.xml' in metadata.states: metadata.states['clients.xml'] = False self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, metadata.set_profile, None, None, None) self.load_groups_data(metadata=metadata) 
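        # with group and client data loaded, set_profile() should refuse to
        # assign a client to a group that cannot serve as a profile, raising
        # MetadataConsistencyError instead of silently accepting it.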
self.load_clients_data(metadata=metadata) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.set_profile, "client1", "group5", None) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.set_profile, "client1", "group3", None) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_set_profile_db(self): metadata = self.load_clients_data(metadata=self.load_groups_data()) if metadata._use_db: profile = "group1" client_name = self.get_nonexistent_client(metadata) metadata.set_profile(client_name, profile, None) self.assertIn(client_name, metadata.clients) self.assertRaises(Bcfg2.Server.Plugin.PluginExecutionError, metadata.set_profile, client_name, profile, None) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.add_client") @patch("Bcfg2.Server.Plugins.Metadata.Metadata.update_client") def test_set_profile_xml(self, mock_update_client, mock_add_client): metadata = self.load_clients_data(metadata=self.load_groups_data()) if not metadata._use_db: metadata.clients_xml.write = Mock() metadata.core.build_metadata = Mock() metadata.core.build_metadata.side_effect = \ lambda c: metadata.get_initial_metadata(c) metadata.set_profile("client1", "group2", None) mock_update_client.assert_called_with("client1", dict(profile="group2")) self.assertEqual(metadata.clientgroups["client1"], ["group2"]) metadata.clients_xml.write.reset_mock() new1 = self.get_nonexistent_client(metadata) metadata.set_profile(new1, "group1", None) mock_add_client.assert_called_with(new1, dict(profile="group1")) metadata.clients_xml.write.assert_any_call() self.assertEqual(metadata.clientgroups[new1], ["group1"]) metadata.clients_xml.write.reset_mock() new2 = self.get_nonexistent_client(metadata) metadata.session_cache[('1.2.3.6', None)] = (None, new2) metadata.set_profile("uuid_new", "group1", ('1.2.3.6', None)) mock_add_client.assert_called_with(new2, dict(uuid='uuid_new', profile="group1", address='1.2.3.6')) metadata.clients_xml.write.assert_any_call() self.assertEqual(metadata.clientgroups["uuid_new"], ["group1"]) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("socket.getnameinfo") def test_resolve_client(self, mock_getnameinfo): metadata = self.load_clients_data(metadata=self.load_groups_data()) metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3') self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3') self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.resolve_client, ('1.2.3.2', None)) self.assertEqual(metadata.resolve_client(('1.2.3.1', None)), 'client1') metadata.session_cache[('1.2.3.3', None)] = (time.time() - 100, 'client3') self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3') self.assertEqual(metadata.resolve_client(('1.2.3.3', None), cleanup_cache=True), 'client3') self.assertEqual(metadata.session_cache, dict()) mock_getnameinfo.return_value = ('client6', [], ['1.2.3.6']) self.assertEqual(metadata.resolve_client(('1.2.3.6', 6789)), 'client6') mock_getnameinfo.assert_called_with(('1.2.3.6', 6789), socket.NI_NAMEREQD) mock_getnameinfo.reset_mock() mock_getnameinfo.return_value = ('alias3', [], ['1.2.3.7']) self.assertEqual(metadata.resolve_client(('1.2.3.7', 6789)), 'client4') mock_getnameinfo.assert_called_with(('1.2.3.7', 6789), socket.NI_NAMEREQD) mock_getnameinfo.reset_mock() mock_getnameinfo.return_value = None mock_getnameinfo.side_effect = socket.herror 
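        # a failed reverse lookup (socket.herror) should surface as a
        # MetadataConsistencyError rather than escaping as a raw socket error.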
self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.resolve_client, ('1.2.3.8', 6789)) mock_getnameinfo.assert_called_with(('1.2.3.8', 6789), socket.NI_NAMEREQD) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.ClientMetadata") def test_get_initial_metadata(self, mock_clientmetadata): metadata = self.get_obj() if 'clients.xml' in metadata.states: metadata.states['clients.xml'] = False self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, metadata.get_initial_metadata, None) self.load_groups_data(metadata=metadata) self.load_clients_data(metadata=metadata) # test address, password metadata.get_initial_metadata("client1") mock_clientmetadata.assert_called_with("client1", "group1", set(["group1"]), set(), set(), set(["1.2.3.1"]), dict(category1='group1'), None, 'password2', None, metadata.query) # test address, bundles, category suppression metadata.get_initial_metadata("client2") mock_clientmetadata.assert_called_with("client2", "group2", set(["group2"]), set(["bundle1", "bundle2"]), set(), set(["1.2.3.2"]), dict(category1="group2"), None, None, None, metadata.query) # test aliases, address, uuid, password imd = metadata.get_initial_metadata("alias1") mock_clientmetadata.assert_called_with("client3", "group1", set(["group1"]), set(), set(['alias1']), set(["1.2.3.3"]), dict(category1="group1"), 'uuid1', 'password2', None, metadata.query) # test new client creation new1 = self.get_nonexistent_client(metadata) imd = metadata.get_initial_metadata(new1) mock_clientmetadata.assert_called_with(new1, "group1", set(["group1"]), set(), set(), set(), dict(category1="group1"), None, None, None, metadata.query) # test nested groups, address, per-client groups imd = metadata.get_initial_metadata("client8") mock_clientmetadata.assert_called_with("client8", "group1", set(["group1", "group8", "group9", "group10"]), set(), set(), set(["1.2.3.5"]), dict(category1="group1"), None, None, None, metadata.query) # test setting per-client groups, group negation, nested groups imd = metadata.get_initial_metadata("client9") mock_clientmetadata.assert_called_with("client9", "group2", set(["group2", "group8", "group11"]), set(["bundle1", "bundle2"]), set(), set(), dict(category1="group2"), None, "password3", None, metadata.query) # test new client with no default profile metadata.default = None new2 = self.get_nonexistent_client(metadata) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.get_initial_metadata, new2) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_merge_groups(self): metadata = self.get_obj() self.load_groups_data(metadata=metadata) self.load_clients_data(metadata=metadata) self.assertEqual(metadata._merge_groups("client1", set(["group1"]), categories=dict(group1="category1")), (set(["group1"]), dict(group1="category1"))) self.assertEqual(metadata._merge_groups("client8", set(["group1", "group8", "group9"]), categories=dict(group1="category1")), (set(["group1", "group8", "group9", "group10"]), dict(group1="category1"))) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_get_all_group_names(self): metadata = self.load_groups_data() self.assertItemsEqual(metadata.get_all_group_names(), set([g.get("name") for g in get_groups_test_tree().findall("//Group")])) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def 
test_get_all_groups_in_category(self): metadata = self.load_groups_data() self.assertItemsEqual(metadata.get_all_groups_in_category("category1"), set([g.get("name") for g in get_groups_test_tree().findall("//Group[@category='category1']")])) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_get_client_names_by_profiles(self): metadata = self.load_clients_data(metadata=self.load_groups_data()) metadata.core.build_metadata = Mock() metadata.core.build_metadata.side_effect = \ lambda c: metadata.get_initial_metadata(c) self.assertItemsEqual(metadata.get_client_names_by_profiles(["group2"]), [c.get("name") for c in get_clients_test_tree().findall("//Client[@profile='group2']")]) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_get_client_names_by_groups(self): metadata = self.load_clients_data(metadata=self.load_groups_data()) # this is not the best test in the world, since we mock # core.build_metadata to just build _initial_ metadata, which # is not at all the same thing. it turns out that mocking # this out without starting a Bcfg2 server is pretty # non-trivial, so this works-ish metadata.core.build_metadata = Mock() metadata.core.build_metadata.side_effect = \ lambda c: metadata.get_initial_metadata(c) self.assertItemsEqual(metadata.get_client_names_by_groups(["group2"]), [c.get("name") for c in get_clients_test_tree().findall("//Client[@profile='group2']")]) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_merge_additional_groups(self): metadata = self.load_clients_data(metadata=self.load_groups_data()) imd = metadata.get_initial_metadata("client2") # test adding a group excluded by categories oldgroups = imd.groups metadata.merge_additional_groups(imd, ["group4"]) self.assertEqual(imd.groups, oldgroups) # test adding a private group oldgroups = imd.groups metadata.merge_additional_groups(imd, ["group3"]) self.assertEqual(imd.groups, oldgroups) # test adding groups with bundles oldgroups = imd.groups oldbundles = imd.bundles metadata.merge_additional_groups(imd, ["group7"]) self.assertEqual(imd.groups, oldgroups.union(["group7"])) self.assertEqual(imd.bundles, oldbundles.union(["bundle3"])) # test adding groups with categories oldgroups = imd.groups metadata.merge_additional_groups(imd, ["group12"]) self.assertEqual(imd.groups, oldgroups.union(["group12"])) self.assertIn("category3", imd.categories) self.assertEqual(imd.categories["category3"], "group12") # test adding multiple groups imd = metadata.get_initial_metadata("client2") oldgroups = imd.groups metadata.merge_additional_groups(imd, ["group6", "group8"]) self.assertItemsEqual(imd.groups, oldgroups.union(["group6", "group8", "group9"])) # test adding a group that is not defined in groups.xml imd = metadata.get_initial_metadata("client2") oldgroups = imd.groups metadata.merge_additional_groups(imd, ["group6", "newgroup"]) self.assertItemsEqual(imd.groups, oldgroups.union(["group6", "newgroup"])) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) def test_merge_additional_data(self): metadata = self.load_clients_data(metadata=self.load_groups_data()) imd = metadata.get_initial_metadata("client1") # we need to use a unique attribute name for this test. 
this # is probably overkill, but it works pattern = "connector%d" for i in range(0, 100): connector = pattern % i if not hasattr(imd, connector): break self.assertFalse(hasattr(imd, connector), "Could not find unique connector name to test " "merge_additional_data()") metadata.merge_additional_data(imd, connector, "test data") self.assertEqual(getattr(imd, connector), "test data") self.assertIn(connector, imd.connectors) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client") def test_validate_client_address(self, mock_resolve_client): metadata = self.load_clients_data(metadata=self.load_groups_data()) self.assertTrue(metadata.validate_client_address("client1", (None, None))) self.assertTrue(metadata.validate_client_address("client2", ("1.2.3.2", None))) self.assertFalse(metadata.validate_client_address("client2", ("1.2.3.8", None))) self.assertTrue(metadata.validate_client_address("client4", ("1.2.3.2", None))) # this is upper case to ensure that case is folded properly in # validate_client_address() mock_resolve_client.return_value = "CLIENT4" self.assertTrue(metadata.validate_client_address("client4", ("1.2.3.7", None))) mock_resolve_client.assert_called_with(("1.2.3.7", None)) mock_resolve_client.reset_mock() self.assertFalse(metadata.validate_client_address("client5", ("1.2.3.5", None))) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.validate_client_address") @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client") def test_AuthenticateConnection(self, mock_resolve_client, mock_validate_client_address): metadata = self.load_clients_data(metadata=self.load_groups_data()) metadata.password = "password1" cert = dict(subject=[[("commonName", "client1")]]) mock_validate_client_address.return_value = False self.assertFalse(metadata.AuthenticateConnection(cert, "root", None, "1.2.3.1")) mock_validate_client_address.return_value = True self.assertTrue(metadata.AuthenticateConnection(cert, "root", None, "1.2.3.1")) # floating cert-auth clients add themselves to the cache self.assertIn("1.2.3.1", metadata.session_cache) self.assertEqual(metadata.session_cache["1.2.3.1"][1], "client1") cert = dict(subject=[[("commonName", "client7")]]) self.assertTrue(metadata.AuthenticateConnection(cert, "root", None, "1.2.3.4")) # non-floating cert-auth clients do not add themselves to the cache self.assertNotIn("1.2.3.4", metadata.session_cache) cert = dict(subject=[[("commonName", "client8")]]) mock_resolve_client.return_value = "client5" self.assertTrue(metadata.AuthenticateConnection(None, "root", "password1", "1.2.3.8")) mock_resolve_client.side_effect = \ Bcfg2.Server.Plugin.MetadataConsistencyError self.assertFalse(metadata.AuthenticateConnection(None, "root", "password1", "1.2.3.8")) # secure mode, no password self.assertFalse(metadata.AuthenticateConnection(None, 'client2', None, "1.2.3.2")) self.assertTrue(metadata.AuthenticateConnection(None, 'uuid1', "password1", "1.2.3.3")) # non-root, non-cert clients populate session cache self.assertIn("1.2.3.3", metadata.session_cache) self.assertEqual(metadata.session_cache["1.2.3.3"][1], "client3") # use alternate password self.assertTrue(metadata.AuthenticateConnection(None, 'client3', "password2", "1.2.3.3")) # test secure mode self.assertFalse(metadata.AuthenticateConnection(None, 'client9', "password1", "1.2.3.9")) self.assertTrue(metadata.AuthenticateConnection(None, 'client9', "password3", 
"1.2.3.9")) self.assertFalse(metadata.AuthenticateConnection(None, "client5", "password2", "1.2.3.7")) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.update_client") def test_end_statistics(self, mock_update_client): metadata = self.load_clients_data(metadata=self.load_groups_data()) md = Mock() md.hostname = "client6" metadata.end_statistics(md) mock_update_client.assert_called_with(md.hostname, dict(auth='cert')) mock_update_client.reset_mock() md.hostname = "client5" metadata.end_statistics(md) self.assertFalse(mock_update_client.called) def test_viz(self): pass class TestMetadataBase(TestMetadata): """ base test object for testing Metadata with database enabled """ __test__ = False @skipUnless(HAS_DJANGO, "Django not found") def setUp(self): _TestMetadata.setUp(self) TestClientRunHooks.setUp(self) TestDatabaseBacked.setUp(self) Bcfg2.Options.setup.metadata_db = True self.syncdb(TestMetadataDB) def load_clients_data(self, metadata=None, xdata=None): if metadata is None: metadata = get_obj() for client in get_clients_test_tree().findall("Client"): metadata.add_client(client.get("name")) return metadata def get_nonexistent_client(self, _, prefix="newclient"): clients = [o.hostname for o in MetadataClientModel.objects.all()] i = 0 client_name = "%s%s" % (prefix, i) while client_name in clients: i += 1 client_name = "%s%s" % (prefix, i) return client_name @patch('os.path.exists') @patch('Bcfg2.Server.FileMonitor.get_fam') def test__init(self, mock_get_fam, mock_exists): mock_exists.return_value = False metadata = self.get_obj() self.assertIsInstance(metadata, Bcfg2.Server.Plugin.DatabaseBacked) mock_get_fam.return_value.AddMonitor.assert_called_with( os.path.join(metadata.data, "groups.xml"), metadata) mock_exists.return_value = True mock_get_fam.reset_mock() metadata = self.get_obj() mock_get_fam.return_value.AddMonitor.assert_any_call( os.path.join(metadata.data, "groups.xml"), metadata) mock_get_fam.return_value.AddMonitor.assert_any_call( os.path.join(metadata.data, "clients.xml"), metadata) def test_add_group(self): pass def test_add_bundle(self): pass def test_add_client(self): metadata = self.get_obj() hostname = self.get_nonexistent_client(metadata) client = metadata.add_client(hostname) self.assertIsInstance(client, MetadataClientModel) self.assertEqual(client.hostname, hostname) self.assertIn(hostname, metadata.clients) self.assertIn(hostname, metadata.list_clients()) self.assertItemsEqual(metadata.clients, [c.hostname for c in MetadataClientModel.objects.all()]) def test_update_group(self): pass def test_update_bundle(self): pass def test_update_client(self): pass def test_list_clients(self): metadata = self.get_obj() self.assertItemsEqual(metadata.list_clients(), [c.hostname for c in MetadataClientModel.objects.all()]) def test_remove_group(self): pass def test_remove_bundle(self): pass def test_remove_client(self): metadata = self.get_obj() client_name = self.get_nonexistent_client(metadata) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.remove_client, client_name) metadata.add_client(client_name) metadata.remove_client(client_name) self.assertNotIn(client_name, metadata.clients) self.assertNotIn(client_name, metadata.list_clients()) self.assertItemsEqual(metadata.clients, [c.hostname for c in MetadataClientModel.objects.all()]) def test_process_statistics(self): pass class TestMetadata_NoClientsXML(TestMetadataBase): """ test Metadata without a clients.xml. 
we have to disable or override tests that rely on client options """ __test__ = True def load_groups_data(self, metadata=None, xdata=None): if metadata is None: metadata = self.get_obj() if not xdata: xdata = copy.deepcopy(get_groups_test_tree()) for client in get_clients_test_tree().findall("Client"): newclient = \ lxml.etree.SubElement(xdata.getroot(), "Client", name=client.get("name")) lxml.etree.SubElement(newclient, "Group", name=client.get("profile")) metadata.groups_xml.data = xdata metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data) evt = Mock() evt.filename = os.path.join(datastore, "Metadata", "groups.xml") evt.code2str = Mock(return_value="changed") metadata.HandleEvent(evt) return metadata @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.ClientMetadata") def test_get_initial_metadata(self, mock_clientmetadata): metadata = self.get_obj() if 'clients.xml' in metadata.states: metadata.states['clients.xml'] = False self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError, metadata.get_initial_metadata, None) self.load_groups_data(metadata=metadata) self.load_clients_data(metadata=metadata) # test basic client metadata metadata.get_initial_metadata("client1") mock_clientmetadata.assert_called_with("client1", "group1", set(["group1"]), set(), set(), set(), dict(category1='group1'), None, None, None, metadata.query) # test bundles, category suppression metadata.get_initial_metadata("client2") mock_clientmetadata.assert_called_with("client2", "group2", set(["group2"]), set(["bundle1", "bundle2"]), set(), set(), dict(category1="group2"), None, None, None, metadata.query) # test new client creation new1 = self.get_nonexistent_client(metadata) imd = metadata.get_initial_metadata(new1) mock_clientmetadata.assert_called_with(new1, "group1", set(["group1"]), set(), set(), set(), dict(category1="group1"), None, None, None, metadata.query) # test nested groups, per-client groups imd = metadata.get_initial_metadata("client8") mock_clientmetadata.assert_called_with("client8", "group1", set(["group1", "group8", "group9", "group10"]), set(), set(), set(), dict(category1="group1"), None, None, None, metadata.query) # test per-client groups, group negation, nested groups imd = metadata.get_initial_metadata("client9") mock_clientmetadata.assert_called_with("client9", "group2", set(["group2", "group8", "group11"]), set(["bundle1", "bundle2"]), set(), set(), dict(category1="group2"), None, None, None, metadata.query) # test exception on new client with no default profile metadata.default = None new2 = self.get_nonexistent_client(metadata) self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.get_initial_metadata, new2) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client") def test_validate_client_address(self, mock_resolve_client): metadata = self.load_clients_data(metadata=self.load_groups_data()) # this is upper case to ensure that case is folded properly in # validate_client_address() mock_resolve_client.return_value = "CLIENT4" self.assertTrue(metadata.validate_client_address("client4", ("1.2.3.7", None))) mock_resolve_client.assert_called_with(("1.2.3.7", None)) mock_resolve_client.reset_mock() self.assertFalse(metadata.validate_client_address("client5", ("1.2.3.5", None))) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", 
Mock()) @patch("Bcfg2.Server.Plugins.Metadata.Metadata.validate_client_address") @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client") def test_AuthenticateConnection(self, mock_resolve_client, mock_validate_client_address): metadata = self.load_clients_data(metadata=self.load_groups_data()) metadata.password = "password1" cert = dict(subject=[[("commonName", "client1")]]) mock_validate_client_address.return_value = False self.assertFalse(metadata.AuthenticateConnection(cert, "root", None, "1.2.3.1")) mock_validate_client_address.return_value = True self.assertTrue(metadata.AuthenticateConnection(cert, "root", metadata.password, "1.2.3.1")) cert = dict(subject=[[("commonName", "client8")]]) mock_resolve_client.return_value = "client5" self.assertTrue(metadata.AuthenticateConnection(None, "root", "password1", "1.2.3.8")) mock_resolve_client.side_effect = \ Bcfg2.Server.Plugin.MetadataConsistencyError self.assertFalse(metadata.AuthenticateConnection(None, "root", "password1", "1.2.3.8")) @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock()) @patch("socket.getnameinfo") def test_resolve_client(self, mock_getnameinfo): metadata = self.load_clients_data(metadata=self.load_groups_data()) metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3') self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3') metadata.session_cache[('1.2.3.3', None)] = (time.time() - 100, 'client3') mock_getnameinfo.return_value = ("client3", [], ['1.2.3.3']) self.assertEqual(metadata.resolve_client(('1.2.3.3', None), cleanup_cache=True), 'client3') self.assertEqual(metadata.session_cache, dict()) mock_getnameinfo.return_value = ('client6', [], ['1.2.3.6']) self.assertEqual(metadata.resolve_client(('1.2.3.6', 6789), socket.NI_NAMEREQD), 'client6') mock_getnameinfo.assert_called_with(('1.2.3.6', 6789), socket.NI_NAMEREQD) mock_getnameinfo.reset_mock() mock_getnameinfo.return_value = None mock_getnameinfo.side_effect = socket.herror self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError, metadata.resolve_client, ('1.2.3.8', 6789), socket.NI_NAMEREQD) mock_getnameinfo.assert_called_with(('1.2.3.8', 6789), socket.NI_NAMEREQD) def test_handle_clients_xml_event(self): pass def test_end_statistics(self): # bootstrap mode, which is what is being tested here, doesn't # work without clients.xml pass class TestMetadata_ClientsXML(TestMetadataBase): """ test Metadata with a clients.xml. 
""" __test__ = True def load_clients_data(self, metadata=None, xdata=None): if metadata is None: metadata = self.get_obj() fam = Bcfg2.Server.FileMonitor._FAM Bcfg2.Server.FileMonitor._FAM = MagicMock() @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()) def inner(): metadata.clients_xml = metadata._handle_file("clients.xml") inner() metadata = TestMetadata.load_clients_data(self, metadata=metadata, xdata=xdata) rv = TestMetadataBase.load_clients_data(self, metadata=metadata, xdata=xdata) Bcfg2.Server.FileMonitor._FAM = fam return rv testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py000066400000000000000000000336541303523157100252120ustar00rootroot00000000000000import os import re import sys import shutil import tempfile import lxml.etree import Bcfg2.version import Bcfg2.Server import Bcfg2.Server.Plugin from mock import Mock, MagicMock, patch # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from Bcfg2.Server.Plugins.Probes import load_django_models from TestPlugin import TestEntrySet, TestPlugin, \ TestDatabaseBacked load_django_models() from Bcfg2.Server.Plugins.Probes import * if HAS_JSON: json = json if HAS_YAML: yaml = yaml # test data for JSON and YAML tests test_data = dict(a=1, b=[1, 2, 3], c="test", d=dict(a=1, b=dict(a=1), c=(1, "2", 3))) class FakeList(list): pass class TestProbesDB(DBModelTestCase): if HAS_DJANGO: models = [ProbesGroupsModel, ProbesDataModel] class TestProbeData(Bcfg2TestCase): def test_str(self): # a value that is not valid XML, JSON, or YAML val = "'test" # test string behavior data = ProbeData(val) self.assertIsInstance(data, str) self.assertEqual(data, val) # test 1.2.0-1.2.2 broken behavior self.assertEqual(data.data, val) # test that formatted data accessors return None self.assertIsNone(data.xdata) self.assertIsNone(data.yaml) self.assertIsNone(data.json) def test_xdata(self): xdata = lxml.etree.Element("test") lxml.etree.SubElement(xdata, "test2") data = ProbeData( lxml.etree.tostring(xdata, xml_declaration=False).decode('UTF-8')) self.assertIsNotNone(data.xdata) self.assertIsNotNone(data.xdata.find("test2")) @skipUnless(HAS_JSON, "JSON libraries not found, skipping JSON tests") def test_json(self): jdata = json.dumps(test_data) data = ProbeData(jdata) self.assertIsNotNone(data.json) self.assertItemsEqual(test_data, data.json) @skipUnless(HAS_YAML, "YAML libraries not found, skipping YAML tests") def test_yaml(self): jdata = yaml.dump(test_data) data = ProbeData(jdata) self.assertIsNotNone(data.yaml) self.assertItemsEqual(test_data, data.yaml) class TestProbeSet(TestEntrySet): test_obj = ProbeSet basenames = ["test", "_test", "test-test"] ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx", "probed.xml"] bogus_names = ["test.py"] def get_obj(self, path=datastore, encoding=None, plugin_name="Probes", basename=None): # get_obj() accepts the basename argument, accepted by the # parent get_obj() method, and just throws it away, since # ProbeSet uses a regex for the "basename" rv = self.test_obj(path, plugin_name) rv.entry_type = MagicMock() return rv @patch("Bcfg2.Server.FileMonitor.get_fam") def test__init(self, mock_get_fam): ps = self.get_obj() self.assertEqual(ps.plugin_name, "Probes") 
mock_get_fam.return_value.AddMonitor.assert_called_with(datastore, ps) TestEntrySet.test__init(self) def test_HandleEvent(self): ps = self.get_obj() ps.handle_event = Mock() # test that events on the data store itself are skipped evt = Mock() evt.filename = datastore ps.HandleEvent(evt) self.assertFalse(ps.handle_event.called) # test that events on probed.xml are skipped evt.reset_mock() evt.filename = "probed.xml" ps.HandleEvent(evt) self.assertFalse(ps.handle_event.called) # test that other events are processed appropriately evt.reset_mock() evt.filename = "fooprobe" ps.HandleEvent(evt) ps.handle_event.assert_called_with(evt) @patch("%s.list" % builtins, FakeList) def test_get_probe_data(self): ps = self.get_obj() # build some fairly complex test data for this. in the end, # we want the probe data to include only the most specific # version of a given probe, and by basename only, not full # (specific) name. We don't fully test the specificity stuff, # we just check to make sure sort() is called and trust that # sort() does the right thing on Specificity objects. (I.e., # trust that Specificity is well-tested. Hah!) We also test # to make sure the interpreter is determined correctly. ps.get_matching = Mock() matching = FakeList() matching.sort = Mock() p1 = Mock() p1.specific = Bcfg2.Server.Plugin.Specificity(group=True, prio=10) p1.name = "fooprobe.G10_foogroup" p1.data = """#!/bin/bash group-specific""" matching.append(p1) p2 = Mock() p2.specific = Bcfg2.Server.Plugin.Specificity(all=True) p2.name = "fooprobe" p2.data = "#!/bin/bash" matching.append(p2) p3 = Mock() p3.specific = Bcfg2.Server.Plugin.Specificity(all=True) p3.name = "barprobe" p3.data = "#! /usr/bin/env python" matching.append(p3) p4 = Mock() p4.specific = Bcfg2.Server.Plugin.Specificity(all=True) p4.name = "bazprobe" p4.data = "" matching.append(p4) ps.get_matching.return_value = matching metadata = Mock() metadata.version_info = \ Bcfg2.version.Bcfg2VersionInfo(Bcfg2.version.__version__) pdata = ps.get_probe_data(metadata) ps.get_matching.assert_called_with(metadata) # we can't create a matching operator.attrgetter object, and I # don't feel the need to mock that out -- this is a good # enough check self.assertTrue(matching.sort.called) self.assertEqual(len(pdata), 3, "Found: %s" % [p.get("name") for p in pdata]) for probe in pdata: if probe.get("name") == "fooprobe": self.assertIn("group-specific", probe.text) self.assertEqual(probe.get("interpreter"), "/bin/bash") elif probe.get("name") == "barprobe": self.assertEqual(probe.get("interpreter"), "/usr/bin/env python") elif probe.get("name") == "bazprobe": self.assertIsNotNone(probe.get("interpreter")) else: assert False, "Strange probe found in get_probe_data() return" class TestProbes(TestDatabaseBacked): test_obj = Probes test_xdata = lxml.etree.Element("test") lxml.etree.SubElement(test_xdata, "test", foo="foo") test_xdoc = lxml.etree.tostring(test_xdata, xml_declaration=False).decode('UTF-8') data = dict() data['xml'] = "group:group\n" + test_xdoc data['text'] = "freeform text" data['multiline'] = """multiple lines of freeform text group:group-with-dashes group: group:with:colons """ data['empty'] = '' data['almost_empty'] = 'group: other_group' if HAS_JSON: data['json'] = json.dumps(test_data) if HAS_YAML: data['yaml'] = yaml.dump(test_data) def setUp(self): Bcfg2TestCase.setUp(self) set_setup_default("probes_db") set_setup_default("probes_allowed_groups", [re.compile(".*")]) self.datastore = None Bcfg2.Server.Cache.expire("Probes") def tearDown(self): 
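        # clean up: expire the Probes cache again and remove the temporary
        # datastore that get_obj() creates for the non-database tests.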
Bcfg2.Server.Cache.expire("Probes") if self.datastore is not None: shutil.rmtree(self.datastore) self.datastore = None Bcfg2.Options.setup.repository = datastore def get_obj(self, core=None): if not Bcfg2.Options.setup.probes_db: # actually use a real datastore so we can read and write # probed.xml if self.datastore is None: self.datastore = tempfile.mkdtemp() Bcfg2.Options.setup.repository = self.datastore datadir = os.path.join(self.datastore, self.test_obj.name) if not os.path.exists(datadir): os.makedirs(datadir) return TestPlugin.get_obj(self, core) def test__init(self): if Bcfg2.Options.setup.probes_db: TestPlugin.test__init(self) def test_GetProbes(self): p = self.get_obj() p.probes = Mock() metadata = Mock() p.GetProbes(metadata) p.probes.get_probe_data.assert_called_with(metadata) def additionalDataEqual(self, actual, expected): self.assertItemsEqual( dict([(k, str(d)) for k, d in actual.items()]), expected) def test_probes_xml(self): """ Set and retrieve probe data with database disabled """ Bcfg2.Options.setup.probes_db = False self._perform_tests() @skipUnless(HAS_DJANGO, "Django not found") def test_probes_db(self): """ Set and retrieve probe data with database enabled """ Bcfg2.Options.setup.probes_db = True self.syncdb(TestProbesDB) self._perform_tests() def test_allowed_cgroups(self): """ Test option to only allow probes to set certain groups """ probes = self.get_obj() test_text = """a couple lines of freeform text """ test_groups = ["group", "group2", "group-with-dashes"] test_probe_data = lxml.etree.Element("Probe", name="test") test_probe_data.text = test_text for group in test_groups: test_probe_data.text += "group:%s\n" % group client = Mock() groups, data = probes.ReceiveDataItem(client, test_probe_data) self.assertItemsEqual(groups, test_groups) self.assertEqual(data, test_text) old_allowed_groups = Bcfg2.Options.setup.probes_allowed_groups Bcfg2.Options.setup.probes_allowed_groups = [re.compile(r'^group.?$')] groups, data = probes.ReceiveDataItem(client, test_probe_data) self.assertItemsEqual(groups, ['group', 'group2']) self.assertEqual(data, test_text) Bcfg2.Options.setup.probes_allowed_groups = old_allowed_groups def _perform_tests(self): p = self.get_obj() # first, sanity checks foo_md = Mock(hostname="foo.example.com") bar_md = Mock(hostname="bar.example.com") self.assertItemsEqual(p.get_additional_groups(foo_md), []) self.assertItemsEqual(p.get_additional_data(foo_md), dict()) self.assertItemsEqual(p.get_additional_groups(bar_md), []) self.assertItemsEqual(p.get_additional_data(bar_md), dict()) # next, set some initial probe data foo_datalist = [] for key in ['xml', 'text', 'multiline']: pdata = lxml.etree.Element("Probe", name=key) pdata.text = self.data[key] foo_datalist.append(pdata) foo_addl_data = dict(xml=self.test_xdoc, text="freeform text", multiline="""multiple lines of freeform text""") bar_datalist = [] for key in ['empty', 'almost_empty', 'json', 'yaml']: if key in self.data: pdata = lxml.etree.Element("Probe", name=key) pdata.text = self.data[key] bar_datalist.append(pdata) bar_addl_data = dict(empty="", almost_empty="") if HAS_JSON: bar_addl_data['json'] = self.data['json'] if HAS_YAML: bar_addl_data['yaml'] = self.data['yaml'] p.ReceiveData(foo_md, foo_datalist) self.assertItemsEqual(p.get_additional_groups(foo_md), ["group", "group-with-dashes", "group:with:colons"]) self.additionalDataEqual(p.get_additional_data(foo_md), foo_addl_data) p.ReceiveData(bar_md, bar_datalist) self.assertItemsEqual(p.get_additional_groups(foo_md), ["group", 
"group-with-dashes", "group:with:colons"]) self.additionalDataEqual(p.get_additional_data(foo_md), foo_addl_data) self.assertItemsEqual(p.get_additional_groups(bar_md), ['other_group']) self.additionalDataEqual(p.get_additional_data(bar_md), bar_addl_data) # instantiate a new Probes object and clear Probes caches to # imitate a server restart p = self.get_obj() Bcfg2.Server.Cache.expire("Probes") self.assertItemsEqual(p.get_additional_groups(foo_md), ["group", "group-with-dashes", "group:with:colons"]) self.additionalDataEqual(p.get_additional_data(foo_md), foo_addl_data) self.assertItemsEqual(p.get_additional_groups(bar_md), ['other_group']) self.additionalDataEqual(p.get_additional_data(bar_md), bar_addl_data) # set new data (and groups) for foo foo_datalist = [] pdata = lxml.etree.Element("Probe", name='xml') pdata.text = self.data['xml'] foo_datalist.append(pdata) foo_addl_data = dict(xml=self.test_xdoc) p.ReceiveData(foo_md, foo_datalist) self.assertItemsEqual(p.get_additional_groups(foo_md), ["group"]) self.additionalDataEqual(p.get_additional_data(foo_md), foo_addl_data) self.assertItemsEqual(p.get_additional_groups(bar_md), ['other_group']) self.additionalDataEqual(p.get_additional_data(bar_md), bar_addl_data) # instantiate a new Probes object and clear Probes caches to # imitate a server restart p = self.get_obj() Bcfg2.Server.Cache.expire("Probes") self.assertItemsEqual(p.get_additional_groups(foo_md), ["group"]) self.additionalDataEqual(p.get_additional_data(foo_md), foo_addl_data) self.assertItemsEqual(p.get_additional_groups(bar_md), ['other_group']) self.additionalDataEqual(p.get_additional_data(bar_md), bar_addl_data) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py000066400000000000000000000254441303523157100261120ustar00rootroot00000000000000import os import sys import lxml.etree from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Properties import * from Bcfg2.Server.Plugin import PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestStructFile, TestFileBacked, TestConnector, \ TestPlugin, TestDirectoryBacked try: import json JSON = "json" except ImportError: JSON = "simplejson" class TestPropertyFile(Bcfg2TestCase): test_obj = PropertyFile path = os.path.join(datastore, "test") def get_obj(self, path=None, core=None, *args, **kwargs): set_setup_default("writes_enabled", False) if path is None: path = self.path if core is None: core = Mock() core.metadata_cache_mode = 'none' return self.test_obj(path, core, *args, **kwargs) def test_write(self): pf = self.get_obj() pf.validate_data = Mock() pf._write = Mock() xstr = u("\n") pf.xdata = lxml.etree.XML(xstr) def reset(): pf.validate_data.reset_mock() pf._write.reset_mock() # test writes disabled Bcfg2.Options.setup.writes_enabled = False self.assertRaises(PluginExecutionError, pf.write) self.assertFalse(pf.validate_data.called) self.assertFalse(pf._write.called) # test successful write reset() Bcfg2.Options.setup.writes_enabled = True self.assertEqual(pf.write(), pf._write.return_value) pf.validate_data.assert_called_with() pf._write.assert_called_with() # test error from _write reset() pf._write.side_effect = IOError self.assertRaises(PluginExecutionError, pf.write) 
pf.validate_data.assert_called_with() pf._write.assert_called_with() # test error from validate_data reset() pf.validate_data.side_effect = PluginExecutionError self.assertRaises(PluginExecutionError, pf.write) pf.validate_data.assert_called_with() def test__write(self): pf = self.get_obj() self.assertRaises(NotImplementedError, pf._write) def test_validate_data(self): pf = self.get_obj() self.assertRaises(NotImplementedError, pf.validate_data) @patch("copy.copy") def test_get_additional_data(self, mock_copy): pf = self.get_obj() self.assertEqual(pf.get_additional_data(Mock()), mock_copy.return_value) mock_copy.assert_called_with(pf) class TestJSONPropertyFile(TestFileBacked, TestPropertyFile): test_obj = JSONPropertyFile @skipUnless(HAS_JSON, "JSON libraries not found, skipping") def setUp(self): TestFileBacked.setUp(self) TestPropertyFile.setUp(self) def get_obj(self, *args, **kwargs): return TestPropertyFile.get_obj(self, *args, **kwargs) @patch("%s.loads" % JSON) def test_Index(self, mock_loads): pf = self.get_obj() pf.Index() mock_loads.assert_called_with(pf.data) self.assertEqual(pf.json, mock_loads.return_value) mock_loads.reset_mock() mock_loads.side_effect = ValueError self.assertRaises(PluginExecutionError, pf.Index) mock_loads.assert_called_with(pf.data) @patch("%s.dump" % JSON) @patch("%s.open" % builtins) def test__write(self, mock_open, mock_dump): pf = self.get_obj() self.assertTrue(pf._write()) mock_open.assert_called_with(pf.name, 'wb') mock_dump.assert_called_with(pf.json, mock_open.return_value) @patch("%s.dumps" % JSON) def test_validate_data(self, mock_dumps): pf = self.get_obj() pf.validate_data() mock_dumps.assert_called_with(pf.json) mock_dumps.reset_mock() mock_dumps.side_effect = TypeError self.assertRaises(PluginExecutionError, pf.validate_data) mock_dumps.assert_called_with(pf.json) class TestYAMLPropertyFile(TestFileBacked, TestPropertyFile): test_obj = YAMLPropertyFile @skipUnless(HAS_YAML, "YAML libraries not found, skipping") def setUp(self): TestFileBacked.setUp(self) TestPropertyFile.setUp(self) def get_obj(self, *args, **kwargs): return TestPropertyFile.get_obj(self, *args, **kwargs) @patch("yaml.load") def test_Index(self, mock_load): pf = self.get_obj() pf.Index() mock_load.assert_called_with(pf.data) self.assertEqual(pf.yaml, mock_load.return_value) mock_load.reset_mock() mock_load.side_effect = yaml.YAMLError self.assertRaises(PluginExecutionError, pf.Index) mock_load.assert_called_with(pf.data) @patch("yaml.dump") @patch("%s.open" % builtins) def test__write(self, mock_open, mock_dump): pf = self.get_obj() self.assertTrue(pf._write()) mock_open.assert_called_with(pf.name, 'wb') mock_dump.assert_called_with(pf.yaml, mock_open.return_value) @patch("yaml.dump") def test_validate_data(self, mock_dump): pf = self.get_obj() pf.validate_data() mock_dump.assert_called_with(pf.yaml) mock_dump.reset_mock() mock_dump.side_effect = yaml.YAMLError self.assertRaises(PluginExecutionError, pf.validate_data) mock_dump.assert_called_with(pf.yaml) class TestXMLPropertyFile(TestPropertyFile, TestStructFile): test_obj = XMLPropertyFile path = TestStructFile.path def setUp(self): TestPropertyFile.setUp(self) TestStructFile.setUp(self) set_setup_default("automatch", False) def get_obj(self, *args, **kwargs): return TestPropertyFile.get_obj(self, *args, **kwargs) @patch("%s.open" % builtins) def test__write(self, mock_open): pf = self.get_obj() pf.xdata = lxml.etree.Element("Test") self.assertTrue(pf._write()) mock_open.assert_called_with(pf.name, "wb") 
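        # whatever _write() handed to the mocked file object should parse back
        # into XML that is equivalent to the original xdata tree.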
self.assertXMLEqual(pf.xdata, lxml.etree.XML(mock_open.return_value.write.call_args[0][0])) @patch("os.path.exists") @patch("lxml.etree.XMLSchema") def test_validate_data(self, mock_XMLSchema, mock_exists): pf = self.get_obj() pf.name = os.path.join(datastore, "Properties", "test.xml") schemafile = os.path.join(datastore, "Properties", "test.xsd") def reset(): mock_XMLSchema.reset_mock() mock_exists.reset_mock() # test no schema file mock_exists.return_value = False self.assertTrue(pf.validate_data()) mock_exists.assert_called_with(schemafile) # test schema file exists, valid data reset() mock_exists.return_value = True mock_XMLSchema.return_value = Mock() mock_XMLSchema.return_value.validate.return_value = True self.assertTrue(pf.validate_data()) mock_exists.assert_called_with(schemafile) mock_XMLSchema.assert_called_with(file=schemafile) mock_XMLSchema.return_value.validate.assert_called_with(pf.xdata) # test schema file exists, invalid data reset() mock_XMLSchema.return_value = Mock() mock_XMLSchema.return_value.validate.return_value = False self.assertRaises(PluginExecutionError, pf.validate_data) mock_exists.assert_called_with(schemafile) mock_XMLSchema.assert_called_with(file=schemafile) mock_XMLSchema.return_value.validate.assert_called_with(pf.xdata) # test invalid schema file reset() mock_XMLSchema.side_effect = lxml.etree.XMLSchemaParseError(pf.xdata) self.assertRaises(PluginExecutionError, pf.validate_data) mock_exists.assert_called_with(schemafile) mock_XMLSchema.assert_called_with(file=schemafile) @patch("copy.copy") def test_get_additional_data(self, mock_copy): pf = self.get_obj() pf.setup = Mock() pf.XMLMatch = Mock() metadata = Mock() def reset(): mock_copy.reset_mock() pf.XMLMatch.reset_mock() pf.setup.reset_mock() pf.xdata = lxml.etree.Element("Properties", automatch="true") for Bcfg2.Options.setup.automatch in [True, False]: reset() self.assertEqual(pf.get_additional_data(metadata), pf.XMLMatch.return_value) pf.XMLMatch.assert_called_with(metadata) self.assertFalse(mock_copy.called) pf.xdata = lxml.etree.Element("Properties", automatch="false") for Bcfg2.Options.setup.automatch in [True, False]: reset() self.assertEqual(pf.get_additional_data(metadata), mock_copy.return_value) mock_copy.assert_called_with(pf) self.assertFalse(pf.XMLMatch.called) pf.xdata = lxml.etree.Element("Properties") reset() Bcfg2.Options.setup.automatch = False self.assertEqual(pf.get_additional_data(metadata), mock_copy.return_value) mock_copy.assert_called_with(pf) self.assertFalse(pf.XMLMatch.called) reset() Bcfg2.Options.setup.automatch = True self.assertEqual(pf.get_additional_data(metadata), pf.XMLMatch.return_value) pf.XMLMatch.assert_called_with(metadata) self.assertFalse(mock_copy.called) class TestProperties(TestPlugin, TestConnector, TestDirectoryBacked): test_obj = Properties testfiles = ['foo.xml', 'bar.baz.xml'] if HAS_JSON: testfiles.extend(["foo.json", "foo.xml.json"]) if HAS_YAML: testfiles.extend(["foo.yaml", "foo.yml", "foo.xml.yml"]) ignore = ['foo.xsd', 'bar.baz.xsd', 'quux.xml.xsd'] badevents = ['bogus.txt'] def get_obj(self, core=None): @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() @patch("copy.copy") def test_get_additional_data(self, mock_copy): TestConnector.test_get_additional_data(self) p = self.get_obj() metadata = Mock() p.entries = {"foo.xml": Mock(), "foo.yml": Mock()} rv = p.get_additional_data(metadata) expected = dict() for name, entry in 
p.entries.items(): entry.get_additional_data.assert_called_with(metadata) expected[name] = entry.get_additional_data.return_value self.assertItemsEqual(rv, expected) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestRules.py000066400000000000000000000177061303523157100250520ustar00rootroot00000000000000import os import sys import copy import lxml.etree import Bcfg2.Options from mock import Mock, MagicMock, patch from Bcfg2.Server.Plugins.Rules import * from Bcfg2.Server.Plugin import PluginExecutionError # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin.Testhelpers import TestPrioDir class TestRules(TestPrioDir): test_obj = Rules abstract = dict( basic=lxml.etree.Element("Path", name="/etc/basic"), unhandled=lxml.etree.Element("Path", name="/etc/unhandled"), priority=lxml.etree.Element("Path", name="/etc/priority"), content=lxml.etree.Element("Path", name="/etc/text-content"), duplicate=lxml.etree.Element("SEBoolean", name="duplicate"), group=lxml.etree.Element("SEPort", name="6789/tcp"), children=lxml.etree.Element("Path", name="/etc/child-entries"), regex=lxml.etree.Element("Package", name="regex"), replace_name=lxml.etree.Element("POSIXUser", name="regex"), slash=lxml.etree.Element("Path", name="/etc/trailing/slash"), no_slash=lxml.etree.Element("Path", name="/etc/no/trailing/slash/")) concrete = dict( basic=lxml.etree.Element("Path", name="/etc/basic", type="directory", owner="root", group="root", mode="0600"), priority=lxml.etree.Element("Path", name="/etc/priority", type="directory", owner="root", group="root", mode="0600"), content=lxml.etree.Element("Path", name="/etc/text-content", type="file", owner="bar", group="bar", mode="0644"), duplicate=lxml.etree.Element("SEBoolean", name="duplicate", value="on"), group=lxml.etree.Element("SEPort", name="6789/tcp", selinuxtype="bcfg2_server_t"), children=lxml.etree.Element("Path", name="/etc/child-entries", type="directory", owner="root", group="root", mode="0775"), regex=lxml.etree.Element("Package", name="regex", type="yum", version="any"), replace_name=lxml.etree.Element("POSIXUser", name="regex", home="/foobar%{bar}/regex"), slash=lxml.etree.Element("Path", name="/etc/trailing/slash", type="directory", owner="root", group="root", mode="0600"), no_slash=lxml.etree.Element("Path", name="/etc/no/trailing/slash/", type="directory", owner="root", group="root", mode="0600")) concrete['content'].text = "Text content" lxml.etree.SubElement(concrete['children'], "ACL", type="default", scope="user", user="foouser", perms="rw") lxml.etree.SubElement(concrete['children'], "ACL", type="default", scope="group", group="users", perms="rx") in_file = copy.deepcopy(concrete) in_file['regex'].set("name", ".*") in_file['replace_name'].set("home", "/foobar%{bar}/%{name}") in_file['replace_name'].set("name", ".*") in_file['slash'].set("name", "/etc/trailing/slash/") in_file['no_slash'].set("name", "/etc/no/trailing/slash") rules1 = lxml.etree.Element("Rules", priority="10") rules1.append(in_file['basic']) lxml.etree.SubElement(rules1, "Path", name="/etc/priority", type="directory", owner="foo", group="foo", mode="0644") foogroup = lxml.etree.SubElement(rules1, "Group", name="foogroup") foogroup.append(in_file['group']) rules1.append(in_file['content']) 
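    # the 'duplicate' entry is appended to both rules1 and rules3, which share
    # priority 10, so that test_duplicate() can verify that equal-priority
    # duplicate entries are rejected.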
rules1.append(copy.copy(in_file['duplicate'])) rules2 = lxml.etree.Element("Rules", priority="20") rules2.append(in_file['priority']) rules2.append(in_file['children']) rules2.append(in_file['no_slash']) rules3 = lxml.etree.Element("Rules", priority="10") rules3.append(in_file['duplicate']) rules3.append(in_file['regex']) rules3.append(in_file['replace_name']) rules3.append(in_file['slash']) rules = {"rules1.xml": rules1, "rules2.xml": rules2, "rules3.xml": rules3} def setUp(self): TestPrioDir.setUp(self) set_setup_default("lax_decryption", True) set_setup_default("rules_regex", False) set_setup_default("rules_replace_name", False) def get_child(self, name): """ Turn one of the XML documents in `rules` into a child object """ filename = os.path.join(datastore, self.test_obj.name, name) rv = self.test_obj.__child__(filename) rv.data = lxml.etree.tostring(self.rules[name]) rv.Index() return rv def get_obj(self, core=None): r = TestPrioDir.get_obj(self, core=core) r.entries = dict([(n, self.get_child(n)) for n in self.rules.keys()]) return r def _do_test(self, name, groups=None): if groups is None: groups = [] r = self.get_obj() metadata = Mock(groups=groups) entry = copy.deepcopy(self.abstract[name]) self.assertTrue(r.HandlesEntry(entry, metadata)) r.HandleEntry(entry, metadata) self.assertXMLEqual(entry, self.concrete[name]) def _do_test_failure(self, name, groups=None, handles=None): if groups is None: groups = [] r = self.get_obj() metadata = Mock(groups=groups) entry = self.abstract[name] if handles is not None: self.assertEqual(handles, r.HandlesEntry(entry, metadata)) self.assertRaises(PluginExecutionError, r.HandleEntry, entry, metadata) def test_basic(self): """ Test basic Rules usage """ self._do_test('basic') self._do_test_failure('unhandled', handles=False) def test_priority(self): """ Test that Rules respects priority """ self._do_test('priority') def test_duplicate(self): """ Test that Rules raises exceptions for duplicate entries """ self._do_test_failure('duplicate') def test_content(self): """ Test that Rules copies text content from concrete entries """ self._do_test('content') def test_group(self): """ Test that Rules respects tags """ self._do_test('group', groups=['foogroup']) self._do_test_failure('group', groups=['bargroup'], handles=False) def test_children(self): """ Test that Rules copies child elements from concrete entries """ self._do_test('children') def test_regex(self): """ Test that Rules handles regular expressions properly """ Bcfg2.Options.setup.rules_regex = False self._do_test_failure('regex', handles=False) Bcfg2.Options.setup.rules_regex = True self._do_test('regex') Bcfg2.Options.setup.rules_regex = False def test_replace_name(self): """ Test that Rules handles replaces name in attribues with regular expressions """ Bcfg2.Options.setup.rules_regex = False Bcfg2.Options.setup.rules_replace_name = False self._do_test_failure('replace_name', handles=False) Bcfg2.Options.setup.rules_regex = True Bcfg2.Options.setup.rules_replace_name = True self._do_test('replace_name') Bcfg2.Options.setup.rules_regex = False Bcfg2.Options.setup.rules_replace_name = False def test_slash(self): """ Test that Rules handles trailing slashes on Path entries """ self._do_test('slash') self._do_test('no_slash') testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestSEModules.py000066400000000000000000000103301303523157100256020ustar00rootroot00000000000000import os import sys import lxml.etree from Bcfg2.Compat import b64encode from mock import Mock, MagicMock, patch from 
Bcfg2.Server.Plugins.SEModules import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestSpecificData, TestGroupSpool class TestSEModuleData(TestSpecificData): test_obj = SEModuleData path = os.path.join(datastore, "SEModules", "test.pp", "test.pp") def test_bind_entry(self): data = self.get_obj() data.data = "test" entry = lxml.etree.Element("test", name=self.path) data.bind_entry(entry, Mock()) self.assertEqual(entry.get("name"), self.path) self.assertEqual(entry.get("encoding"), "base64") self.assertEqual(entry.text, b64encode(data.data)) class TestSEModules(TestGroupSpool): test_obj = SEModules def test__get_module_name(self): modules = self.get_obj() for mname in ["foo", "foo.pp"]: entry = lxml.etree.Element("SEModule", type="module", name=mname) self.assertEqual(modules._get_module_name(entry), "foo") def test__get_module_filename(self): modules = self.get_obj() for mname in ["foo", "foo.pp"]: entry = lxml.etree.Element("SEModule", type="module", name=mname) self.assertEqual(modules._get_module_filename(entry), "/foo.pp") def test_HandlesEntry(self): modules = self.get_obj() modules._get_module_filename = Mock() modules.Entries['SEModule']['/foo.pp'] = Mock() modules.Entries['SEModule']['/bar.pp'] = Mock() for el in [lxml.etree.Element("Path", name="foo.pp"), lxml.etree.Element("SEModule", name="baz.pp")]: modules._get_module_filename.return_value = "/" + el.get("name") self.assertFalse(modules.HandlesEntry(el, Mock())) if el.tag == "SEModule": modules._get_module_filename.assert_called_with(el) for el in [lxml.etree.Element("SEModule", name="foo.pp"), lxml.etree.Element("SEModule", name="bar.pp")]: modules._get_module_filename.return_value = "/" + el.get("name") self.assertTrue(modules.HandlesEntry(el, Mock()), msg="SEModules fails to handle %s" % el.get("name")) modules._get_module_filename.assert_called_with(el) TestGroupSpool.test_HandlesEntry(self) def test_HandleEntry(self): modules = self.get_obj() modules._get_module_name = Mock() handler = Mock() modules.Entries['SEModule']['/foo.pp'] = handler modules._get_module_name.return_value = "foo" entry = lxml.etree.Element("SEModule", type="module", name="foo") metadata = Mock() self.assertEqual(modules.HandleEntry(entry, metadata), handler.return_value) modules._get_module_name.assert_called_with(entry) self.assertEqual(entry.get("name"), modules._get_module_name.return_value) handler.assert_called_with(entry, metadata) TestGroupSpool.test_HandlesEntry(self) def test_add_entry(self): @patch("%s.%s.add_entry" % (self.test_obj.__base__.__module__, self.test_obj.__base__.__name__)) def inner(mock_add_entry): modules = self.get_obj() modules.event_path = Mock() evt = Mock() evt.filename = "test.pp.G10_foo" modules.event_path.return_value = \ os.path.join(datastore, self.test_obj.__name__, "test.pp", "test.pp.G10_foo") modules.add_entry(evt) self.assertEqual(modules.filename_pattern, "test.pp") mock_add_entry.assert_called_with(modules, evt) modules.event_path.assert_called_with(evt) inner() TestGroupSpool.test_add_entry(self) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestTemplateHelper.py000066400000000000000000000112711303523157100266620ustar00rootroot00000000000000import os import sys import Bcfg2.Server.Plugin from mock import 
Mock, MagicMock, patch from Bcfg2.Utils import safe_module_name from Bcfg2.Server.Plugins.TemplateHelper import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestDirectoryBacked, TestConnector, TestPlugin, \ TestFileBacked class TestHelperModule(Bcfg2TestCase): test_obj = HelperModule path = os.path.join(datastore, "test.py") def get_obj(self, path=None, core=None): if path is None: path = self.path if core is None: core = Mock() core.metadata_cache_mode = 'none' return self.test_obj(path, core) def test__init(self): hm = self.get_obj() self.assertEqual(hm._module_name, "test") self.assertEqual(hm._attrs, []) @patch("imp.load_source") def test_HandleEvent(self, mock_load_source): hm = self.get_obj() mock_load_source.side_effect = ImportError attrs = dir(hm) hm.HandleEvent() mock_load_source.assert_called_with( safe_module_name('TemplateHelper', hm._module_name), hm.name) self.assertEqual(attrs, dir(hm)) self.assertEqual(hm._attrs, []) mock_load_source.reset() mock_load_source.side_effect = None # a regular Mock (not a MagicMock) won't automatically create # __export__, so this triggers a failure condition in HandleEvent mock_load_source.return_value = Mock() attrs = dir(hm) hm.HandleEvent() mock_load_source.assert_called_with( safe_module_name('TemplateHelper', hm._module_name), hm.name) self.assertEqual(attrs, dir(hm)) self.assertEqual(hm._attrs, []) # test reserved attributes module = Mock() module.__export__ = ["_attrs", "HandleEvent", "__init__"] mock_load_source.reset() mock_load_source.return_value = module attrs = dir(hm) hm.HandleEvent() mock_load_source.assert_called_with( safe_module_name('TemplateHelper', hm._module_name), hm.name) self.assertEqual(attrs, dir(hm)) self.assertEqual(hm._attrs, []) # test adding attributes module = Mock() module.__export__ = ["foo", "bar", "baz", "HandleEvent"] mock_load_source.reset() mock_load_source.return_value = module hm.HandleEvent() mock_load_source.assert_called_with( safe_module_name('TemplateHelper', hm._module_name), hm.name) self.assertTrue(hasattr(hm, "foo")) self.assertTrue(hasattr(hm, "bar")) self.assertTrue(hasattr(hm, "baz")) self.assertEqual(hm._attrs, ["foo", "bar", "baz"]) # test removing attributes module = Mock() module.__export__ = ["foo", "bar", "quux", "HandleEvent"] mock_load_source.reset() mock_load_source.return_value = module hm.HandleEvent() mock_load_source.assert_called_with( safe_module_name('TemplateHelper', hm._module_name), hm.name) self.assertTrue(hasattr(hm, "foo")) self.assertTrue(hasattr(hm, "bar")) self.assertTrue(hasattr(hm, "quux")) self.assertFalse(hasattr(hm, "baz")) self.assertEqual(hm._attrs, ["foo", "bar", "quux"]) class TestTemplateHelper(TestPlugin, TestConnector, TestDirectoryBacked): test_obj = TemplateHelper testfiles = ['foo.py', 'foo_bar.py', 'foo.bar.py'] ignore = ['fooo.py~', 'fooo.pyc', 'fooo.pyo'] badevents = ['foo'] def get_obj(self, core=None, fam=None): if core is None: core = Mock() if fam is not None: core.fam = fam @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() def test_get_additional_data(self): TestConnector.test_get_additional_data(self) th = self.get_obj() 
modules = ['foo', 'bar'] rv = dict() for mname in modules: module = Mock() module._module_name = mname rv[mname] = module th.entries['%s.py' % mname] = module actual = th.get_additional_data(Mock()) self.assertItemsEqual(actual, rv) testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestTrigger.py000066400000000000000000000056311303523157100253550ustar00rootroot00000000000000import os import sys from mock import Mock, patch from subprocess import PIPE from Bcfg2.Server.Plugins.Trigger import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from TestPlugin import TestDirectoryBacked, TestClientRunHooks, TestPlugin, \ TestFileBacked class TestTriggerFile(TestFileBacked): test_obj = TriggerFile def test_HandleEvent(self): pass class TestTrigger(TestPlugin, TestClientRunHooks, TestDirectoryBacked): test_obj = Trigger def get_obj(self, core=None, fam=None): if core is None: core = Mock() if fam is not None: core.fam = fam @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) def inner(): return TestPlugin.get_obj(self, core=core) return inner() @patch("os.fork") @patch("os._exit") @patch("os.waitpid") @patch("subprocess.Popen") @skip("Tests that call os.fork are broken, even when os.fork is mocked") def test_async_run(self, mock_Popen, mock_waitpid, mock_exit, mock_fork): trigger = self.get_obj() def reset(): mock_Popen.reset_mock() mock_waitpid.reset_mock() mock_exit.reset_mock() mock_fork.reset_mock() mock_fork.return_value = 0 trigger.async_run(["foo", "bar"]) self.assertItemsEqual(mock_fork.call_args_list, [call(), call()]) mock_Popen.assert_called_with(["foo", "bar"], stdin=PIPE, stdout=PIPE, stderr=PIPE) mock_Popen.return_value.wait.assert_called_with() mock_exit.assert_called_with(0) reset() mock_fork.return_value = 123 trigger.async_run(["foo", "bar"]) mock_fork.assert_called_with() mock_waitpid.assert_called_with(123, 0) self.assertFalse(mock_Popen.called) def test_end_client_run(self): trigger = self.get_obj() trigger.async_run = Mock() trigger.entries = {'foo.sh': Mock(), 'bar': Mock()} metadata = Mock() metadata.hostname = "host" metadata.profile = "profile" metadata.groups = ['a', 'b', 'c'] args = ['host', '-p', 'profile', '-g', 'a:b:c'] trigger.end_client_run(metadata) self.assertItemsEqual([[os.path.join(trigger.data, 'foo.sh')] + args, [os.path.join(trigger.data, 'bar')] + args], [c[0][0] for c in trigger.async_run.call_args_list]) testsuite/Testsrc/Testlib/TestServer/TestPlugins/__init__.py000066400000000000000000000000001303523157100246320ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestServer/TestStatistics.py000066400000000000000000000032221303523157100236150ustar00rootroot00000000000000import os import sys from mock import Mock, MagicMock, patch # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * from Bcfg2.Server.Statistics import * class TestStatistic(Bcfg2TestCase): def test_stat(self): stat = Statistic("test", 1) self.assertEqual(stat.get_value(), ("test", (1.0, 1.0, 1.0, 
1))) stat.add_value(10) self.assertEqual(stat.get_value(), ("test", (1.0, 10.0, 5.5, 2))) stat.add_value(100) self.assertEqual(stat.get_value(), ("test", (1.0, 100.0, 37.0, 3))) stat.add_value(12.345) self.assertEqual(stat.get_value(), ("test", (1.0, 100.0, 30.83625, 4))) stat.add_value(0.655) self.assertEqual(stat.get_value(), ("test", (0.655, 100.0, 24.8, 5))) class TestStatistics(Bcfg2TestCase): def test_stats(self): stats = Statistics() self.assertEqual(stats.display(), dict()) stats.add_value("test1", 1) self.assertEqual(stats.display(), dict(test1=(1.0, 1.0, 1.0, 1))) stats.add_value("test2", 1.23) self.assertEqual(stats.display(), dict(test1=(1.0, 1.0, 1.0, 1), test2=(1.23, 1.23, 1.23, 1))) stats.add_value("test1", 10) self.assertEqual(stats.display(), dict(test1=(1.0, 10.0, 5.5, 2), test2=(1.23, 1.23, 1.23, 1))) testsuite/Testsrc/Testlib/TestServer/__init__.py000066400000000000000000000000001303523157100223510ustar00rootroot00000000000000testsuite/Testsrc/Testlib/TestUtils.py000066400000000000000000000044711303523157100204640ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os import sys from Bcfg2.Utils import * # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * class TestPackedDigitRange(Bcfg2TestCase): def test_ranges(self): # test cases. tuples of (ranges, included numbers, excluded # numbers) # tuples of (range description, numbers that are included, # numebrs that are excluded) tests = [(["0-3"], ["0", 1, "2", 3], [4]), (["1"], [1], [0, "2"]), (["10-11"], [10, 11], [0, 1]), (["9-9"], [9], [8, 10]), (["0-100"], [0, 10, 99, 100], []), (["1", "3", "5"], [1, 3, 5], [0, 2, 4, 6]), (["1-5", "7"], [1, 3, 5, 7], [0, 6, 8]), (["1-5", 7, "9-11"], [1, 3, 5, 7, 9, 11], [0, 6, 8, 12]), (["1-5, 7,9-11 "], [1, 3, 5, 7, 9, 11], [0, 6, 8, 12]), (["852-855", "321-497", 763], [852, 855, 321, 400, 497, 763], [851, 320, 766, 999]), (["0-"], [0, 1, 100, 100000], []), ([1, "5-10", "1000-"], [1, 5, 10, 1000, 10000000], [4, 11, 999])] for ranges, inc, exc in tests: rng = PackedDigitRange(*ranges) for test in inc: self.assertIn(test, rng) self.assertTrue(rng.includes(test)) for test in exc: self.assertNotIn(test, rng) self.assertFalse(rng.includes(test)) class TestIsString(Bcfg2TestCase): def test_is_string(self): for char in list(range(8)) + list(range(14, 32)): self.assertFalse(is_string("foo" + chr(char) + "bar", 'UTF-8')) for char in list(range(9, 14)) + list(range(33, 128)): self.assertTrue(is_string("foo" + chr(char) + "bar", 'UTF-8')) ustr = 'é' self.assertTrue(is_string(ustr, 'UTF-8')) if not inPy3k: self.assertFalse(is_string("foo" + chr(128) + "bar", 'ascii')) self.assertFalse(is_string(ustr, 'ascii')) testsuite/Testsrc/Testlib/__init__.py000066400000000000000000000000001303523157100202430ustar00rootroot00000000000000testsuite/Testsrc/Testsbin/000077500000000000000000000000001303523157100163315ustar00rootroot00000000000000testsuite/Testsrc/Testsbin/bcfg2-crypt/000077500000000000000000000000001303523157100204535ustar00rootroot00000000000000testsuite/Testsrc/Testsbin/bcfg2-crypt/all-basic.xml000066400000000000000000000023641303523157100230310ustar00rootroot00000000000000 U2FsdGVkX19C6Cy0nM0mlcGGBjqBMAC+GqyPfLpqgT0= U2FsdGVkX18KUHJTHdrgz3gWtNA5U3g3gq0i/AsdCVE= U2FsdGVkX1+9nUFxmbl8UJh1t5fWo4cQQa5nQm8hVtw= 
U2FsdGVkX18tScJs1si9y45NxPkjYj66Ee+TsYDZAd0= U2FsdGVkX19q3USU1cnvgfV8roHeSNSQ2bCMD1CCR3jE0e53aT71ATtqHmfsJfDnTgQ28xbKGZhAwoML8ixXkdkyqnsSF69bnIwebaI4qqYXFA2FWF1Cop3bYEV67m6dSM9BkSluKIcs7VdPRANE71OQnd9P2cQbMig50IkBtuE3El8bnc+n4E0k31NT7ZZJ9s/9FJMHg/AfIjvB4KgMqylHcfT43gGICeq4JYPKIsxYjKq0bzFISPBgztD1++YTdCKbJDtjNJJOlqanB3LsBR3PQt6rliWqqPVT3aLP8BU/gIcGE3oyyce04ULxNGTPqFlWgw2r7RopygqgZbzTgU21thzef7bXRi/NATQpXkM70BdCLwRvRNaC3JrMY/z4k0//QliTiNYPNejpGvwezHf99PTN7VPWMhQyONSpLO905KmEJYRt1BXx8p+72b1Q/1S/QFfzU8JU3MO4yiLFf6kyB6nS+pCA40g/UfwKLQI2Fr6Oi5acOK7SRTXqSmxhI+96TQms6bWmm889BO8QOfiuAI1CvHWEBljPACXydcM9wACjhBvpra41UzVgkzadaUO8yBV0Z0bLVHuyIdI1I78vgrkd99tJvC/AmYazEwM= U2FsdGVkX19aE/IqfkkhgkbhA0i9cb1PYp7tdTmfidg=U2FsdGVkX1+J4nhfxE5GjwDF1PzOjw2q5e5vrcFZyCQ= testsuite/Testsrc/Testsbin/bcfg2-crypt/basic-des-cbc.crypt000066400000000000000000000032151303523157100241160ustar00rootroot00000000000000U2FsdGVkX1/2LyHZkqC9Ri2CeDw45osMNNUwOMraHGD/Z3jsHekaU5w6YlTZ3DUPGJoNKsPiUp0jdheuvFGyv2jmXn4ocIPixcliR16n1lWTv5yE+lleELRRqzRCR+cYjmXJzAHg2L5YJ+/XwZR0tlN5cod81dsHiaVw86K7MU8SlkTuN+QSU24CFucuhi9cXTLt0E7ipIded4J5qlDS2ZZIR/m7LG++VS8jc47S65VHR5VHg6cTQ130b78eG8344LM7i4hVcH1Pm0YOm1Kq3Q+IrmyiPmJKFb1lABoEJHCKAlQ85H5xFdbnXP9FbcZVq9zLmIoB3RsGWLLZobY93FhwXN86VZ2APVn7rzLPZCk8KPqkxGoErUZl4of6eQMvi9Owvf7s6fhkKlG7c03BJ8FtbaKOGob9Y2vM2Vjs322QXnJnGXWZ2ZJLu6+OAdVNUueEhqe12SKIc9Olzv6Gn60tiB5XByqAuY8y6oPm3F+gCdbReh1Lu7fVVGP6brkgZWzoBv/T503nOU/Jw3yRSd9nHwBkiMA3HtjzgWZrXf+TRW5apSAoDAeX70nBR4GtqQO3GozFZgqvr4yzLHo1m3/+mB/svNjeMTbRKp5cKs+47C8rno3+asbVtykrPBnqgC16WRa+fB/juxjpRaEK7dbzhk701lNbJrXKT0S4S5Pdir+SDl1udgpTRiy1MaL9JnwQ5w5du6XfLUH43Q2UiMpNABQAyxHZDsdEcxzj16e7wKXVVguS2mqN8TrWhC1K7ZZ+q8BHZaV1c+R45CwT1nIuGoYzKe0Wsc3WtY8yYTbKJPeJQyL7qSyC/M8cV8RkkDGTgJ/FuRlsm5R89enBFRgb5tmT3S6pG6WxTsxcOyXNwc2VP47dJuetguiEYI8FX6pWbfwSfd1HG7uKkD3QMUYU1YtzCziNLNpag03jd/Ios2q/gR6agKGAzVTSIGeaNwpervlXiHXAdGThWXbqTy+1Zbp4OnEbLGEtksedpTD6Ij+JNFSgyIxDbBdrxvx0EZHf8GeIAiOifRZw3XTxla16oKylv2FnUJMJlwONOyswmwduWaY+0+LYHygzJYyF9YoJVYCASQpz5dll3Z1U/5vKqzrY8/88/SLINPP8nQ6mV1JbnXuRS8CD/LbzI3T0CTDKrsxQkkFsArtdUpYRHBqsPYyuikmG1GGeEtRznkNqcE2EXKoNdIU4Rm6GIK6thes3bYqnp5MK/HLQ2vKkOraSR3IyQJXW7EUVCqZk3M5wFGem+8aEw3sjYljrxNzSAB2EPitnxpBp21Mfu+6SmlGWOXANBNSBG630JzNgfqh31SKrWCw6jyFYrI43UXJEsAdEerfKK/UKBs2rb7Y/DGNF1lM9LDxH2nQ5ByELn/PxxpRdKYoXgsHmPwBHMuXhjPXxY1uw25GZgVVk58GR/l/Q78sRiqiwr2D0rriUTQfLW+bqpxEbdqxJjHa/rGjZQffHC08/xqY9P2FHqcE9VIGH3OdpoM6FKQ/NfBF8FMnkp756JC+nUVgFPTQ5Wn331DlUGyf4MVH6mKskUjOKGCnbt7GnG2K64eFcs0q8k0UK4f8lACtJEA5zSX2qNhPdkpG/yHyzvvLuX2p7j53QyIvX1Nz9k7ZoqD1rjhjDSsfV9C36KORyzqm/MKq8v8YVhEfnGI2UtWM= 
testsuite/Testsrc/Testsbin/bcfg2-crypt/basic.crypt000066400000000000000000000032311303523157100226160ustar00rootroot00000000000000U2FsdGVkX1+fzWyB9BSYjGZTIEw6WGCFJvgWeS5BlGSVcLK6cDuoajcTj47falmJhrCg6uew41tps12UDaGe8Uaw8gzZnqGBCRYYSCazD3mqGsUQDbHdjRdsN5C1uTBlrUt52GuqP0EHEmJrqcmNYCkFw2KM8/RCflpuHkEj3L44PTsuq/TsxUmNLkn01z+U9Y1qK13bl60Gn0T3EYkI1hcO2JzyHW0HHwwe2E9xRB1u56Xj9xL8EozshlafTN2J9iIysG5j7P2dlmGHBQtVgrvbgZ8SycmiIOSenRgpln/Ax9WAwcXXbcvCcSUqxA5CrjfQAkfhMuHhYIoI/f5KJcLKnf1lnOXXlFaQVVLlvMxaw7v67jFOOIJekrphgnIF/VviR0ShP8KFvtTrG56lxHHP2ngZHPg6CxqUtyhCEMp2rES1s0PcAd4xvzUlrgZLvHpphZ0ZVtknDF5hmnFuO/Y1Ebo1PvN2vty1GH/XPcLyzk+Iks92hvYmeOy9fJ+9x+myaHCW/xbwesYrkcUnFvvovres20vM981Dv4GRgO1Yej98ac60wNEu8QigqCJCbVnst4g6+dEhd9xzHp9zsf8cCGcJPWp6BUOdk7Z96uA5UwOsbOgyShlLsp2eWnKAb/5HKx8C8JXdCPD7lFNa4QvrfrH6a1HUugZ7oxU8oQ1mPrg8t6Jf7HyMbBpKdx3BGwnb4yxaOPhpW66OXyFuU0l7yHx5tAzG4ee1HqA8wp5bpvwszCQcLuB5a8s5x+tDs/4pb8bb6f5kV4EnYXza9v8tVCwPegdz8IuluPGjbcO9XM8jjTJ6UDZPIOxNKGoxYUl0RKbAKJ1QC51/d2uNFGcd6j00zs4P5P9G5Pqyy1IMUHqCBpbIO1LI5SgHf9E51NYUjTZJMgxgDfXgo3isDVI39Bob5aNHdjJKWBalU4u5BOWosQ27Du7BmXfAF4CeB4nnilTC/MqvUp7i9RRBfiw9jiPxE1tBtjkqIRDf7k1nGtDWhDcP64ph/Iz5IJ/krafzNew36KAHwYISlDN8KYH5gEewixst7oqiRteCT8D55Es+7645MWHoLa6x/LHOT3sYcXQN+fd8SkjKbR8HBT5upAI+8/WWnU/umodJoyTW62xAhjFQVszj5S6451uTEEf6PnBEpbNaYgQhrUv8bX+WGxNL+Q4XmOUdM8holHyM2Lz7saMG5HlZMlYR6DClQqh5QLG1gmL17+ozARKfQWIXBtsWh5/aTlXVwTDAu8p50abgseNswRWm1QkqtDYxVgsj3qfdtOQo/K/gkHx1dJbkZVCVBRR2DNyKT1DZjU+6q8UJZmZ+JUZ1qcP16y+bc4WOXgBdxvDVWwsR1oe9GokQAHovxTms8VtW5+am37BXpJ+nRo6dVo71yaHXdjf6wQgxzFe9eo1BXypxtx+y5jEuN788d0jwLrZ8bF2KrnbzOinch2WkU+7eFt4pHCClX38hQ/d28q8UOyFEbZp1N9Guwtun3NzX4V0DbcZxIxJDGo45uPLDcT6r9dhAUwrfoM+atTEOM3K4BOBEyxehs7NoZrObWdy0FcrKokCT3iHOqImhVdJDnDYDtfqLFi/iycZ8mUnt6eF7oNLLh/TdB+HgC5bf684YPGYCGznAfjOVE8eIKsG5l6DqGDBxx4fh+Hb563aX/UA8tKrnMhcD/A== 
testsuite/Testsrc/Testsbin/bcfg2-crypt/basic2.crypt000066400000000000000000000032311303523157100227000ustar00rootroot00000000000000U2FsdGVkX189QQFkAJ2EZErSsJ7h16U71KMeKEHIC803ktBOL0sBOEdWPKN/0G7Jt4mchdGSeBBCFcxGIJ7yGvhThWxgAsCNCZCPbCCGW7NQ8b01+WRzeggfHYuFw49Mwvr9Dqh97z6HelvvoNr90DW/IuxGrJgKzeP2obbdUn0MRuBNx//JjU9lTDRSCU6i5HR/IJvMdYT9iR7icJa0Wjtl6UXjoZXf2azL1jRLrtG2QtMmUI0PQcg9IDlH1Eiosut2LOcmCCThzR96ubQqjRCAggWJqR1x7qe5+Llkq47jPauWuD1RoH7OLTm1EcVRd68ZtfYPHK8tiu3xZqcmnHC5bHedM9hdwiyDqm/qhdnz5cWA4rlWp4VAFTmJzyt4VsgTTXDbCbRHtU0Ml7E36lg4fMekfql1XT+tiNxmoedAWGOXXbvf8rdyH6EZ2uFBhPtX+2kV66XsNBALAizqwVqpw/wdKVkWGRMsi1p8nE4wj6/WcHBd7YYJBig4UNyqdPTsn2dMhKi3GbzgNvkmNo+V6G3IZ0cqy4tBZkBW3izVfN8e5BEUZ6BR4V8uC/NvTSd7fcJSJZROw/RR2zdTo7qDqgzz27aZrPghDfmLYB26VGn56geSVwEFraDeaUZxa17UiBLtGYPb8rZRQkn0ACsfHu8r2PVt6p/W6oVnZ/05icvP2NjGtWWbzXRpHZEw/5R+cRsfTgi2HpWpJwpqt6Y+dxXb5mpNkwL3tM/p9i1+LKm+jxknkmtCW8rxeHseuvX3WIcyAP7JEBzv76xliKBQP0lpvM53fNcgsJvzgphnwUGAlht+2Y9ZBlKSdGyrft+K4hxYWRcCWMg8+s/VA1VkHl3+JOzwmgfGFx3eVcZnul1Xq58BGWRmVXo0PfSo0VCZuhVuegF2Odqfrx2w57CwKESxDkKGt7h8m7CexJbVHzawJlC5ZbP6iLHCf2rHXIcD/K4Qjl/rbDLvMrHpazSOpaIckYdXFeA3UglEFQ4m4o8OEYJ1rH8z3jQt4A+3L4+8+8tzeTIfnVgA2ouqs00XcoFoPSSvbzZstawJubBR6/pOwYjJAAJTVMPw08X/8qM1NJHSvxgkDlWrm8uUfgGbG6ftkCKlbEuJmWLH2I74zHa0/2j8VtcoHjFkbQc7Slg/pM0IC55ZJhgaZ74qsGJaGz5FpdYx8Fz+riqBTGZA7LpVzsYGl/f4Cmtvf6v0f2pCypb1bMWp+IEn2w7StGVd6oSuc9+wgBi9RWAmlB27MkBvKnx+IuUPpKtUSUG+YM3mFUl9f5A0spMdTiDPdh8Wegqx5S2PLKt61YvoMS8vSQTGxMf3A3WhykYGrFxq8l1zKcZfD7x4oPj398rfBGQPpemuiTuDJeGtsKta8ERgLw1bgrtEu6PWvEf4bSCLYa1x7RkrOihDPyn+jP0v/1Cb7+eQR33tVwNYdN7ZNRzvbn7JHKuci8DrZU2961aA+7t1UMjSaJ06RG4pm120pQckRi3vALxh583KlqpG5a8+DAHq46h16W7PC9IbDdb5YRgAo0QwOnIdFLj4EKt5GefOspnfOWM5gROU33l4J2n+5IsH8d0eOTGXlIVmyye3fXMTTI248DMchBR/xUKisujZZsvjFutxYYX/RGXoIHllmycpAsqnMBPNj6edjPruAqnyDzHpS88Y3eBSv446GA== testsuite/Testsrc/Testsbin/bcfg2-crypt/bogus-forced.xml000066400000000000000000000003201303523157100235470ustar00rootroot00000000000000 U2FsdGVkX1+uFQUijBDQpGBdTroNS6nl8lPUSeHcWJw= testsuite/Testsrc/Testsbin/bcfg2-crypt/bogus.xml000066400000000000000000000002551303523157100223160ustar00rootroot00000000000000 some text 
testsuite/Testsrc/Testsbin/bcfg2-crypt/complex.crypt000066400000000000000000000032311303523157100232040ustar00rootroot00000000000000U2FsdGVkX1+pB62f8PRmn7i+qzc0nVvezv4sL6KXA4SaTWGvai48caajA480b//AfSkSaLK9C5pDFk0hf0HvoSK/qFdEL/3sChswE2EXJqcZCQxSpexQPhd4t/ES3m40PPO+WmK3AemjvNSv9oQdIXPlgwboVBoBfJ61onPqdQSrDtheDRsaNzOA9kYe5dttl7PwKz9UTj8Ds7rRXvrFdXvmX/KvwWHxQvHmz7RzFfMzBQ7AS+yKo4JPVMSA6peBQRsmu8n/c+G4q0MXntmPRfSELyxX2BG9kIUKJEmCgJHVOnN6Yd/wseKQWbWFut/vB+tq+83yTQKJmEtDx1EAupXv3oBNDpDR2wjLsijvAQCaBkvalHyLEC8/KlC3P6VMUObiaHOMIxcMX9wH5C2jGYCTZ+tlAnzM9RfKkXPEq0sXbTpRy0laRWfV6c39PDFgTE3qBy79Reh4V+YfqG0X2jFR3bbcG1ZFc/QL20X+6LvfP/cxf/zepZ1sa9yJmAw386xtt7tDohdZBmvwOgueqbHhlRnf2hXggSUBHLxspAKsy8kyHzSFBy71qzmXo4j3C96MFf/SCCDJsX1P2yEdny7hKxHKqvQBFR2JlN+m0DFt5rnhiWAkLbpJsBdy66hZ9dmWa3IfKhO9EDkAFf5ts7ZXPgimpdj/A7IjtD19FmBLZ/N29spA+InK661iHwc3HVJ14AorA9gOyeCW6p3GBS/BzENALEDrKk3O9w0Dk4pRcrlRgvEK+HZF6CDVV5jh188Lt3eWReCfBQ9Cwoj/sDY16WNTsqJnA9f+FIUW7WxSpUA5g3bk2IR3pfnzXLfxFpUUGy7v7y7EvOMOWH27550NKtXOszcS72rRzwf4jze7mN51Wt1n9iKKpmoUZNwJ6E6vaU2o/NyuWrWHMWPtTusdQsVMkm2J3TddvMFp6u8mTMIj5ypMuz8+9f2QgLjWrCTY0llVHQfQvELgSSNoNBMfpFKqZt0qk4Zoo0a/CRFj+wEv0zOwF8A0HAtIXrnF4zeiLjDlN4lMrR67hKOuHNLpR6hqkCUSo2blf3d3AiwN6YfLHNuVywuUHINVeoyzls1q6WXuLQuO3HOdpMOWrp4tA6PoDAia36qT8+WhK/hrzinEbiu0Wvd9uv7Ie/veAGBcqW+4SOu0kJqQLgnQfGXvUoxs5TvgbTsrs8TW9CtihR0u0vuuec4El6VEA0xVDKbyI1H2ac3X44qEXwVwWqC2JJdy9O2uNgEQhGah+qLiumvKQElZngMw/ovE76rysF8eO7z1Hkt45raOcodUcAnvTgvGYrVwPKa3a4ShBpT/94IjbsMQ/S9dsnACCps7/TGtqgS96XsookFX8m9x2oycJnpiF59+UlcyyxY6e+vAkl11eIwH/niEpl3JzGKGUi5rs3y7MiLR12ZAoI5R9xBpSY8nm9OGNN5lO242cDnstdjhd7Qz2bR8/KDFQtIVLgxhKrRaUnQr0CFBQ/bW0RS0E1SdFssixvvPohlAXoBQssVOUPHVXbk9On9k8ebQ9n9lhd6L2dK0HydVUFLr0vtVCTv049aLcI3yQ8a6TrOxtVy793hzwY0UZC8X5pjYqT6V/ddyRWzJQRvXnK5KzL4KHWSOXTvwam8TYMp3TdBEi8G+Raydi2kVSeGYEjdjkGSfGgw5kRoeDqkworeZSA== testsuite/Testsrc/Testsbin/bcfg2-crypt/plaintext000066400000000000000000000023221303523157100224050ustar00rootroot00000000000000Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla in elit arcu. Morbi interdum fermentum magna non molestie. Curabitur imperdiet, mi eget ultrices porttitor, dolor arcu dictum purus, eu tristique felis felis eget leo. Suspendisse dignissim laoreet velit, id bibendum leo. Etiam faucibus lorem nunc, eget laoreet tortor feugiat at. Fusce at ornare tellus. Donec dui neque, fermentum quis ante ut, sodales commodo magna. Proin nec malesuada risus, ac consectetur mi. Praesent pharetra eleifend lacinia. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Donec odio metus, dictum vel commodo quis, tincidunt in ligula. Aenean et orci non arcu lobortis ultricies. Ut ipsum nisl, luctus sed porta nec, vestibulum pharetra tellus. Praesent consectetur condimentum nisl ut cursus. Etiam aliquam nisi dolor. Mauris aliquet condimentum neque, sodales laoreet lectus venenatis ac. Morbi mattis justo odio, ac fringilla leo egestas ut. Integer nec sapien pulvinar, ultrices nulla id, posuere magna. Quisque in tincidunt sem, sed vehicula orci. Nulla blandit, nisi vel cursus semper, nibh metus consequat purus, ac ullamcorper dolor lorem vitae ligula. Maecenas non consectetur nibh. 
testsuite/Testsrc/Testsbin/bcfg2-crypt/plaintext-all.xml000066400000000000000000000023701303523157100237550ustar00rootroot00000000000000 U2FsdGVkX19CZCt2ydtozka/HuG9Iay3Dpxs/pR7byM= U2FsdGVkX18RmlLRK6CSIww69iuUTAb1xOkA/2dZw84= U2FsdGVkX1+h5sBn5Ms1FXe88o69Wc0tE99Nuck++tQ= U2FsdGVkX1/NcWDYbvU1fUWry44xvFxYQXodBoTs/Ek= U2FsdGVkX1+02mmcZw+h/QkC+Qr48bjy198xcivfopvaK64xzBe25fEBADvIG7Qab+BxZdZAPWFgX3toBVQFVjQ6M6zf1lrNeciK39LSDj4v3mTIg1/gvew8TUGeQtkrU/xo8ShEAiExma6ILf7Qq6PTc5IdBfuB85bn5YtU1tN4YTiUbK3/DIkTRJI+8YI4GbhFBKeaqMkau5498YdwhxpE0LB0sxTK3Bjw23nwOfcLLXH0uYux1JCxDgYJ9Zalx0qPUthrNnEq2mR9R11lZGmRQD8ToN0/7eS8NkZs3j5TgefbzNdpK7yThbXHFPNwuD6I1AwhQ5oJ//iOkVGpAMXvdPkEZCgKthXnze/X99J0MphTq6oD7XGrY+Sj5EwVzv8X9Mux96QtFylCIGhNllkCAqb3Mzmsr7ZIEmauAr+eTkuRASjJr7XsQKSL5hoLFtF/vKnzTx6YjVETrbXkczZUhA2n7C3HF1OYAozPZmd2WTF7/15jcWCKZB517dfKr5GC1q10NlbiujEUfb/8JnVRg9JfK5r6oXcdfxbODbLchzU+/h2sRRjSVdN6wcXrX+bVMJG9P6cLiPR3oruBUHf/dbZXg06Mp1bqazbOpJY= U2FsdGVkX1/keWAAgSOnVvhoEDWzmRQWaf3mxOy749o=U2FsdGVkX1+O10Id9f9FUcavHi8JaQWVNlWm/jwQ4f4= testsuite/Testsrc/Testsbin/bcfg2-crypt/plaintext-xpath.xml000066400000000000000000000022171303523157100243310ustar00rootroot00000000000000 U2FsdGVkX18bAwhcMtr8J02ztT8kBjdCjae9lYnbsRY= Some text U2FsdGVkX19+Yq+VwbAfNGUHtnB7hy74V7Fvz0GHsqA= Some text U2FsdGVkX1+A3f6lIoFvCNaC6/ctbOLqT0z/YCJ+FkeyTgAnmU1/wk7FlPxOaPPkI2iRfEbNK2sNtUS0rQ8TYT3gIRO3qyrUNIcHaYfGerAZN3Xg9F3CsaL1NQjHxSKyJLdTmdB/1m0AQ3jw13n1eNrokGHF6HU6bD6TIJVFmds126ucOg56Xh+3ffOUukh2EwlBxnvGC/CDQluFixL1MY74xFd5mY5iDcJG9o5qUtjKmiOrtEAXFuM49JLciDHtMjQ2wbX/9lGek7U2Y05I2vU8BGtD3jh/Pt+17Vql80UrNHqVnWx247sxgYmkJIaworCTNowU2KsWEQj46E1bzAXEUVdGF65ltIXIK75KozHf8msKuVFwQDYCJ+lXRZgIygqcZ5glAyjW1WxyigxSFpRfVcZfiHp7d52JfBCU66367j7DvEnAJAuvL7jufJSavd6RxaEGGB3KGAMpz8NQxPy6i2s5RkY5V8eiqUOHsnZN6zHPgkZ90a+dokllLbH+HSYGU26sevJL4TupDCkz2/sRasmBB8fBAF5PGOI+UC7vXncbvpsMLsILFoUYtyWrDZ9cygOElEmWpVJSeECAA09iOhyaXN5rN/tyqkt3+ao= Some textU2FsdGVkX1+cSl37JVEVIEV+bqVBlMGQnZdZWsjHPME= testsuite/Testsrc/Testsbin/bcfg2-crypt/plaintext.xml000066400000000000000000000015721303523157100232120ustar00rootroot00000000000000 Some text Some text Some text Some text Praesent consectetur condimentum nisl ut cursus. Etiam aliquam nisi dolor. Mauris aliquet condimentum neque, sodales laoreet lectus venenatis ac. Morbi mattis justo odio, ac fringilla leo egestas ut. Integer nec sapien pulvinar, ultrices nulla id, posuere magna. Quisque in tincidunt sem, sed vehicula orci. Nulla blandit, nisi vel cursus semper, nibh metus consequat purus, ac ullamcorper dolor lorem vitae ligula. Maecenas non consectetur nibh. Some textNested text testsuite/Testsrc/Testsbin/bcfg2-crypt/plaintext2.xml000066400000000000000000000015661303523157100232770ustar00rootroot00000000000000 Some text Some text Some text Some text Praesent consectetur condimentum nisl ut cursus. Etiam aliquam nisi dolor. Mauris aliquet condimentum neque, sodales laoreet lectus venenatis ac. Morbi mattis justo odio, ac fringilla leo egestas ut. Integer nec sapien pulvinar, ultrices nulla id, posuere magna. Quisque in tincidunt sem, sed vehicula orci. Nulla blandit, nisi vel cursus semper, nibh metus consequat purus, ac ullamcorper dolor lorem vitae ligula. Maecenas non consectetur nibh. 
Some textNested text testsuite/Testsrc/Testsbin/test_bcfg2_crypt.py000066400000000000000000000401141303523157100221460ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os import sys import shutil import difflib import tempfile import lxml.etree import Bcfg2.Options from Bcfg2.Compat import StringIO, b64decode, u_str from mock import Mock, MagicMock, patch # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * try: from Bcfg2.Server.Encryption import CLI HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False class TestEncryption(Bcfg2TestCase): cfg_plaintext = None known_files = None basedir = None @classmethod def setUpClass(cls): basedir = os.path.join(os.path.dirname(__file__), "bcfg2-crypt") cls.basedir = tempfile.mkdtemp() for fname in os.listdir(basedir): shutil.copy(os.path.join(basedir, fname), cls.basedir) cls.known_files = os.listdir(cls.basedir) cls.cfg_plaintext = open(os.path.join(cls.basedir, "plaintext")).read() @classmethod def tearDownClass(cls): shutil.rmtree(cls.basedir) @skipUnless(HAS_CRYPTO, "Encryption libraries not found") def setUp(self): set_setup_default("lax_decryption", False) def set_options(self): Bcfg2.Options.setup.algorithm = "aes_256_cbc" Bcfg2.Options.setup.passphrases = dict( basic="basic", complex="1234567890əùíÿł¢€ñû⸘" * 10) def tearDown(self): # clean up stray files created by tests for fname in os.listdir(self.basedir): if fname not in self.known_files: os.unlink(os.path.join(self.basedir, fname)) def assertExists(self, fname): fpath = os.path.join(self.basedir, fname) self.assertTrue(os.path.exists(fpath), "%s does not exist" % fpath) def assertNotExists(self, fname): fpath = os.path.join(self.basedir, fname) self.assertFalse(os.path.exists(fpath), "%s exists, but shouldn't" % fpath) def assertFilesEqual(self, fname1, fname2): self.assertExists(fname1) self.assertExists(fname2) contents1 = open(os.path.join(self.basedir, fname1)).read().strip() contents2 = open(os.path.join(self.basedir, fname2)).read().strip() diff = "\n".join( difflib.unified_diff(contents1.splitlines(), contents2.splitlines(), fname1, fname2)).replace("\n\n", "\n") self.assertEqual(contents1, contents2, "Contents of %s and %s do not match:\n%s" % (fname1, fname2, diff)) def assertFilesNotEqual(self, fname1, fname2): self.assertExists(fname1) self.assertExists(fname2) self.assertNotEqual( open(os.path.join(self.basedir, fname1)).read(), open(os.path.join(self.basedir, fname2)).read(), "Contents of %s and %s are unexpectedly identical") def _is_encrypted(self, data): """ Pretty crappy check for whether or not data is encrypted: just see if it's a valid base64-encoded string whose contents start with "Salted__". 
But without decrypting, which rather begs the question in a set of crypto unit tests, I'm not sure how to do a better test.""" try: return b64decode(data).startswith("Salted__") except UnicodeDecodeError: # decoded base64, resulting value contained non-ASCII text return True except TypeError: # couldn't decode base64 return False def assertIsEncrypted(self, data): if not self._is_encrypted(data): self.fail("Data is not encrypted: %s" % data) def assertNotEncrypted(self, data): if self._is_encrypted(data): self.fail("Data is unexpectedly encrypted: %s" % data) def _decrypt(self, cli, outfile, expected=None): self.set_options() cli.run() if expected is None: self.assertExists(outfile) actual = open(os.path.join(self.basedir, outfile)).read() self.assertEqual(self.cfg_plaintext, actual) self.assertNotEncrypted(actual) else: self.assertFilesEqual(outfile, expected) def _encrypt(self, cli, outfile, original=None): self.set_options() cli.run() if original is None: self.assertExists(outfile) actual = open(os.path.join(self.basedir, outfile)).read() self.assertNotEqual(self.cfg_plaintext, actual) self.assertIsEncrypted(actual) else: self.assertFilesNotEqual(outfile, original) def _cfg_decrypt(self, opts, encrypted): if encrypted.endswith(".crypt"): decrypted = encrypted[:-6] else: self.fail("Could not determine decrypted filename for %s" % encrypted) cli = CLI(opts + [os.path.join(self.basedir, encrypted)]) self._decrypt(cli, decrypted) def _cfg_encrypt(self, opts, plaintext): cli = CLI(opts + [os.path.join(self.basedir, plaintext)]) self._encrypt(cli, plaintext + ".crypt") def _props_decrypt(self, opts, encrypted, expected): test = os.path.join(self.basedir, "test.xml") shutil.copy(os.path.join(self.basedir, encrypted), test) cli = CLI(opts + [test]) self._decrypt(cli, "test.xml", expected) try: xdata = lxml.etree.parse(test) except: self.fail("Could not parse decrypted Properties file: %s" % sys.exc_info()[1]) for el in xdata.iter(): if el.tag is not lxml.etree.Comment and el.text.strip(): self.assertNotEncrypted(el.text) def _props_encrypt(self, opts, plaintext, check_all=True): test = os.path.join(self.basedir, "test.xml") shutil.copy(os.path.join(self.basedir, plaintext), test) cli = CLI(opts + [test]) self._encrypt(cli, "test.xml", plaintext) try: xdata = lxml.etree.parse(test) except: self.fail("Could not parse encrypted Properties file: %s" % sys.exc_info()[1]) if check_all: for el in xdata.iter(): if el.tag is not lxml.etree.Comment and el.text.strip(): self.assertIsEncrypted(el.text) def test_decrypt_cfg(self): """ Decrypt a Cfg file """ self._cfg_decrypt(["--decrypt", "--cfg", "-p", "basic"], "basic.crypt") def test_decrypt_cfg_complex(self): """ Decrypt a Cfg file with a passphrase with special characters """ self._cfg_decrypt(["--decrypt", "--cfg", "-p", "complex"], "complex.crypt") def test_decrypt_cfg_algorithm(self): """ Decrypt a Cfg file with a non-default algorithm """ # this can't be done with self._cfg_decrypt or even # self._decrypt because we have to set the algorithm after # other options are set, but before the decrypt is performed cli = CLI(["--decrypt", "--cfg", "-p", "basic", os.path.join(self.basedir, "basic-des-cbc.crypt")]) self.set_options() Bcfg2.Options.setup.algorithm = "des_cbc" cli.run() self.assertExists("basic-des-cbc") actual = open(os.path.join(self.basedir, "basic-des-cbc")).read() self.assertEqual(self.cfg_plaintext, actual) self.assertNotEncrypted(actual) def test_cfg_auto_passphrase(self): """ Discover the passphrase to decrypt a Cfg file""" 
self._cfg_decrypt(["--decrypt", "--cfg"], "complex.crypt") def test_cfg_auto_mode(self): """ Discover whether to encrypt or decrypt a Cfg file """ self._cfg_decrypt(["--cfg", "-p", "basic"], "basic.crypt") self._cfg_encrypt(["--cfg", "-p", "basic"], "plaintext") def test_cfg_auto_type(self): """ Discover a file is a Cfg file """ self._cfg_decrypt(["--decrypt", "-p", "basic"], "basic.crypt") self._cfg_encrypt(["--encrypt", "-p", "basic"], "plaintext") def test_cfg_multiple(self): """ Decrypt multiple Cfg files """ cli = CLI(["--decrypt", "--cfg", "-p", "basic", os.path.join(self.basedir, "basic.crypt"), os.path.join(self.basedir, "basic2.crypt")]) self.set_options() cli.run() self.assertExists("basic") self.assertExists("basic2") actual1 = open(os.path.join(self.basedir, "basic")).read() actual2 = open(os.path.join(self.basedir, "basic2")).read() self.assertEqual(self.cfg_plaintext, actual1) self.assertEqual(self.cfg_plaintext, actual2) self.assertNotEncrypted(actual1) self.assertNotEncrypted(actual2) def test_cfg_auto_all(self): """ Discover all options to encrypt/decrypt Cfg files """ self._cfg_decrypt([], "complex.crypt") self._cfg_encrypt(["-p", "basic"], "plaintext") def test_cfg_stdout(self): """ Decrypt a Cfg file to stdout """ cli = CLI(["--decrypt", "--cfg", "-p", "basic", "--stdout", os.path.join(self.basedir, "basic.crypt")]) self.set_options() old_stdout = sys.stdout sys.stdout = StringIO() cli.run() output = sys.stdout.getvalue() sys.stdout = old_stdout self.assertNotExists("basic") self.assertEqual(self.cfg_plaintext.strip(), output.strip()) self.assertNotEncrypted(output) def test_encrypt_cfg(self): """ Encrypt a Cfg file """ self._cfg_encrypt(["--encrypt", "--cfg", "-p", "basic"], "plaintext") os.rename(os.path.join(self.basedir, "plaintext.crypt"), os.path.join(self.basedir, "test.crypt")) self._cfg_decrypt(["--decrypt", "--cfg", "-p", "basic"], "test.crypt") def test_encrypt_props_as_cfg(self): """ Encrypt an XML file as a Cfg file """ cli = CLI(["--encrypt", "--cfg", "-p", "basic", os.path.join(self.basedir, "plaintext.xml")]) self._encrypt(cli, "plaintext.xml.crypt", "plaintext.xml") os.rename(os.path.join(self.basedir, "plaintext.xml.crypt"), os.path.join(self.basedir, "test.xml.crypt")) cli = CLI(["--decrypt", "--cfg", "-p", "basic", os.path.join(self.basedir, "test.xml.crypt")]) self._decrypt(cli, "test.xml", "plaintext.xml") def test_cfg_remove(self): """ Encrypt and remove a Cfg file """ test = os.path.join(self.basedir, "test") shutil.copy(os.path.join(self.basedir, "plaintext"), test) self._cfg_encrypt(["--encrypt", "--remove", "--cfg", "-p", "basic"], test) self.assertNotExists("test") def test_decrypt_props(self): """ Decrypt a Properties file """ self._props_decrypt(["--decrypt", "--properties", "-p", "basic"], "all-basic.xml", "plaintext2.xml") def test_props_decrypt_multiple_passphrases(self): """ Decrypt a Properties file with multiple passphrases""" self._props_decrypt(["--decrypt", "--properties"], "plaintext-all.xml", "plaintext.xml") def test_props_decrypt_mixed(self): """ Decrypt a Properties file with mixed encrypted content""" self._props_decrypt(["--decrypt", "--properties"], "plaintext-xpath.xml", "plaintext.xml") def test_props_decrypt_bogus(self): """ Decrypt a malformed Properties file """ self._props_decrypt(["--decrypt", "--properties"], "bogus-forced.xml", "bogus.xml") def test_props_decrypt_auto_type(self): """ Discover an encrypted file is a Properties file """ self._props_decrypt(["--decrypt"], "all-basic.xml", "plaintext2.xml") def 
test_props_decrypt_auto_mode(self): """ Discover whether to encrypt or decrypt an encrypted Properties file """ self._props_decrypt(["--properties"], "all-basic.xml", "plaintext2.xml") def test_props_decrypt_auto_all(self): """ Discover all options to decrypt a Properties file """ self._props_decrypt([], "all-basic.xml", "plaintext2.xml") def test_props_encrypt_cli_passphrase(self): """ Encrypt a Properties file with passphrase on the CLI""" self._props_encrypt(["--encrypt", "--properties", "-p", "basic"], "plaintext2.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt", "--properties", "-p", "basic"], "encrypted.xml", "plaintext2.xml") def test_props_encrypt_file_passphrase(self): """ Encrypt a Properties file with passphrase in the file """ self._props_encrypt(["--encrypt", "--properties"], "plaintext2.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt", "--properties"], "encrypted.xml", "plaintext2.xml") def test_props_encrypt_multiple_passphrases(self): """ Encrypt a Properties file with multiple passphrases """ self._props_encrypt(["--encrypt", "--properties"], "plaintext.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt", "--properties"], "encrypted.xml", "plaintext.xml") def test_props_encrypt_xpath(self): """ Encrypt a Properties file with --xpath """ test = os.path.join(self.basedir, "test.xml") self._props_encrypt(["--encrypt", "--properties", "--xpath", "//Foo"], "plaintext.xml", check_all=False) xdata = lxml.etree.parse(test) for el in xdata.iter(): if el.tag is not lxml.etree.Comment and el.text.strip(): if el.tag == "Foo": self.assertIsEncrypted(el.text) else: self.assertNotEncrypted(el.text) os.rename(test, os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt", "--properties"], "encrypted.xml", "plaintext.xml") def test_props_encrypt_bogus(self): """ Decrypt a malformed Properties file """ self._props_encrypt(["--encrypt", "--properties"], "bogus.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt", "--properties"], "encrypted.xml", "bogus.xml") def test_props_encrypt_auto_type(self): """ Discover if a file is a Properties file """ self._props_encrypt(["--encrypt"], "plaintext2.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--decrypt"], "encrypted.xml", "plaintext2.xml") def test_props_encrypt_auto_mode(self): """ Discover whether to encrypt or decrypt a Properties file """ self._props_encrypt(["--properties"], "plaintext2.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt(["--properties"], "encrypted.xml", "plaintext2.xml") def test_props_encrypt_auto_all(self): """ Discover all options to encrypt a Properties file """ self._props_encrypt([], "plaintext.xml") os.rename(os.path.join(self.basedir, "test.xml"), os.path.join(self.basedir, "encrypted.xml")) self._props_decrypt([], "encrypted.xml", "plaintext.xml") testsuite/Testsrc/__init__.py000066400000000000000000000000001303523157100166350ustar00rootroot00000000000000testsuite/Testsrc/test_code_checks.py000066400000000000000000000307011303523157100204020ustar00rootroot00000000000000import os import re import sys import glob import copy 
from subprocess import Popen, PIPE, STDOUT # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 _path = os.path.dirname(__file__) while _path != '/': if os.path.basename(_path).lower().startswith("test"): sys.path.append(_path) if os.path.basename(_path) == "testsuite": break _path = os.path.dirname(_path) from common import * # path to base testsuite directory testdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) # path to Bcfg2 src directory srcpath = os.path.abspath(os.path.join(testdir, "..", "src")) # path to pylint rc file rcfile = os.path.join(testdir, "pylintrc.conf") # perform checks on the listed files only if the module listed in the # keys can be imported contingent_checks = { ("django",): {"lib/Bcfg2": ["Reporting"], "lib/Bcfg2/Server": ["Reports", "SchemaUpdater", "models.py"], "lib/Bcfg2/Server/Admin": ["Reports.py", "Syncdb.py"], "sbin": ["bcfg2-reports"]}, ("pyinotify",): {"lib/Bcfg2/Server/FileMonitor": ["Inotify.py"]}, ("apt",): {"lib/Bcfg2/Client/Tools": ["APT.py"]}, ("yum",): {"lib/Bcfg2/Client/Tools": ["YUM.py"]}, ("genshi",): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgGenshiGenerator.py"]}, ("Cheetah",): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgCheetahGenerator.py"]}, ("jinja2",): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgJinja2Generator.py"]}, ("M2Crypto",): {"lib/Bcfg2": ["Encryption.py"], "lib/Bcfg2/Server/Plugins/Cfg": ["CfgEncryptedGenerator.py"]}, ("M2Crypto", "genshi"): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgEncryptedGenshiGenerator.py"]}, ("M2Crypto", "Cheetah"): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgEncryptedCheetahGenerator.py"]}, ("M2Crypto", "jinja2"): {"lib/Bcfg2/Server/Plugins/Cfg": ["CfgEncryptedJinja2Generator.py"]}, ("mercurial",): {"lib/Bcfg2/Server/Plugins": ["Hg.py"]}, ("guppy",): {"lib/Bcfg2/Server/Plugins": ["Guppy.py"]}, ("boto",): {"lib/Bcfg2/Server/Plugins": ["AWSTags.py"]}, } # perform only error checking on the listed files error_checks = { "lib/Bcfg2": ["Reporting"], "lib/Bcfg2/Client": ["Proxy.py"], "lib/Bcfg2/Server": ["Reports", "SchemaUpdater", "SSLServer.py"], "lib/Bcfg2/Server/Admin": ["Compare.py"], "lib/Bcfg2/Client/Tools": ["OpenCSW.py", "Blast.py", "FreeBSDInit.py", "VCS.py", "YUM24.py"], "lib/Bcfg2/Server/Plugins": ["Deps.py", "Pkgmgr.py"] } # perform no checks at all on the listed files no_checks = { "lib/Bcfg2/Client/Tools": ["RPM.py", "rpmtools.py"], "lib/Bcfg2/Server": ["Snapshots", "Hostbase"], "lib/Bcfg2": ["manage.py"], "lib/Bcfg2/Server/Reports": ["manage.py"], "lib/Bcfg2/Server/Plugins": ["Base.py"], "lib/Bcfg2/Server/migrations": ["*.py"], "lib/Bcfg2/Server/south_migrations": ["*.py"], } if sys.version_info < (2, 6): # multiprocessing core requires py2.6 no_checks['lib/Bcfg2/Server'] = ['MultiprocessingCore.py'] try: any except NameError: def any(iterable): """ implementation of builtin any() for python 2.4 """ for element in iterable: if element: return True return False def expand_path_dict(pathdict): """ given a path dict as above, return a list of all the paths """ rv = [] for parent, modules in pathdict.items(): for mod in modules: rv.extend(glob.glob(os.path.join(srcpath, parent, mod))) return rv def whitelist_filter(filelist, whitelist): rv = [] for fpath in filelist: if fpath in whitelist: rv.append(fpath) continue # check if the path is in any directories that are in the # whitelist if any(fpath.startswith(wpath + "/") for wpath in whitelist): rv.append(fpath) continue return rv def blacklist_filter(filelist, blacklist): rv = [] for fpath in filelist: if 
fpath in blacklist: continue # check that the path isn't in any directories that are in # the blacklist if any(fpath.startswith(bpath + "/") for bpath in blacklist): continue rv.append(fpath) return rv class CodeTestCase(Bcfg2TestCase): __test__ = False # build the blacklists blacklist = expand_path_dict(no_checks) contingent_blacklist = [] for filedict in contingent_checks.values(): contingent_blacklist += expand_path_dict(filedict) full_blacklist = expand_path_dict(error_checks) + contingent_blacklist + \ blacklist command = [None] has_command = None # extra arguments when running tests on sbin/* sbin_args = [] # extra arguments when running tests on lib/* lib_args = [] # extra arguments for full tests full_args = [] # extra arguments for error tests error_args = [] def has_exec(self): if self.has_command is None: try: Popen(self.command, stdin=PIPE, stdout=PIPE, stderr=STDOUT).wait() self.has_command = True except OSError: self.has_command = False return self.has_command def get_env(self): if ('PYTHONPATH' not in os.environ or testdir not in os.environ['PYTHONPATH'].split(":")): env = copy.copy(os.environ) env['PYTHONPATH'] = ':'.join([env.get("PYTHONPATH", ""), testdir]) return env else: return os.environ def _test_full(self, files, extra_args=None): """ test select files for all problems """ if not len(files): return if extra_args is None: extra_args = [] cmd = self.command + self.full_args + extra_args + \ [os.path.join(srcpath, f) for f in files] proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, env=self.get_env()) print(proc.communicate()[0].decode()) self.assertEqual(proc.wait(), 0) def _test_errors(self, files, extra_args=None): """ test select files for errors """ if not len(files): return if extra_args is None: extra_args = [] cmd = self.command + self.error_args + extra_args + \ [os.path.join(srcpath, f) for f in files] proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, env=self.get_env()) print(proc.communicate()[0].decode()) self.assertEqual(proc.wait(), 0) @skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath) @skipIf(not os.path.exists(rcfile), "%s does not exist" % rcfile) def test_lib_full(self): @skipUnless(self.has_exec(), "%s not found, skipping" % self.command[0]) def inner(): full_list = [] for root, _, files in os.walk(os.path.join(srcpath, "lib")): full_list.extend(blacklist_filter([os.path.join(root, f) for f in files if f.endswith(".py")], self.full_blacklist)) self._test_full(full_list, extra_args=self.lib_args) inner() @skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath) @skipIf(not os.path.exists(rcfile), "%s does not exist" % rcfile) def test_contingent_full(self): @skipUnless(self.has_exec(), "%s not found, skipping" % self.command[0]) def inner(): filelist = [] blacklist = set(expand_path_dict(error_checks) + self.blacklist) for (mods, filedict) in contingent_checks.items(): try: for mod in mods: __import__(mod) except ImportError: continue filelist.extend(expand_path_dict(filedict)) self._test_full(blacklist_filter(filelist, blacklist), extra_args=self.lib_args) inner() @skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath) @skipIf(not os.path.exists(rcfile), "%s does not exist" % rcfile) def test_sbin(self): @skipUnless(self.has_exec(), "%s not found, skipping" % self.command[0]) def inner(): all_sbin = [os.path.join(srcpath, "sbin", f) for f in glob.glob(os.path.join(srcpath, "sbin", "*"))] full_list = blacklist_filter([f for f in all_sbin if not os.path.islink(f)], self.full_blacklist) self._test_full(full_list, 
extra_args=self.sbin_args) errors_list = blacklist_filter([f for f in all_sbin if not os.path.islink(f)], self.contingent_blacklist) self._test_errors(errors_list, extra_args=self.sbin_args) inner() @skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath) @skipIf(not os.path.exists(rcfile), "%s does not exist" % rcfile) def test_contingent_errors(self): @skipUnless(self.has_exec(), "%s not found, skipping" % self.command[0]) def inner(): filelist = [] whitelist = expand_path_dict(error_checks) for (mods, filedict) in contingent_checks.items(): try: for mod in mods: __import__(mod) except ImportError: continue filelist.extend(expand_path_dict(filedict)) flist = blacklist_filter(whitelist_filter(filelist, whitelist), self.blacklist) self._test_errors(flist, extra_args=self.lib_args) inner() @skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath) @skipIf(not os.path.exists(rcfile), "%s does not exist" % rcfile) def test_lib_errors(self): @skipUnless(self.has_exec(), "%s not found, skipping" % self.command[0]) def inner(): filelist = blacklist_filter(expand_path_dict(error_checks), self.contingent_blacklist) return self._test_errors(filelist, extra_args=self.lib_args) inner() class TestPylint(CodeTestCase): __test__ = True command = ["pylint", "--rcfile", rcfile, "--init-hook", "import sys;sys.path.append('%s')" % os.path.join(srcpath, "lib")] sbin_args = ["--module-rgx", "[a-z_-][a-z0-9_-]*$"] error_args = ["-f", "parseable", "-d", "R0801,E1103"] # regex to find errors and fatal errors error_re = re.compile(r':\d+:\s+\[[EF]\d{4}') def __init__(self, *args, **kwargs): CodeTestCase.__init__(self, *args, **kwargs) for mods, filedict in contingent_checks.items(): if "django" in mods: # there's some issue with running pylint on modules # that use django in Travis CI (but not elsewhere), so # skip these for now self.blacklist += expand_path_dict(filedict) def _test_errors(self, files, extra_args=None): """ test all files for fatals and errors """ if not len(files): return if extra_args is None: extra_args = [] args = self.command + self.error_args + extra_args + \ [os.path.join(srcpath, p) for p in files] pylint = Popen(args, stdout=PIPE, stderr=STDOUT, env=self.get_env()) output = pylint.communicate()[0].decode() rv = pylint.wait() for line in output.splitlines(): if self.error_re.search(str(line)): print(line) # pylint returns a bitmask, where 1 means fatal errors # were encountered and 2 means errors were encountered. 
self.assertEqual(rv & 3, 0) class TestPEP8(CodeTestCase): __test__ = True command = ["pep8", "--ignore=E125,E129,E501"] def _test_errors(self, files, extra_args=None): pass testsuite/Testsrc/test_doc.py000066400000000000000000000022241303523157100167140ustar00rootroot00000000000000import os import sys # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 _path = os.path.dirname(__file__) while _path != '/': if os.path.basename(_path).lower().startswith("test"): sys.path.append(_path) if os.path.basename(_path) == "testsuite": break _path = os.path.dirname(_path) from common import * try: from sphinx.application import Sphinx HAS_SPHINX = True except ImportError: HAS_SPHINX = False TEST_SPHINX = bool(os.environ.get('TEST_SPHINX', 'yes') != 'no') class DocTest(Bcfg2TestCase): top = os.path.join(os.path.dirname(__file__), '..', '..') source_dir = os.path.join(top, 'doc/') doctree_dir = os.path.join(top, 'build', 'doctree') @skipUnless(HAS_SPHINX, 'Sphinx not found') @skipUnless(TEST_SPHINX, 'Documentation testing disabled') def test_html_documentation(self): output_dir = os.path.join(self.top, 'build', 'html') app = Sphinx(self.source_dir, self.source_dir, output_dir, self.doctree_dir, buildername='html', warningiserror=True) app.build(force_all=True) testsuite/Testtools/000077500000000000000000000000001303523157100151075ustar00rootroot00000000000000testsuite/Testtools/__init__.py000066400000000000000000000074511303523157100172270ustar00rootroot00000000000000import os import re import sys # add all parent testsuite directories to sys.path to allow (most) # relative imports in python 2.4 path = os.path.dirname(__file__) while path != "/": if os.path.basename(path).lower().startswith("test"): sys.path.append(path) if os.path.basename(path) == "testsuite": break path = os.path.dirname(path) from common import * TOOLSDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "tools")) class TestToolsDocs(Bcfg2TestCase): blankline = re.compile(r'^\s*$') @skipUnless(os.path.exists(TOOLSDIR), "%s does not exist, skipping tools/ tests" % TOOLSDIR) def tools_in_README(self, toolsdir=None): if toolsdir is None: toolsdir = TOOLSDIR script = None desc = None started = False rv = dict() for line in open(os.path.join(toolsdir, "README")).readlines(): if not started: # skip up to the first blank line if self.blankline.match(line): started = True elif not self.blankline.match(line): match = re.match(r'^(\S+)', line) if match: script = match.group(1) desc = '' else: match = re.match(r'^\s+(?:-\s+)?(.*)$', line) if match: desc += match.group(1) else: # blank line if script and desc: rv[script] = desc if script and desc: rv[script] = desc return rv @skipUnless(os.path.exists(TOOLSDIR), "%s does not exist, skipping tools/ tests" % TOOLSDIR) def test_all_scripts_in_README(self, prefix=''): toolsdir = os.path.join(TOOLSDIR, prefix) tools = self.tools_in_README(toolsdir=toolsdir) for fname in os.listdir(toolsdir): if fname == 'README': continue dname = os.path.join(prefix, fname) # display name fpath = os.path.join(toolsdir, fname) if os.path.isfile(fpath): self.assertIn(fname, tools, msg="%s has no entry in README" % dname) self.assertNotRegexpMatches(tools[fname], r'^(\s|\?)*$', msg="%s has an empty entry in README" % dname) @skipUnless(os.path.exists(TOOLSDIR), "%s does not exist, skipping tools/ tests" % TOOLSDIR) def test_no_extras_in_README(self, prefix=''): toolsdir = os.path.join(TOOLSDIR, prefix) tools = self.tools_in_README(toolsdir=toolsdir) 
for fname in tools.keys(): dname = os.path.join(prefix, fname) # display name fpath = os.path.join(toolsdir, fname) self.assertTrue(os.path.exists(fpath), msg="%s is listed in README but does not exist" % dname) @skipUnless(os.path.exists(TOOLSDIR), "%s does not exist, skipping tools/ tests" % TOOLSDIR) def test_upgrade_scripts_documented(self): upgrade = os.path.join(TOOLSDIR, "upgrade") for udir in os.listdir(upgrade): upath = os.path.join(upgrade, udir) dname = os.path.join("upgrade", udir) # display name self.assertTrue(os.path.isdir(upath), msg="Unexpected script %s found in %s" % (udir, dname)) self.assertTrue(os.path.exists(os.path.join(upath, 'README')), msg="%s has no README" % dname) self.test_all_scripts_in_README(dname) testsuite/__init__.py000066400000000000000000000000001303523157100152060ustar00rootroot00000000000000testsuite/common.py000066400000000000000000000276441303523157100147660ustar00rootroot00000000000000""" In order to make testing easier and more consistent, we provide a number of convenience functions, variables, and classes, for a wide variety of reasons. To import this module, first set up :ref:`development-unit-testing-relative-imports` and then simply do: .. code-block:: python from common import * """ import os import re import sys import codecs import lxml.etree import Bcfg2.Options import Bcfg2.Utils try: from mock.mock import patch, MagicMock, _patch, DEFAULT except ImportError: from mock import patch, MagicMock, _patch, DEFAULT try: from unittest2 import skip, skipIf, skipUnless, TestCase except ImportError: from unittest import skip, skipIf, skipUnless, TestCase #: The XInclude namespace name XI_NAMESPACE = "http://www.w3.org/2001/XInclude" #: The XInclude namespace in a format suitable for use in XPath #: expressions XI = "{%s}" % XI_NAMESPACE #: Whether or not the tests are being run on Python 3. inPy3k = False if sys.hexversion >= 0x03000000: inPy3k = True #: A function to set a default config option if it's not already set def set_setup_default(option, value=None): if not hasattr(Bcfg2.Options.setup, option): setattr(Bcfg2.Options.setup, option, value) # these two variables do slightly different things for unit tests; the # former skips config file reading, while the latter sends option # debug logging to stdout so it can be captured. These are separate # because we want to enable config file reading in order to test # option parsing. Bcfg2.Options.Parser.unit_test = True Bcfg2.Options.Options.unit_test = True try: import django.conf has_django = True set_setup_default("db_engine", "sqlite3") set_setup_default("db_name", os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.sqlite")) set_setup_default("db_user") set_setup_default("db_password") set_setup_default("db_host") set_setup_default("db_port") set_setup_default("db_opts", dict()) set_setup_default("db_schema") set_setup_default("time_zone") set_setup_default("web_debug", False) set_setup_default("web_prefix") set_setup_default("django_settings") import Bcfg2.DBSettings Bcfg2.DBSettings.finalize_django_config() except ImportError: has_django = False #: The path to the Bcfg2 specification root for the tests. Using the #: root directory exposes a lot of potential problems with building #: paths. 
datastore = "/" set_setup_default("repository", datastore) try: from mock import call except ImportError: def call(*args, **kwargs): """ Analog to the Mock call object, which is a fairly recent addition, but it's very very useful, so we create our own function to create Mock calls""" return (args, kwargs) #: The name of the builtin module, for mocking Python builtins. In #: Python 2, this is ``__builtin__``, in Python 3 ``builtins``. To #: patch a builtin, you must do something like: #: #: .. code-block:: python #: #: @patch("%s.open" % open) #: def test_something(self, mock_open): #: ... builtins = "__builtin__" if inPy3k: builtins = "builtins" def u(s): """ Get a unicode string, whatever that means. In Python 2, returns a unicode object; in Python 3, returns a str object. :param s: The string to unicode-ify. :type s: str :returns: str or unicode """ return s else: def u(s): """ Get a unicode string, whatever that means. In Python 2, returns a unicode object; in Python 3, returns a str object. :param s: The string to unicode-ify. :type s: str :returns: str or unicode """ return codecs.unicode_escape_decode(s)[0] class MockExecutor(object): """mock object for :class:`Bcfg2.Utils.Executor` objects.""" def __init__(self, timeout=None): self.timeout = timeout # variables that can be set to control the result returned self.stdout = '' self.stderr = '' self.retval = 0 # variables that record how run() was called self.calls = [] def run(self, command, inputdata=None, timeout=None, **kwargs): self.calls.append({"command": command, "inputdata": inputdata, "timeout": timeout or self.timeout, "kwargs": kwargs}) return Bcfg2.Utils.ExecutorResult(self.stdout, self.stderr, self.retval) class Bcfg2TestCase(TestCase): """ Base TestCase class that inherits from :class:`unittest.TestCase`. This class adds :func:`assertXMLEqual`, a useful assertion method given all the XML used by Bcfg2. """ capture_stderr = True @classmethod def setUpClass(cls): cls._stderr = sys.stderr if cls.capture_stderr: sys.stderr = sys.stdout @classmethod def tearDownClass(cls): if cls.capture_stderr: sys.stderr = cls._stderr if hasattr(TestCase, "assertCountEqual"): assertItemsEqual = assertCountEqual def assertXMLEqual(self, el1, el2, msg=None): """ Test that the two XML trees given are equal. 
""" if msg is None: msg = "XML trees are not equal: %s" else: msg += ": %s" msg += "\n%s" fullmsg = "First: %s" % lxml.etree.tostring(el1) + \ "\nSecond: %s" % lxml.etree.tostring(el2) self.assertEqual(el1.tag, el2.tag, msg=msg % ("Tags differ", fullmsg)) if el1.text is not None and el2.text is not None: self.assertEqual(el1.text.strip(), el2.text.strip(), msg=msg % ("Text content differs", fullmsg)) else: self.assertEqual(el1.text, el2.text, msg=msg % ("Text content differs", fullmsg)) self.assertItemsEqual(el1.attrib.items(), el2.attrib.items(), msg=msg % ("Attributes differ", fullmsg)) self.assertEqual(len(el1.getchildren()), len(el2.getchildren()), msg=msg % ("Different numbers of children", fullmsg)) matched = [] for child1 in el1.getchildren(): for child2 in el2.xpath(child1.tag): if child2 in matched: continue try: self.assertXMLEqual(child1, child2) matched.append(child2) break except AssertionError: continue else: assert False, \ msg % ("Element %s is missing from second" % lxml.etree.tostring(child1), fullmsg) self.assertItemsEqual(el2.getchildren(), matched, msg=msg % ("Second has extra element(s)", fullmsg)) class DBModelTestCase(Bcfg2TestCase): """ Test case class for Django database models """ models = [] __test__ = False @skipUnless(has_django, "Django not found, skipping") def test_syncdb(self): """ Create the test database and sync the schema """ if self.models: import django import django.core.management from django.core.exceptions import ImproperlyConfigured dbfile = django.conf.settings.DATABASES['default']['NAME'] # Close all connections to the old database if django.VERSION[0] == 1 and django.VERSION[1] >= 7: for connection in django.db.connections.all(): connection.close() else: django.db.close_connection() # Remove old database if os.path.exists(dbfile): os.unlink(dbfile) self.assertFalse(os.path.exists(dbfile)) # Create new if django.VERSION[0] == 1 and django.VERSION[1] < 7: django.core.management.call_command('syncdb', interactive=False, verbosity=1) django.core.management.call_command('migrate', interactive=False, verbosity=1) # Check if database exists now self.assertTrue(os.path.exists(dbfile)) @skipUnless(has_django, "Django not found, skipping") def test_cleandb(self): """ Ensure that we a) can connect to the database; b) start with a clean database """ for model in self.models: model.objects.all().delete() self.assertItemsEqual(list(model.objects.all()), []) # in order for patchIf() to decorate a function in the same way as # patch(), we override the default behavior of __enter__ and __exit__ # on the _patch() object to basically be noops. class _noop_patch(_patch): def __enter__(self): return MagicMock(name=self.attribute) def __exit__(self, *args): pass class patchIf(object): """ Decorator class to perform conditional patching. This is necessary because some libraries might not be installed (e.g., selinux, pylibacl), and patching will barf on that. Other workarounds are not available to us; e.g., context managers aren't in python 2.4, and using inner functions doesn't work because python 2.6 parses all decorators at compile-time, not at run-time, so decorating inner functions does not prevent the decorators from being run. """ def __init__(self, condition, target, new=DEFAULT, spec=None, create=False, spec_set=None): """ :param condition: The condition to evaluate to decide if the patch will be applied. :type condition: bool :param target: The name of the target object to patch :type target: str :param new: The new object to replace the target with. 
If this is omitted, a new :class:`mock.MagicMock` is created and passed as an extra argument to the decorated function. :type new: any :param spec: Spec passed to the MagicMock object if ``patchIf`` is creating one for you. :type spec: List of strings or existing object :param create: Tell patch to create attributes on the fly. See the documentation for :func:`mock.patch` for more details on this. :type create: bool :param spec_set: Spec set passed to the MagicMock object if ``patchIf`` is creating one for you. :type spec_set: List of strings or existing object """ self.condition = condition self.target = target self.patch_args = dict(new=new, spec=spec, create=create, spec_set=spec_set) def __call__(self, func): if self.condition: return patch(self.target, **self.patch_args)(func) else: args = [lambda: True, self.target.rsplit('.', 1)[-1], self.patch_args['new'], self.patch_args['spec'], self.patch_args['create'], None, self.patch_args['spec_set']] try: # in older versions of mock, _patch() takes 8 args return _noop_patch(*args)(func) except TypeError: # in some intermediate versions of mock, _patch # takes 11 args args.extend([None, None, None]) try: return _noop_patch(*args)(func) except TypeError: # in the latest versions of mock, _patch() takes # 10 args -- mocksignature has been removed args.pop(5) return _noop_patch(*args)(func) #: The type of compiled regular expression objects re_type = None try: re_type = re._pattern_type except AttributeError: re_type = type(re.compile("")) testsuite/ext/000077500000000000000000000000001303523157100137075ustar00rootroot00000000000000testsuite/ext/__init__.py000066400000000000000000000000001303523157100160060ustar00rootroot00000000000000testsuite/ext/exception_messages.py000066400000000000000000000033121303523157100201450ustar00rootroot00000000000000try: from logilab import astng as ast from pylint.interfaces import IASTNGChecker as IChecker PYLINT = 0 # pylint 0.something except ImportError: import astroid as ast from pylint.interfaces import IAstroidChecker as IChecker PYLINT = 1 # pylint 1.something from pylint.checkers import BaseChecker from pylint.checkers.utils import safe_infer if PYLINT == 0: # this is not quite correct; later versions of pylint 0.* wanted a # three-tuple for messages as well msg = ('Exception raised without arguments', 'Used when an exception is raised without any arguments') else: msg = ('Exception raised without arguments', 'exception-without-args', 'Used when an exception is raised without any arguments') msgs = {'R9901': msg} class ExceptionMessageChecker(BaseChecker): __implements__ = IChecker name = 'Exception Messages' options = ( ('exceptions-without-args', dict(default=('NotImplementedError',), type='csv', metavar='', help='List of exception names that may be raised without arguments')),) # this is important so that your checker is executed before others priority = -1 def visit_raise(self, node): if node.exc is None: return if isinstance(node.exc, ast.Name): raised = safe_infer(node.exc) if (isinstance(raised, ast.Class) and raised.name not in self.config.exceptions_without_args): self.add_message('R9901', node=node.exc) def register(linter): """required method to auto register this checker""" linter.register_checker(ExceptionMessageChecker(linter)) testsuite/ext/ssl_protocols.py000066400000000000000000000010161303523157100171640ustar00rootroot00000000000000try: from logilab.astng import MANAGER, scoped_nodes, node_classes PYLINT=0 except ImportError: from astroid import MANAGER, scoped_nodes, node_classes 
PYLINT=1 def ssl_transform(module): if module.name == 'ssl': for proto in ('SSLv23', 'TLSv1'): module.locals['PROTOCOL_%s' % proto] = [node_classes.Const()] def register(linter): if PYLINT == 0: MANAGER.register_transformer(ssl_transform) else: MANAGER.register_transform(scoped_nodes.Module, ssl_transform) testsuite/install.sh000077500000000000000000000016001303523157100151110ustar00rootroot00000000000000#!/bin/bash -ex # install script for Travis-CI sudo apt-get update -qq sudo apt-get install swig libxml2-utils pip install -r testsuite/requirements.txt PYVER=$(python -c 'import sys;print(".".join(str(v) for v in sys.version_info[0:2]))') if [[ ${PYVER:0:1} == "2" && $PYVER != "2.7" ]]; then pip install unittest2 fi if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then sudo apt-get install -y yum libaugeas0 augeas-lenses libacl1-dev libssl-dev \ python-gamin python-selinux pip install PyYAML pyinotify boto pylibacl Jinja2 mercurial guppy cherrypy easy_install https://fedorahosted.org/released/python-augeas/python-augeas-0.4.1.tar.gz if [[ ${PYVER:0:1} == "2" ]]; then pip install cheetah m2crypto if [[ $PYVER != "2.7" ]]; then pip install 'django<1.7' 'South<0.8' else pip install django fi fi fi testsuite/pylintrc.conf000066400000000000000000000222771303523157100156340ustar00rootroot00000000000000[MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Profiled execution. profile=no # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Pickle collected data for later comparisons. persistent=no # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins=ext.exception_messages,ext.ssl_protocols [MESSAGES CONTROL] # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time. #enable= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). disable=F0401,W0142,W0511,W0603,W1201,R0201,R0801,R0901,R0902,R0903,R0904,R0921,R0922,C0302,I0011,E0100,E0101,E0102,E0106 # Some of these are disabled because they warn about things we _want_: # # * W0142: Used * or ** magic # * W1201: Specify string format arguments as logging function parameters # * I0011: Locally disabling a pylint message # * R0921: Abstract class not referenced # * R0922: Abstract class is only referenced a small number of times # # We have several modules, e.g., Bcfg2.Server.Plugin.interfaces, that # only declare abstract classes, which makes R0921 and R0922 useless. 
# Some of these are disabled because they just aren't that useful: # # * R0901: Too many ancestors # * R0902: Too many instance attributes # * R0903: Too few public methods # * R0904: Too many public methods # Some of these are disabled because they cause lots of errors with no # obvious solutions, but we should try to enable them at some point in # the future: # # * W0511: FIXME or TODO # * W0603: Using the global statement # * R0201: Method could be a function # * R0801: Similar lines in files # * C0302: Too many lines in module # Some of these are disabled because of bugs in pylint: # * E0100,E0101,E0102,E0106: http://stackoverflow.com/questions/12514214/pylint-and-tornado-fails-on-tornado-web-authenticated # Some of these are disabled for various other reasons: # * F0401: Unable to import a module: Bcfg2 has loads and loads of # optional dependencies [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html output-format=text # Include message's id in output include-ids=yes # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells whether to display a full report or only the messages reports=no # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Add a comment according to your evaluation note. This is used by the global # evaluation report (RP0004). comment=no # Template used to display messages. This is a python new-style format string # used to format the massage information. See doc for all details msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg} [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the beginning of the name of dummy variables # (i.e. not used). dummy-variables-rgx=_|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=6 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes [FORMAT] # Maximum number of characters on a single line. max-line-length=79 # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )?(?|:(func|class):.*)$ # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). ignored-classes=ForeignKey,Interaction,git.cmd.Git,argparse.Namespace,Namespace # When zope mode is activated, add a predefined set of Zope acquired attributes # to generated-members. 
zope=no # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular # expressions are accepted. generated-members=objects,DoesNotExist,isoformat,filter,save,count,get,add,id,MultipleObjectsReturned [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO [BASIC] # Required attributes for module, separated by a comma required-attributes= # List of builtins function names that should not be used, separated by a comma bad-functions=map,apply # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Regular expression which should only match correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method # names. Change the ranges below to [a-z] when ready to make all API # methods consistent. method-rgx=[A-z_][A-z0-9_]{2,30}$ # Regular expression which should only match correct instance attribute names attr-rgx=(Entries|[a-z_][a-z0-9_]{2,30}(ID)?)$ # Regular expression which should only match correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}(ID)?$ # Regular expression which should only match correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}(ID)?$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=_,rv,el,fd,ca,re,i,j,iv,ip # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # Regular expression which should only match functions or classes name which do # not require a docstring no-docstring-rgx=__.*__|main [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= [DESIGN] # Maximum number of arguments for function / method max-args=8 # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.* # Maximum number of locals for function / method body max-locals=20 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body (max-branchs is # pylint 0.x, max-branches is 1.0) max-branchs=18 max-branches=18 # Maximum number of statements in function / method body max-statements=75 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=15 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=25 [CLASSES] # List of interface methods to ignore, separated by a comma. 
This is used for # instance to not check methods defines in Zope's Interface base class. ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=Exception testsuite/requirements.txt000066400000000000000000000001171303523157100163720ustar00rootroot00000000000000lxml nose mock sphinx<1.5 pylint<0.29 pep8 python-daemon<2.0.0 genshi argparse tools/000077500000000000000000000000001303523157100122165ustar00rootroot00000000000000tools/README000066400000000000000000000047511303523157100131050ustar00rootroot00000000000000This directory contains repository maintenance tools. basebuilder.py - builds v2 base.xml from bcfg1 repo bcfg2-completion.bash - Bash tab completion for bcfg2-admin bcfg2-cron - Script to run bcfg2 with cron bcfg2-import-config - Create tarball of changed files on a client for import into the specification bcfg2_local.py - Perform a full Bcfg2 run against a local repository instead of against a remote server bcfg2_svnlog.py - Send intelligent log messages about changes made to your Bcfg2 repository from SVN postcommit create-debian-pkglist-gp.py - Generate Pkgmgr XML files from etc/debian-pkglist.conf. See http://trac.mcs.anl.gov/projects/bcfg2/wiki/Tools/DebianPkgmgrCreationScript create-debian-pkglist.py - Generate Pkgmgr XML files for Debian packages create-rpm-pkglist.py - Generate list of installed RPM packages ctags.sh - Generate ctags (or etags) indexes for the Bcfg2 source encap-util-count.sh - Produce a count of encap packages per directory encap-util-expand.sh - Gets encaps out of a makeself .run file encap-util-place.sh - Put encaps in the right directories encap-util-xml.sh - Generate Pkgmgr XML files for encap packages export.py - Export a tagged version of the Bcfg2 source generate-manpages.bash - Generate man pages from the Sphinx source git_commit.py - Trigger script to commit local changes back to a git repository pkgmgr_gen.py - Generate Pkgmgr XML files from a list of directories that contain RPMS pkgmgr_update.py - Update Pkgmgr XML files from a list of directories that contain RPMS posixusers_baseline.py - Create a Bundle with all base POSIXUser/POSIXGroup entries on a client. rpmlisting.py - Generate Pkgmgr XML files for RPM packages selinux_baseline.py - Create a Bundle with all base SELinux entries on a client. This is useful because different versions of the SELinux libraries have different capabilities, and some clients may have very, very large baselines. selinux_baseline.py can be used to ensure that there are no 'extra' SELinux entries on such a client. upgrade - This directory contains scripts used to upgrade to the specified version. 
E.g., upgrade/1.2 has scripts needed to upgrade to Bcfg2 1.2.x from bcfg2 1.1.x yum-listpkgs-xml.py - Produces a list of all packages installed and available in a format suitable for use by Packages or Pkgmgr tools/basebuilder.py000066400000000000000000000011441303523157100150510ustar00rootroot00000000000000#!/usr/bin/env python from sys import argv from elementtree.ElementTree import Element, SubElement, tostring if __name__ == '__main__': dir = argv[1] imagename = dir.split('/')[-1] e = Element("Image", name=imagename) for line in open("%s/base.ConfigFile" % (dir)).readlines(): SubElement(e, "ConfigFile", name=line.strip()) for line in open("%s/base.Package" % (dir)).readlines(): SubElement(e, "Package", name=line.strip()) for line in open("%s/base.Service" % (dir)).readlines(): SubElement(e, "Service", name=line.strip().split()[0]) print(tostring(e)) tools/bcfg2-completion.bash000066400000000000000000000016451303523157100162150ustar00rootroot00000000000000# TODO: Add completion for each admin mode _bcfg2-admin() { local cur prev possibles COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" if [[ ${COMP_CWORD} -eq 1 ]] || [[ -n "${prev}" && ${prev} == -* ]] then possibles="$(bcfg2-admin help | awk '{print $1}')" #elif bcfg2-admin ${prev} help &>/dev/null ; then # possibles=$(bcfg2-admin ${prev} help | ${sedcmd}) fi [[ -n "${possibles}" ]] && \ COMPREPLY=( $(compgen -W "${possibles}" -- ${cur}) ) return 0 } _bcfg2-info() { local cur prev possibles COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" if [[ ${COMP_CWORD} -eq 1 ]] || [[ -n "${prev}" && ${prev} == -* ]] then possibles="$(bcfg2-info help | awk '{print $1}')" fi [[ -n "${possibles}" ]] && \ COMPREPLY=( $(compgen -W "${possibles}" -- ${cur}) ) return 0 } complete -F _bcfg2-admin bcfg2-admin complete -F _bcfg2-info bcfg2-info tools/bcfg2-cron000077500000000000000000000025711303523157100140730ustar00rootroot00000000000000#!/bin/sh # # Script to run bcfg2 with cron. # # This script is designed so that bcfg2-cron can be invoked from both # /etc/cron.daily and /etc/cron.hourly. This allows the administrators to # modify /etc/default/bcfg2 and define the wanted frequency of cron runs. # # Default is not to run at all from cron BCFG2_CRON=off BCFG2_ENABLED=0 # Set default options # This script will respect additional variables: # BCFG_OPTIONS_DAILY and BCFG_OPTIONS_HOURLY are added to BCFG2_OPTIONS # This allows you to run different types of passes with cron BCFG2_OPTIONS="-q" # bcfg2 file locations BCFG2_BIN=/usr/sbin/bcfg2 BCFG2_CFG=/etc/bcfg2.conf # Read the configuration from /etc/default/bcfg2 [ -e /etc/default/bcfg2 ] && . /etc/default/bcfg2 # Check that configuration and executable exists [ -x ${BCFG2_BIN} -a -e ${BCFG2_CFG} ] || exit 1 invoke_bcfg2 () { # Invoke bcfg2 if enabled if [ ${BCFG2_ENABLED} -eq 1 ]; then eval BCFG2_EXTRA_OPTIONS=\${BCFG2_OPTIONS_${RUNTYPE}} ${BCFG2_BIN} ${BCFG2_OPTIONS} ${BCFG2_EXTRA_OPTIONS} fi } case $1 in "--daily") [ "x${BCFG2_CRON}" = "xdaily" -o "x${BCFG2_CRON}" = "xboth" ] && \ RUNTYPE=DAILY invoke_bcfg2 ;; "--hourly") [ "x${BCFG2_CRON}" = "xhourly" -o "x${BCFG2_CRON}" = "xboth" ] && \ RUNTYPE=HOURLY invoke_bcfg2 ;; *) echo "Usage: $0 [--daily|--hourly]" exit 1 ;; esac tools/bcfg2-import-config000077500000000000000000000052061303523157100157050ustar00rootroot00000000000000#!/bin/sh # # Import client configuration in to repository like tarball. # This tarball can then be extracted on the server straight in to the server # Repository. 
This makes it easier to import a live host in to bcfg2 # - Sami Haahtinen # # TODO: # - Fetch filelist from server usage() { echo "$0: tool to import files in to bcfg2-server repository" echo " -s Copy SSH Key files" echo " -n No suffix. Generate global files" echo " --debian Run debsums to detect changed configuration files" echo " ** debsums is only able to detect part of changes!" echo " -h Help (You are here)" } ## Start Getopt TEMP=`getopt -o snph --long help,debian -n $0 -- "$@"` if [ $? != 0 ] ; then ( usage ) >&2 ; exit 1 ; fi eval set -- "$TEMP" ## End Getopt ## Start Defaults NEEDSSH=0 DEBSUMS=0 NOSUFFIX=0 # End Defaults ## Start option parse while true ; do case "$1" in -s) NEEDSSH=1; shift ;; --debian) DEBSUMS=1; shift ;; -n) NOSUFFIX=1; shift ;; -h|--help) usage exit 0 ;; --) shift; break ;; *) echo "Internal error!" exit 1 ;; esac done FILES=$@ ## End option parse ## Start functions init_temp_repo() { TMPREPO=`mktemp -d` if [ $NEEDSSH -ne 0 ]; then SSHBASE="$TMPREPO/SSHbase" mkdir $SSHBASE fi CFGREPO="$TMPREPO/Cfg" HOSTNAME=`hostname -f` if [ $NOSUFFIX -eq 0 ]; then SUFFIX=".H_$HOSTNAME" else SUFFIX="" fi } package_temp_repo() { echo "Creating tarball to: /tmp/$HOSTNAME-bcfg2.tar.gz" # We should test for files here. tar -cz -C $TMPREPO -f /tmp/$HOSTNAME-bcfg2.tar.gz . } clean_temp_repo() { rm -r "$TMPREPO" } get_ssh() { if [ $NEEDSSH -ne 0 ]; then echo "Importing SSH host keys (if exists)" for i in $(find /etc/ssh -name ssh_host\*); do FILE=$(basename $i) cp $i $SSHBASE/${FILE}${SUFFIX} done fi } get_files() { if [ -n "$FILES" ]; then echo "Copying files:" # TODO: Files need an absolute path! for i in $FILES; do if [ -f $i ]; then echo -n "$i: " FILE=$(basename $i) mkdir -p $CFGREPO/$i cp $i $CFGREPO/$i/${FILE}${SUFFIX} echo "OK" else echo "$i: Not a file" fi done fi } get_debsums() { if [ $DEBSUMS -ne 0 ]; then echo "Locating changed configuration with debsums" echo " ** debsums by design is unable to find all changed files" echo " you need to add missing files by hand." DEBSUMSFILES=$(debsums -ec 2> /dev/null) FILES="$FILES $DEBSUMSFILES" fi } ## End Functions if [ $(($NEEDSSH + $DEBSUMS)) -eq 0 -a -z "$FILES" ]; then usage ; exit 0; fi init_temp_repo get_debsums get_ssh get_files package_temp_repo clean_temp_repo tools/bcfg2_local.py000077500000000000000000000046131303523157100147340ustar00rootroot00000000000000#!/usr/bin/env python """ This tool performs a full Bcfg2 run entirely against a local repository, i.e., without a server. 
It starts up a local instance of the server core, then uses that to get probes, run them, and so on.""" import sys import socket import Bcfg2.Options from Bcfg2.Client import Client from Bcfg2.Server.Core import Core class LocalCore(Core): """ Local server core similar to the one started by bcfg2-info """ def __init__(self): #saved = (setup['syslog'], setup['logging']) #setup['syslog'] = False #setup['logging'] = None Bcfg2.Server.Core.BaseCore.__init__(self) #setup['syslog'], setup['logging'] = saved self.load_plugins() self.block_for_fam_events(handle_events=True) def _daemonize(self): return True def _run(self): return True def _block(self): pass class LocalProxy(object): """ A local proxy (as opposed to XML-RPC) that proxies from the Client object to the LocalCore object, adding a client address pair to the argument list of each proxied call """ def __init__(self, core): self.core = core self.hostname = socket.gethostname() self.ipaddr = socket.gethostbyname(self.hostname) def __getattr__(self, attr): if hasattr(self.core, attr): func = getattr(self.core, attr) if func.exposed: def inner(*args, **kwargs): # the port portion of the addresspair tuple isn't # actually used, so it's safe to hardcode 6789 # here. args = ((self.ipaddr, 6789), ) + args return func(*args, **kwargs) return inner raise AttributeError(attr) class LocalClient(Client): """ A version of the Client class that uses LocalProxy instead of an XML-RPC proxy to make its calls """ def __init__(self, proxy): Client.__init__(self) self._proxy = proxy def main(): parser = Bcfg2.Options.Parser( description="Run a Bcfg2 client against a local repository without a " "server", conflict_handler="resolve", components=[LocalCore, LocalProxy, LocalClient]) parser.parse() core = LocalCore() try: LocalClient(LocalProxy(core)).run() finally: core.shutdown() if __name__ == '__main__': sys.exit(main()) tools/bcfg2_svnlog.py000077500000000000000000000440171303523157100151540ustar00rootroot00000000000000#!/usr/bin/python -O """ Send email about Bcfg2 commits from an SVN postcommit hook This script can be used to send email from a Subversion postcommit hook. It emails out a list of diffs, with a few exceptions: * If a file was deleted, the deletion is noted but no diff is included * If the file matches a set of blacklist patterns (configurable; by default: /Ohai/*.json, */Probes/probed.xml, */SSHbase/*, */Packages/packages.conf), then the diff is not included but the file is listed as 'sensitive.' (This is a bit of a broad brush, since the stuff in Probes and Ohai isn't necessarily sensitive, just annoying to get diffs for.) * If the file is a directory, not a file, it is omitted * If he file is binary, that is noted instead of a diff being included * If the diff exceeds 100 lines (configurable), then a 'large diff' is mentioned, but not included. * If the file is a Property file and is flagged as sensitive in the opening Property tag, then it is listed as sensitive and no diff is included. * If the file is flagged as sensitive in its info.xml, then it is listed as sensitive and no diff is included. The script attempts to look up the committing user's email address in LDAP; it uses the system LDAP config to do so. Currently it looks in /etc/ldap.conf, /etc/openldap/ldap.conf, and /etc/nss_ldap.conf to figure out the LDAP config, so it doesn't work with SSSD or with OSes that keep their LDAP configs in other places. The config file, /etc/bcfg2_svnlog.conf, should contain one stanza per repository. 
(If you just have one Bcfg2 repo, then you only need one stanza. This script unfortunately does not support different configurations for different branches.) Each stanza should look like this: [] email=
    subject= largediff=<# of lines a diff must exceed to be considered too large to include in the email> blacklist= Only 'email' is required. The commit message can itself contain some magic that will influence the email sent out. The following patterns are special: * Subject: Use the specified text as the subject of the message. Otherwise, the first line (up to the first [.!;:?] or 120 characters) will be used. * Resolve: Add some magic to the email that will resolve the specified RT ticket. These patterns can either be listed on a line by themselves, or enclosed in curly braces ({...}). Whitespace after the colon is optional. The patterns are all case-insensitive. So these two commits are identical: svn ci -m '{resolve:108934}Fixed DNS error' svn ci -m 'Fixed DNS error Resolve: 108934' """ __author__ = "Chris St Pierre" __email__ = "chris.a.st.pierre@gmail.com" import re import os import sys import ldap import pysvn import shutil import fnmatch import smtplib import logging import logging.handlers import tempfile import lxml.etree from email.Message import Message from optparse import OptionParser, OptionError from ConfigParser import SafeConfigParser SEPARATOR = "=" * 67 LOGGER = None def get_logger(verbose=0): """ set up logging according to the verbose level given on the command line """ global LOGGER if LOGGER is None: LOGGER = logging.getLogger(sys.argv[0]) stderr = logging.StreamHandler() level = logging.WARNING lformat = "%(message)s" if verbose == 1: level = logging.INFO elif verbose > 1: stderr.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s: %(message)s")) level = logging.DEBUG LOGGER.setLevel(level) LOGGER.addHandler(stderr) syslog = logging.handlers.SysLogHandler("/dev/log") syslog.setFormatter(logging.Formatter("%(name)s: %(message)s")) LOGGER.addHandler(syslog) LOGGER.debug("Setting verbose to %s" % verbose) return LOGGER def parse_log_message(message): """ Parse the commit log message """ keywords = dict(subject=None, resolve=None) logger = get_logger() for keyword in keywords.iterkeys(): pattern = re.compile((r'(?:\A|\n|\{)%s:\s*([^\}\n]+)(?:\Z|\n|\})' % keyword), re.IGNORECASE | re.MULTILINE) match = pattern.search(message) if match: keywords[keyword] = match.group(1).strip() logger.debug("Found log message keyword %s=%s" % (keyword, match.group(0))) message = pattern.sub('', message) return (message, keywords) def build_summary(changes): """ build a summary of changes """ summary = dict() logger = get_logger() for change in changes: logger.info("Summarizing %s file %s" % (change.summarize_kind, change.path)) if change.summarize_kind not in summary: summary[change.summarize_kind] = [] summary[change.summarize_kind].append(change.path) return summary def get_author_email(author): """looks up author email in ldap""" logger = get_logger() ldapconf = dict() for conffile in ["/etc/ldap.conf", "/etc/openldap/ldap.conf", "/etc/nss_ldap.conf"]: # short-circuit if we have both a base and a host if 'base' in ldapconf and 'host' in ldapconf: break logger.debug("Reading LDAP configuration from %s" % conffile) try: for line in open(conffile).read().splitlines(): match = re.search(r'^(base|host|ssl)\s+(.*)', line) if match: ldapconf[match.group(1)] = match.group(2) except IOError: pass if 'base' in ldapconf and 'host' in ldapconf: # host can be a space-delimited list; in that case, we just # use the first host ldapconf['host'] = ldapconf['host'].split()[0] # ensure that we have an ldap uri if not re.search(r'^ldap[si]?://', ldapconf['host']): if ('ssl' in ldapconf and 
ldapconf['ssl'] in ['on', 'yes', 'start_tls']): ldapconf['host'] = "ldaps://%s" % ldapconf['host'] else: ldapconf['host'] = "ldap://%s" % ldapconf['host'] logger.debug("Connecting to LDAP server at %s" % ldapconf['host']) try: conn = ldap.initialize(ldapconf['host']) except ldap.LDAPError, err: logger.warn("Could not connect to LDAP server at %s: %s" % (ldapconf['host'], err)) return author if 'ssl' in ldapconf and ldapconf['ssl'] == 'start_tls': # try TLS, but don't require it. if starting TLS fails # but the connection requires confidentiality, the search # will fail below logger.debug("Starting TLS") try: conn.start_tls_s() except ldap.LDAPError, err: if err[0]['info'] != 'TLS already started': logger.warn("Could not start TLS: %s" % err) ldap_filter = "uid=%s" % author logger.debug("Searching for %s in %s" % (ldap_filter, ldapconf['base'])) try: res = conn.search_s(ldapconf['base'], ldap.SCOPE_SUBTREE, ldap_filter, ['mail']) if len(res) == 1: attrs = res.pop()[1] logger.debug("Got %s for email address" % attrs['mail'][0]) return attrs['mail'][0] elif len(res): logger.warn("More than one LDAP entry found for %s" % ldap_filter) return author elif not res: logger.warn("No LDAP entries found for %s" % ldap_filter) return author except ldap.LDAPError, err: logger.warn("Could not search for %s in LDAP at %s: %s" % (ldap_filter, ldapconf['host'], err)) return author else: logger.warn("Could not determine LDAP configuration") return author def get_diff_set(change, baseuri, largediff=100, rev=None, blacklist=None): """ generate diffs for the given change object. returns a tuple of (, ). Type is one of None, 'sensitive', 'large', 'binary', or 'diff'""" logger = get_logger() client = pysvn.Client() revision = pysvn.Revision(pysvn.opt_revision_kind.number, rev) previous = pysvn.Revision(pysvn.opt_revision_kind.number, rev - 1) logger.info("Diffing %s file %s" % (change.summarize_kind, change.path)) change_uri = os.path.join(baseuri, change.path) if plugin_blacklist is None: plugin_blacklist = [] # There are a number of reasons a diff might not be included in an # svnlog message: # # * The file was deleted # * The file matches a blacklist pattern (default */Ohai/*.json, # */Probes/probed.xml, */SSHbase/*, */Packages/packages.conf) # * The file is a directory, not a file # * The file is binary # * The diff exceeds 100 lines # * The file is a Property file and is flagged as sensitive in the # opening Property tag # * The file is flagged as sensitive in its info.xml # # These are listed here in approximate order from least expensive # to most expensive. Consequently, if we can do a simple filename # match and avoid generating a diff, we win; and so on. 
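    # Each check below either returns early with a (type, data) tuple or
    # falls through to the next, more expensive test.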
if change.summarize_kind == pysvn.diff_summarize_kind.delete: logger.debug("%s was %s, skipping diff" % (change.path, change.summarize_kind)) return (None, None) if ("/SSHbase/" in change.path or change.path.endswith("/Packages/packages.conf")): logger.debug("%s is hard-coded as sensitive, skipping diff" % change.path) return ("sensitive", change.path) for pattern in blacklist: if fnmatch.fnmatch(change.path, pattern): logger.debug("% is blacklisted, skipping diff") return (None, None) info = client.info2(change_uri, revision=revision, recurse=False)[0][1] if info.kind == pysvn.node_kind.dir: logger.debug("%s is a directory, skipping diff" % change.path) return (None, None) mime = client.propget('svn:mime-type', change_uri, revision=revision) if change_uri in mime: logger.debug("%s is binary (%s), skipping diff" % (change.path, mime[change_uri])) return ('binary', change.path) diff = None if change.summarize_kind == pysvn.diff_summarize_kind.modified: tempdir = tempfile.mkdtemp() diff = client.diff(tempdir, change_uri, revision1=previous, revision2=revision) shutil.rmtree(tempdir) else: diff = ("Added: %s\n%s\n%s" % (change.path, SEPARATOR, client.cat(change_uri, revision=revision))) if len(diff.splitlines()) > largediff: logger.debug("Diff for %s is large (%d lines), skipping diff" % (change.path, len(diff.splitlines()))) return ('large', change.path) if fnmatch.fnmatch(change.path, "*/Properties/*.xml"): logger.info("Checking out %s" % os.path.dirname(change.path)) tempdir = tempfile.mkdtemp() try: client.checkout(os.path.join(baseuri, os.path.dirname(change.path)), tempdir, revision=revision) xdata = \ lxml.etree.parse(os.path.join(tempdir, os.path.basename(change.path))) finally: shutil.rmtree(tempdir) if xdata.getroot().get("sensitive", "false").lower() == "true": return ("sensitive", change.path) if ("/Cfg/" in change.path and os.path.basename(change.path) != "info.xml"): # try to check out an info.xml for this file logger.info("Checking out %s" % os.path.dirname(change.path)) tempdir = tempfile.mkdtemp() # in python 2.4, try...except...finally isn't supported; you # have to nest a try...except block inside try...finally try: try: client.checkout(os.path.join(baseuri, os.path.dirname(change.path)), tempdir, revision=revision) root = lxml.etree.parse(os.path.join(tempdir, "info.xml")).getroot() except IOError: logger.debug("No info.xml found for %s" % change.path) except: raise finally: shutil.rmtree(tempdir) if root is not None: for el in root.xpath("//Info"): if el.get("sensitive", "false").lower() == "true": return ("sensitive", change.path) return ('diff', diff) def parse_args(): """ parse command-line arguments """ usage = """Usage: bcfg2_svnlog.py [options] -r """ parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", help="Be verbose", action="count") parser.add_option("-c", "--config", help="Config file", default="/etc/bcfg2_svnlog.conf") parser.add_option("-r", "--rev", help="Revision") parser.add_option("--stdout", help="Print log message to stdout") try: (options, args) = parser.parse_args() except OptionError: parser.print_help() raise SystemExit(1) if not len(args): parser.print_help() raise SystemExit(1) get_logger(options.verbose) return (options, args.pop()) def get_config(configfile, repos_name): """ read config for the given repository """ logger = get_logger() defaults = dict(largediff=100, subject='', blacklist="*/Ohai/*.json */Probes/probed.xml */SSHbase/ssh_host*_key.[GH]* */Packages/packages.conf") config = SafeConfigParser(defaults) if 
os.path.exists(configfile): config.read(configfile) else: logger.fatal("Config file %s does not exist" % configfile) raise SystemExit(1) if not config.has_section(repos_name): logger.fatal("No configuration section found for '%s' repo, aborting" % repos_name) raise SystemExit(2) return config def main(): """ main subroutine """ (options, path) = parse_args() uri = "file://%s" % path logger = get_logger() repos_name = os.path.basename(uri) config = get_config(options.config, repos_name) client = pysvn.Client() revision = pysvn.Revision(pysvn.opt_revision_kind.number, options.rev) previous = pysvn.Revision(pysvn.opt_revision_kind.number, int(options.rev) - 1) changes = client.diff_summarize(uri, revision1=previous, revision2=revision) # parse log message log = client.log(uri, revision_end=revision)[0] logger.info("Examining commit %s by %s" % (options.rev, log.author)) (message, keywords) = parse_log_message(log.message) summary = build_summary(changes) diffs = dict(diff=[], large=[], binary=[], sensitive=[]) for change in changes: (dtype, ddata) = get_diff_set(change, uri, rev=int(options.rev), largediff=int(config.get(repos_name, 'largediff'))) if dtype is not None: diffs[dtype].append(ddata) # construct the email body = [message.strip(), '', "Author: %s" % log.author, "Revision: %s" % options.rev, '', "Affected files:", ''] for ctype in summary: body.extend(["%-65s %-10s" % (f, ctype) for f in summary[ctype]]) body.append('') if diffs['binary']: body.extend([SEPARATOR, '', "The following binary files were changed:", '']) body.extend(diffs['binary']) body.append('') if diffs['large']: body.extend([SEPARATOR, '', "Diffs for the following files were too large to include:", '']) body.extend(diffs['large']) body.append('') if diffs['sensitive']: body.extend([SEPARATOR, '', "The following sensitive files were changed:", '']) body.extend(diffs['sensitive']) body.append('') if diffs['diff']: body.extend([SEPARATOR, '', "The following files were changed:", '']) body.extend(diffs['diff']) if keywords['resolve']: body.extend(['', "RT-AddRefersTo: %s" % keywords['resolve'], "RT-AddReferredToBy: %s" % keywords['resolve'], "RT-ResolveTicket: %s" % keywords['resolve']]) if config.has_option(repos_name, 'email') and not options.stdout: msg = Message() msg.set_payload("\n".join(body)) subject = None if keywords['subject']: subject = keywords['subject'] elif "\n" in message: subject = message[0:max(120, message.index("\n"))] else: subject = message[0:120] msg['Subject'] = "%s %s" % (config.get(repos_name, 'subject'), subject) msg['From'] = get_author_email(log.author) msg['To'] = config.get(repos_name, 'email') logger.debug("Sending message from %s to %s: %s" % (msg['From'], msg['To'], msg['Subject'])) smtp = smtplib.SMTP('localhost') if options.verbose > 2: # this is _really_ verbose smtp.set_debuglevel(options.verbose - 1) smtp.sendmail(msg['From'], [msg['To']], msg.as_string()) smtp.quit() else: print("\n".join(body)) if __name__ == "__main__": sys.exit(main()) tools/create-debian-pkglist-gp.py000066400000000000000000000216321303523157100173360ustar00rootroot00000000000000#!/usr/bin/env python '''Build debian/ubuntu package indexes''' # Original code from Bcfg2 sources import gzip import os import sys import subprocess # Compatibility imports from Bcfg2.Compat import StringIO from Bcfg2.Compat import ConfigParser from Bcfg2.Compat import urlopen def debug(msg): '''print debug messages''' if '-v' in sys.argv: sys.stdout.write(msg) def get_as_list(somestring): """ Input : a string like this : 'a, g, f,w' 
Output : a list like this : ['a', 'g', 'f', 'w'] """ return somestring.replace(' ', '').split(',') def list_contains_all_the_same_values(l): if len(l) == 0: return True # The list contains all the same values if all elements in # the list are equal to the first element. first = l[0] for elem in l: if first != elem: return False return True class SourceURL: def __init__(self, deb_url): deb_url_tokens = deb_url.split() # ex: deb http://somemirror.com/ubuntu dapper main restricted universe self.url = deb_url_tokens[1] self.distribution = deb_url_tokens[2] self.sections = deb_url_tokens[3:] def __str__(self): return "deb %s %s %s" % (self.url, self.distribution, ' '.join(self.sections)) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, str(self)) class Source: def __init__(self, confparser, section, bcfg2_repos_prefix): self.filename = "%s/Pkgmgr/%s.xml" % (bcfg2_repos_prefix, section) self.groups = get_as_list(confparser.get(section, "group_names")) self.priority = confparser.getint(section, "priority") self.architectures = get_as_list(confparser.get(section, "architectures")) self.source_urls = [] self.source_urls.append(SourceURL(confparser.get(section, "deb_url"))) # Agregate urls in the form of deb_url0, deb_url1, ... to deb_url9 for i in range(10): # 0 to 9 option_name = "deb_url%s" % i if confparser.has_option(section, option_name): self.source_urls.append(SourceURL(confparser.get(section, option_name))) self.file = None self.indent_level = 0 def __str__(self): return """File: %s Groups: %s Priority: %s Architectures: %s Source URLS: %s""" % (self.filename, self.groups, self.priority, self.architectures, self.source_urls) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, str(self)) def _open_file(self): self.file = open(self.filename + '~', 'w') def _close_file(self): self.file.close() def _write_to_file(self, msg): self.file.write("%s%s\n" % (self.indent_level * ' ', msg)) def _rename_file(self): os.rename(self.filename + '~', self.filename) def _pkg_version_is_older(self, version1, version2): """ Use dpkg to compare the two version Return true if version1 < version2 """ # Avoid forking a new process if the two strings are equals if version1 == version2: return False (status, output) = subprocess.getstatusoutput("/usr/bin/dpkg --compare-versions %s lt %s" % (version1, version2)) #print "%s dpkg --compare-versions %s lt %s" % (status, version1, version2) return status == 0 def _update_pkgdata(self, pkgdata, source_url): for section in source_url.sections: for arch in self.architectures: url = "%s/dists/%s/%s/binary-%s/Packages.gz" % (source_url.url, source_url.distribution, section, arch) debug("Processing url %s\n" % (url)) try: data = urlopen(url) buf = StringIO(''.join(data.readlines())) reader = gzip.GzipFile(fileobj=buf) for line in reader.readlines(): if line[:8] == 'Package:': pkgname = line.split(' ')[1].strip() elif line[:8] == 'Version:': version = line.split(' ')[1].strip() if pkgname in pkgdata: if arch in pkgdata[pkgname]: # The package is listed twice for the same architecture # We keep the most recent version old_version = pkgdata[pkgname][arch] if self._pkg_version_is_older(old_version, version): pkgdata[pkgname][arch] = version else: # The package data exists for another architecture, # but not for this one. Add it. pkgdata[pkgname][arch] = version else: # First entry for this package pkgdata[pkgname] = {arch: version} else: continue except: raise Exception("Could not process URL %s\n%s\nPlease " "verify the URL." 
% (url, sys.exc_info()[1])) return pkgdata def _get_sorted_pkg_keys(self, pkgdata): pkgs = [] for k in list(pkgdata.keys()): pkgs.append(k) pkgs.sort() return pkgs def _write_common_entries(self, pkgdata): # Write entries for packages that have the same version # across all architectures #coalesced = 0 for pkg in self._get_sorted_pkg_keys(pkgdata): # Dictionary of archname: pkgversion # (There is exactly one version per architecture) archdata = pkgdata[pkg] # List of versions for all architectures of this package pkgversions = list(archdata.values()) # If the versions for all architectures are the same if list_contains_all_the_same_values(pkgversions): # Write the package data ver = pkgversions[0] self._write_to_file('' % (pkg, ver)) #coalesced += 1 # Remove this package entry del pkgdata[pkg] def _write_perarch_entries(self, pkgdata): # Write entries that are left, i.e. packages that have different # versions per architecture #perarch = 0 if pkgdata: for arch in self.architectures: self._write_to_file('' % (arch)) self.indent_level = self.indent_level + 1 for pkg in self._get_sorted_pkg_keys(pkgdata): if arch in pkgdata[pkg]: self._write_to_file('' % (pkg, pkgdata[pkg][arch])) #perarch += 1 self.indent_level = self.indent_level - 1 self._write_to_file('') #debug("Got %s coalesced, %s per-arch\n" % (coalesced, perarch)) def process(self): '''Build package indices for source''' # First, build the pkgdata structure without touching the file, # so the file does not contain incomplete informations if the # network in not reachable. pkgdata = {} for source_url in self.source_urls: pkgdata = self._update_pkgdata(pkgdata, source_url) # Construct the file. self._open_file() for source_url in self.source_urls: self._write_to_file('' % source_url) self._write_to_file('' % self.priority) self.indent_level = self.indent_level + 1 for group in self.groups: self._write_to_file('' % group) self.indent_level = self.indent_level + 1 self._write_common_entries(pkgdata) self._write_perarch_entries(pkgdata) for group in self.groups: self.indent_level = self.indent_level - 1 self._write_to_file('') self.indent_level = self.indent_level - 1 self._write_to_file('') self._close_file() self._rename_file() if __name__ == '__main__': main_conf_parser = ConfigParser.SafeConfigParser() main_conf_parser.read(['/etc/bcfg2.conf']) repo = main_conf_parser.get('server', 'repository') confparser = ConfigParser.SafeConfigParser() confparser.read(os.path.join(repo, "etc/debian-pkglist.conf")) # We read the whole configuration file before processing each entries # to avoid doing work if there is a problem in the file. 
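    # Each section of debian-pkglist.conf becomes one Source object, which
    # in turn writes a Pkgmgr/<section>.xml file under the Bcfg2 repository.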
sources_list = [] for section in confparser.sections(): sources_list.append(Source(confparser, section, repo)) for source in sources_list: source.process() tools/create-debian-pkglist.py000077500000000000000000000247671303523157100167510ustar00rootroot00000000000000#!/usr/bin/env python '''Build debian/ubuntu package indexes''' # Original code from Bcfg2 sources import apt_pkg import gzip import os import re import sys # Compatibility imports from Bcfg2.Compat import StringIO from Bcfg2.Compat import ConfigParser from Bcfg2.Compat import urlopen apt_pkg.init() def debug(msg): '''print debug messages''' if '-v' in sys.argv: sys.stdout.write(msg) def get_as_list(somestring): """ Input : a string like this : 'a, g, f,w' Output : a list like this : ['a', 'g', 'f', 'w'] """ return somestring.replace(' ', '').split(',') def list_contains_all_the_same_values(l): if len(l) == 0: return True # The list contains all the same values if all elements in # the list are equal to the first element. first = l[0] for elem in l: if first != elem: return False return True class SourceURL: def __init__(self, deb_url, arch): deb_url_tokens = deb_url.split() # ex: deb http://somemirror.com/ubuntu dapper main restricted universe self.url = deb_url_tokens[1] self.distribution = deb_url_tokens[2] self.sections = deb_url_tokens[3:] self.arch = arch def __str__(self): return "deb %s %s %s" % (self.url, self.distribution, ' '.join(self.sections)) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, str(self)) class Source: def __init__(self, confparser, section, bcfg2_repos_prefix): self.filename = "%s/Pkgmgr/%s.xml" % (bcfg2_repos_prefix, section) self.groups = get_as_list(confparser.get(section, "group_names")) self.priority = confparser.getint(section, "priority") try: self.pattern = confparser.get(section, "pattern", raw=True) except: self.pattern = '.*' self.architectures = get_as_list(confparser.get(section, "architectures")) self.arch_specialurl = set() self.source_urls = [] self.source_urls.append(SourceURL(confparser.get(section, "deb_url"), "all")) # Agregate urls in the form of deb_url0, deb_url1, ... to deb_url9 for i in range(10): # 0 to 9 option_name = "deb_url%s" % i if confparser.has_option(section, option_name): self.source_urls.append(SourceURL(confparser.get(section, option_name), "all")) # Aggregate architecture specific urls (if present) for arch in self.architectures: if not confparser.has_option(section, "deb_" + arch + "_url"): continue self.source_urls.append(SourceURL(confparser.get(section, "deb_" + arch + "_url"), arch)) # Agregate urls in the form of deb_url0, deb_url1, ... 
to deb_url9 for i in range(10): # 0 to 9 option_name = "deb_" + arch + "_url%s" % i if confparser.has_option(section, option_name): self.source_urls.append(SourceURL(confparser.get(section, option_name), arch)) self.arch_specialurl.add(arch) self.file = None self.indent_level = 0 def __str__(self): return """File: %s Groups: %s Priority: %s Architectures: %s Source URLS: %s""" % (self.filename, self.groups, self.priority, self.architectures, self.source_urls) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, str(self)) def _open_file(self): self.file = open(self.filename + '~', 'w') def _close_file(self): self.file.close() def _write_to_file(self, msg): self.file.write("%s%s\n" % (self.indent_level * ' ', msg)) def _rename_file(self): os.rename(self.filename + '~', self.filename) def _pkg_version_is_older(self, version1, version2): """ Use dpkg to compare the two version Return true if version1 < version2 """ # Avoid forking a new process if the two strings are equals if version1 == version2: return False status = apt_pkg.VersionCompare(version1, version2) return status < 0 def _update_pkgdata(self, pkgdata, source_url): for section in source_url.sections: for arch in self.architectures: if source_url.arch != arch and source_url.arch != "all": continue if source_url.arch == "all" and arch in self.arch_specialurl: continue url = "%s/dists/%s/%s/binary-%s/Packages.gz" % (source_url.url, source_url.distribution, section, arch) debug("Processing url %s\n" % (url)) try: data = urlopen(url) buf = StringIO(''.join(data.readlines())) reader = gzip.GzipFile(fileobj=buf) for line in reader.readlines(): if line[:8] == 'Package:': pkgname = line.split(' ')[1].strip() elif line[:8] == 'Version:': version = line.split(' ')[1].strip() if pkgname in pkgdata: if arch in pkgdata[pkgname]: # The package is listed twice for the same architecture # We keep the most recent version old_version = pkgdata[pkgname][arch] if self._pkg_version_is_older(old_version, version): pkgdata[pkgname][arch] = version else: # The package data exists for another architecture, # but not for this one. Add it. pkgdata[pkgname][arch] = version else: # First entry for this package pkgdata[pkgname] = {arch: version} else: continue except: raise Exception("Could not process URL %s\n%s\nPlease " "verify the URL." % (url, sys.exc_info()[1])) return dict((k, v) for (k, v) in list(pkgdata.items()) \ if re.search(self.pattern, k)) def _get_sorted_pkg_keys(self, pkgdata): pkgs = [] for k in list(pkgdata.keys()): pkgs.append(k) pkgs.sort() return pkgs def _write_common_entries(self, pkgdata): # Write entries for packages that have the same version # across all architectures #coalesced = 0 for pkg in self._get_sorted_pkg_keys(pkgdata): # Dictionary of archname: pkgversion # (There is exactly one version per architecture) archdata = pkgdata[pkg] # List of versions for all architectures of this package pkgversions = list(archdata.values()) # If the versions for all architectures are the same if len(self.architectures) == len(pkgversions) and list_contains_all_the_same_values(pkgversions): # Write the package data ver = pkgversions[0] self._write_to_file('' % (pkg, ver)) #coalesced += 1 # Remove this package entry del pkgdata[pkg] def _write_perarch_entries(self, pkgdata): # Write entries that are left, i.e. 
packages that have different # versions per architecture #perarch = 0 if pkgdata: for arch in self.architectures: self._write_to_file('' % (arch)) self.indent_level = self.indent_level + 1 for pkg in self._get_sorted_pkg_keys(pkgdata): if arch in pkgdata[pkg]: self._write_to_file('' % (pkg, pkgdata[pkg][arch])) #perarch += 1 self.indent_level = self.indent_level - 1 self._write_to_file('') #debug("Got %s coalesced, %s per-arch\n" % (coalesced, perarch)) def process(self): '''Build package indices for source''' # First, build the pkgdata structure without touching the file, # so the file does not contain incomplete informations if the # network in not reachable. pkgdata = {} for source_url in self.source_urls: pkgdata = self._update_pkgdata(pkgdata, source_url) # Construct the file. self._open_file() for source_url in self.source_urls: self._write_to_file('' % source_url) self._write_to_file('' % self.priority) self.indent_level = self.indent_level + 1 for group in self.groups: self._write_to_file('' % group) self.indent_level = self.indent_level + 1 self._write_common_entries(pkgdata) self._write_perarch_entries(pkgdata) for group in self.groups: self.indent_level = self.indent_level - 1 self._write_to_file('') self.indent_level = self.indent_level - 1 self._write_to_file('') self._close_file() self._rename_file() if __name__ == '__main__': # Prefix is relative to script path complete_script_path = os.path.join(os.getcwd(), sys.argv[0]) prefix = complete_script_path[:-len('etc/create-debian-pkglist.py')] confparser = ConfigParser.SafeConfigParser() confparser.read(prefix + "etc/debian-pkglist.conf") # We read the whole configuration file before processing each entries # to avoid doing work if there is a problem in the file. sources_list = [] for section in confparser.sections(): sources_list.append(Source(confparser, section, prefix)) for source in sources_list: source.process() tools/create-rpm-pkglist.py000066400000000000000000000111041303523157100162770ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright (c) 2010 Fabian Affolter, Bernewireless.net. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the Bernewireless nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ''AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Author: Fabian Affolter # from lxml import etree from optparse import OptionParser import os import yum __author__ = 'Fabian Affolter ' __version__ = '0.1' def retrievePackages(): """Getting the installed packages with yum.""" yb = yum.YumBase() yb.conf.cache = os.geteuid() != 1 pl = yb.doPackageLists('installed') pkglist = [] for pkg in sorted(pl.installed): pkgdata = pkg.name, pkg.version pkglist.append(pkgdata) return pkglist def parse_command_line_parameters(): """Parses command line arguments.""" usage = "usage: %prog [options]" version = 'Version: %prog ' + __version__ parser = OptionParser(usage, version=version) parser.add_option("-s", "--show", action="store_true", help="Prints the result to STOUT") parser.add_option("-v", "--pkgversion", action="store_true", help="Include Package version") parser.add_option("-f", "--filename", dest="filename", type="string", metavar="FILE", default="packages.xml", help="Write the output to an XML FILE") (options, args) = parser.parse_args() num_args = 1 return options, args def indent(elem, level=0): """Helps clean up the XML.""" # Stolen from http://effbot.org/zone/element-lib.htm i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " for e in elem: indent(e, level + 1) if not e.tail or not e.tail.strip(): e.tail = i + " " if not e.tail or not e.tail.strip(): e.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def transformXML(): """Transform the package list to an XML file.""" packagelist = retrievePackages() root = etree.Element("PackageList") for i, j in packagelist: root.append(etree.Element("Package", name=i, version=j)) #Print the content #print(etree.tostring(root, pretty_print=True)) tree = etree.ElementTree(root) return tree def main(): options, args = parse_command_line_parameters() filename = options.filename packagelist = transformXML() if options.show == True: tree = etree.parse(filename) for node in tree.findall("//Package"): print(node.attrib["name"]) indent(packagelist.getroot()) packagelist.write(filename, encoding="utf-8") if options.pkgversion == True: tree = etree.parse(filename) for node in tree.findall("//Package"): print("%s-%s" % (node.attrib["name"], node.attrib["version"])) #FIXME : This should be changed to the standard way of optparser #FIXME : Make an option available to strip the version number of the pkg if options.pkgversion == None and options.show == None: indent(packagelist.getroot()) packagelist.write(filename, encoding="utf-8") if __name__ == "__main__": main() tools/ctags.sh000077500000000000000000000022011303523157100136510ustar00rootroot00000000000000#!/bin/bash usage() { echo "$(basename $0) [-e] [-o ] [-s ]" echo " -e: etags mode" echo " -o : Write to . Default is tags or TAGS in the" echo " default source dir" echo " -s : Find Bcfg2 source directory. 
Default is the " echo " parent of the directory where $(basename $0) lives" exit 1 } # compute the path to the parent directory of tools/ SRCDIR=$(pwd)/$(dirname $0)/.. CTAGS=ctags ETAGS= CTAGS_ARGS= OUTFILE="$SRCDIR/TAGS" while getopts ":eho:s:" opt; do case $opt in e) ETAGS=1 CTAGS_ARGS="$CTAGS_ARGS -e" ;; h) usage ;; o) $OUTFILE=$OPTARG ;; s) $SRCDIR=$OPTARG ;; \?) echo "Invalid option: -$OPTARG" >&2 usage ;; esac done CTAGS_ARGS="$CTAGS_ARGS -f $OUTFILE" find "$SRCDIR/testsuite" "$SRCDIR/tools" "$SRCDIR/src/lib" -name \*.py | \ xargs "$CTAGS" $CTAGS_ARGS find "$SRCDIR/src/sbin" | xargs "$CTAGS" $CTAGS_ARGS --append tools/encap-util-count.sh000077500000000000000000000005431303523157100157460ustar00rootroot00000000000000#!/bin/sh # This shows a count of encap packages per directory # Can be useful to make sure you have everything # built for all platforms. It assumes the directory # has a *.run file in it (from the bcfg2 encap build) for RUN in $(find . -type f | grep run$); do DIR="$(dirname $RUN)" printf "${DIR}: " (cd $DIR && ls | wc -l) done exit 0 tools/encap-util-expand.sh000077500000000000000000000002721303523157100160740ustar00rootroot00000000000000#!/bin/sh # This gets the encaps out of a makeself .run file for RUN in $(find . -type f | grep run$); do DIR="$(dirname $RUN)" $RUN --noexec --keep --target $DIR done exit 0 tools/encap-util-place.sh000077500000000000000000000026461303523157100157100ustar00rootroot00000000000000#!/bin/bash # This puts encaps in the right directories, creating the # directories if needed. getdir(){ case $1 in *"ix86-linux_debian_etch"*) printf "linux/debian/etch/ix86/" ;; *"ix86-linux_redhat_60"*) printf "linux/redhat/60/ix86/" ;; *"ix86-linux_redhat_72"*) printf "linux/redhat/72/ix86/" ;; *"ix86-linux_redhat_rhel4"*) printf "linux/redhat/rhel4/ix86/" ;; *"ix86-linux_suse_sles10"*) printf "linux/suse/sles10/ix86/" ;; *"ix86-linux_suse_sles8"*) printf "linux/suse/sles8/ix86/" ;; *"rs6000-aix4.3.1"*) printf "aix/4.3.1/rs6000/" ;; *"rs6000-aix4.3.3"*) printf "aix/4.3.3/rs6000/" ;; *"rs6000-aix5.2.0"*) printf "aix/5.2.0/rs6000/" ;; *"rs6000-aix5.3.0"*) printf "aix/5.3.0/rs6000/" ;; *"sparc-solaris10"*) printf "solaris/10/sparc/" ;; *"sparc-solaris8"*) printf "solaris/8/sparc/" ;; *"sparc-solaris9"*) printf "solaris/9/sparc/" ;; *"sparc-solaris2.6"*) printf "solaris/2.6/sparc/" ;; *"x86_64-linux_suse_sles10"*) printf "linux/suse/sles10/x86_64/" ;; *"-encap-share.tar.gz") printf "share/" ;; *) printf "ERROR" ;; esac } for ep in $(find . -type f | grep -v \.sh$ \ | grep -v epkg\.tar$ \ | grep -v "^\.\/xml\/"); do DIR="$(getdir $ep)" EPNAME="$(basename $ep)" if [ "${DIR}x" != "ERRORx" ]; then if [ ! -d $DIR ]; then mkdir -p $DIR; fi mv $ep $DIR 2>&1 | grep -v "are the same file" else printf "ERROR: Don't know where to put $ep\n" fi done exit 0 tools/encap-util-xml.sh000077500000000000000000000022531303523157100154160ustar00rootroot00000000000000#!/bin/sh # This builds the XML Pkgmgr files for the encap directory # structure created by the place script. 
It assumes the # directory has a *.run file in it (from the bcfg2 encap build) SITEBASEURI="http://example.com/encaps" for RUN in $(find * -type f | grep run$); do DIR="$(dirname $RUN)" FILE="$(basename $RUN)" ARCH="$(printf "$FILE" | awk -F\- '{print $4}')" OS="$(printf "$FILE" | awk -F\- '{print $5}' | sed s:\.run$::g)" case $OS in *aix*) OSDIR="aix/$(printf "$OS" | sed s:aix::g)" ;; *solaris*) OSDIR="solaris/$(printf "$OS" | sed s:solaris::g)" ;; *linux*) OSDIR="$(printf "$OS" | sed s:\_:\/:g)" ;; *) exit 1 esac XML="./xml/site-encaps-${ARCH}-${OS}.xml" printf " $XML printf " type='encap'\n" >> $XML printf " uri='${SITEBASEURI}/%s/%s'>\n" "$OSDIR" "$ARCH" >> $XML printf " \n" "$ARCH" "$OS" >> $XML for FILE in `(cd ./$DIR && ls *-encap-*.tar.gz) | sort`; do printf " \n" "$FILE" >> $XML done printf " \n" >> $XML printf "\n" >> $XML done exit 0 tools/export.py000077500000000000000000000343141303523157100141210ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 """ Second attempt to make our export script more portable than export.sh """ import fileinput from subprocess import Popen, PIPE import sys # This will need to be replaced with argsparse when we make a 2.7+/3.2+ version import optparse import datetime # py3k compatibility try: from email.Utils import formatdate except ImportError: from email.utils import formatdate # In lieu of a config file help_message = \ """This script creates a tag in the Bcfg2 git repo and exports a tar file of the code at that tag. This script must be run at the top of your git repository. """ pkgname = 'bcfg2' ftphost = 'terra.mcs.anl.gov' ftpdir = '/mcs/ftp/pub/bcfg' def run(command): return Popen(command, shell=True, stdout=PIPE).communicate() def find_and_replace(f, iftest, rline, startswith=False, dryrun=False): if dryrun: inplace = 0 print("*** dry-run: New '%s' will look like this:" % f) else: inplace = 1 for line in fileinput.input(f, inplace): if startswith: if line.startswith(iftest): line = line.replace(line, rline) sys.stdout.write(line) else: if iftest in line and line != "Version: %{version}\n": line = line.replace(line, rline) sys.stdout.write(line) if dryrun: print("*** End '%s'" % f) def main(): # This is where the options are set up p = optparse.OptionParser(description=help_message, prog=sys.argv[0], version='0.1', usage='%prog [-h|--help] [-v|--version] ' '[-n|--dry-run] [-d|--debug]') p.add_option('--verbose', '-v', action='store_true', help='turns on verbose mode', default=False, dest='verbose') p.add_option('--dry-run', '-n', action='store_true', help='run in dry-run mode; ' 'no changes will be made to the system', default=False, dest='dryrun') p.add_option('--debug', '-d', action='store_true', help='run in debun mode', default=False, dest='debug') p.add_option('--paranoid', '-P', action='store_true', help='run in paranoid mode, ' 'make changes but do not commit to repository', default=False, dest='paranoid') options = p.parse_args()[0] if options.debug: print(options) print("What should debug mode do?") # py3k compatibility try: version = raw_input("Please enter the Bcfg2 version " "you are tagging (e.g. 1.0.0): ") name = raw_input("Your name: ") email = raw_input("Your email: ") except NameError: version = input("Please enter the Bcfg2 version " "you are tagging (e.g. 
1.0.0): ") name = input("Your name: ") email = input("Your email: ") # parse version into Major.Minor.MicroBuild and validate vkeys = ["major", "minor", "microbuild"] try: version_info = dict(zip(vkeys, version.split("."))) version_info["micro"] = version_info["microbuild"][0:1] version_info["build"] = version_info["microbuild"][1:] version_release = "%s.%s.%s" % (version_info['major'], version_info['minor'], version_info['micro']) if options.debug: print("version is %s" % version) print("version_info is %s" % version_info) print("version_release is %s" % version_release) if not version_info["major"].isdigit() \ or not version_info["minor"].isdigit() \ or not version_info["micro"]: raise VersionError('isdigit() test failed') if len(version_info["micro"]) > 1: raise VersionError('micro must be single digit because ' 'IFMinorVersion restrictions in ' 'Mac OS X Packaging') except: print("""Version must be of the form Major.Minor.MicroBuild, where Major and Minor are integers and Micro is a single digit optionally followed by Build (i.e. pre##) E.G. 1.2.0pre1 is a valid version. """) quit() tarname = '/tmp/%s-%s.tar.gz' % (pkgname, version) newchangelog = """bcfg2 (%s-0.0) unstable; urgency=low * New upstream release -- %s <%s> %s """ % (version, name, email, formatdate(localtime=True)) # write out the new debian changelog if options.dryrun: print("*** Add the following to the top of debian/changelog:\n%s\n" % newchangelog) else: try: with open('debian/changelog', 'r+') as f: old = f.read() f.seek(0) f.write(newchangelog + old) f.close() except: print("Problem opening debian/changelog") print(help_message) quit() # update solaris version find_and_replace('solaris/Makefile', 'VERS=', 'VERS=%s-1\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris/pkginfo.bcfg2', 'VERSION=', 'VERSION="%s"\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris/pkginfo.bcfg2-server', 'VERSION=', 'VERSION="%s"\n' % version, startswith=True, dryrun=options.dryrun) # update solaris IPS version find_and_replace('solaris-ips/Makefile', 'VERS=', 'VERS=%s-1\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris-ips/MANIFEST.bcfg2.header', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@%s"\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris-ips/MANIFEST.bcfg2-server.header', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@%s"\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris-ips/pkginfo.bcfg2', 'VERSION=', 'VERSION="%s"\n' % version, startswith=True, dryrun=options.dryrun) find_and_replace('solaris-ips/pkginfo.bcfg2-server', 'VERSION=', 'VERSION="%s"\n' % version, startswith=True, dryrun=options.dryrun) # set new version in Bcfg2/version.py find_and_replace('src/lib/Bcfg2/version.py', '__version__ =', '__version__ = "%s"\n' % version, dryrun=options.dryrun) # replace version in misc/bcfg2.spec and misc/bcfg2-selinux.spec find_and_replace('misc/bcfg2.spec', 'Version:', 'Version: %s\n' % version_release, dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'Version:', 'Version: %s\n' % version_release, dryrun=options.dryrun) if version_info['build'].startswith('rc'): find_and_replace('misc/bcfg2.spec', 'global _rc ', '%%global _rc %s\n' % version_info['build'], dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'global _rc ', '%%global _rc %s\n' % 
version_info['build'], dryrun=options.dryrun) elif version_info['build'].startswith('pre'): find_and_replace('misc/bcfg2.spec', 'global _pre ', '%%global _pre %s\n' % version_info['build'], dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'global _pre ', '%%global _pre %s\n' % version_info['build'], dryrun=options.dryrun) else: # comment out pre/rc find_and_replace('misc/bcfg2.spec', 'global _pre ', '#%%global _pre 2\n', dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'global _pre ', '#%%global _pre 2\n', dryrun=options.dryrun) find_and_replace('misc/bcfg2.spec', 'global _rc ', '#%%global _rc 1\n', dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'global _rc ', '#%%global _rc 1\n', dryrun=options.dryrun) find_and_replace('misc/bcfg2.spec', 'Release: ', 'Release: 1%{?_pre_rc}%{?dist}\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'Release: ', 'Release: 1%{?_pre_rc}%{?dist}\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2.spec', '%setup', '%setup -q -n %{name}-%{version}%{?_pre_rc}\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', '%setup', '%setup -q -n %{name}-%{version}%{?_pre_rc}\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2.spec', 'BuildRoot', 'BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n)\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'BuildRoot', 'BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n)\n', startswith=True, dryrun=options.dryrun) # fix pre problem noted in # http://trac.mcs.anl.gov/projects/bcfg2/ticket/1129 find_and_replace('misc/bcfg2.spec', 'Source0', 'Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz\n', startswith=True, dryrun=options.dryrun) find_and_replace('misc/bcfg2-selinux.spec', 'Source0', 'Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz\n', startswith=True, dryrun=options.dryrun) # update the version in reports find_and_replace('src/lib/Bcfg2/Reporting/templates/base.html', 'Bcfg2 Version', ' Bcfg2 Version %s\n' % version, dryrun=options.dryrun) # update the version in the docs find_and_replace('doc/conf.py', 'version =', 'version = \'%s.%s\'\n' % (version_info['major'], version_info['minor']), startswith=True, dryrun=options.dryrun) find_and_replace('doc/conf.py', 'release =', 'release = \'%s\'\n' % (version_release), startswith=True, dryrun=options.dryrun) # update osx Makefile find_and_replace('osx/Makefile', 'BCFGVER =', 'BCFGVER = %s\n' % (version), startswith=True, dryrun=options.dryrun) find_and_replace('osx/Makefile', 'MAJOR =', 'MAJOR = %s\n' % (version_info['major']), startswith=True, dryrun=options.dryrun) find_and_replace('osx/Makefile', 'MINOR =', 'MINOR = %s%s\n' % (version_info['minor'], version_info['micro']), startswith=True, dryrun=options.dryrun) # update osx Portfile find_and_replace('osx/macports/Portfile', 'version ', 'version %s\n' % version, startswith=True, dryrun=options.dryrun) # tag the release #FIXME: do this using python-dulwich commando = {} commando["vcs_diff"] = "git diff" commando["vcs_commit"] = "git commit -asm 'Version bump to %s'" % version # NOTE: This will use the default email address key. If you want to sign # the tag using a different key, you will need to set 'signingkey' # to the proper value in the [user] section of your git # configuration. 
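    # (Illustrative aside, not part of the original script: the key used for
    #  signing can be selected beforehand with something like
    #      git config user.signingkey <KEYID>
    #  and the signed tag can be verified afterwards with
    #      git tag -v v<version>
    #  where <KEYID> and <version> are placeholders.)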
commando["vcs_tag"] = "git tag -s v%s -m 'tagged %s release'" % (version, version) commando["create_archive"] = \ "git archive --format=tar --prefix=%s-%s/ v%s | gzip > %s" \ % (pkgname, version, version, tarname) commando["gpg_encrypt"] = "gpg --armor --output %s.gpg --detach-sig %s" \ % (tarname, tarname) # upload release to ftp commando["scp_archive"] = "scp %s* terra.mcs.anl.gov:/mcs/ftp/pub/bcfg/" \ % tarname # Execute the commands if options.paranoid: commando_orders = ["vcs_diff"] else: commando_orders = ["vcs_commit", "vcs_tag", "create_archive", "gpg_encrypt"] #"scp_archive"] if options.dryrun: for cmd in commando_orders: print("*** dry-run: %s" % commando[cmd]) else: for cmd in commando_orders: output = run(commando[cmd])[0].strip() if options.verbose: print(output) print("Ran '%s' with above output." % cmd) if __name__ == '__main__': sys.exit(main()) tools/generate-manpages.bash000066400000000000000000000003621303523157100164410ustar00rootroot00000000000000#!/bin/bash # This makes building our manpages easier and more consistent. if [ ! -d man -o ! -d tools -o ! -d doc ] then echo "Must be in the top-level bcfg2 source directory" exit 1 fi sphinx-build -b man -D copyright="" doc man tools/git_commit.py000077500000000000000000000147001303523157100147300ustar00rootroot00000000000000#!/usr/bin/env python """ Trigger script to commit selected changes to a local repository back to git. To use this script, enable the Trigger plugin, put this script in /var/lib/bcfg2/Trigger/, and create /etc/bcfg2-commit.conf. The config file, /etc/bcfg2-commit.conf, may contain four options in the [global] section: * "config" is the path to the Bcfg2 server config file. (Default: /etc/bcfg2.conf) * "commit" is a comma-separated list of globs giving the paths that should be committed back to the repository. Default is 'SSLCA/*, SSHbase/*, Cfg/*', which will commit data back for SSLCA, SSHbase, Cfg, FileProbes, etc., but not, for instance, Probes/probed.xml. You may wish to add Metadata/clients.xml to the commit list. * "debug" and "verbose" let you set the log level for git_commit.py itself. """ import os import sys import git import logging import Bcfg2.Logger import Bcfg2.Options from Bcfg2.Compat import ConfigParser from fnmatch import fnmatch # config file path CONFIG = "/etc/bcfg2-commit.conf" # config defaults. 
all config options are in the [global] section DEFAULTS = dict(config='/etc/bcfg2.conf', commit="SSLCA/*, SSHbase/*, Cfg/*") def list_changed_files(repo): return [d for d in repo.index.diff(None) if (d.a_blob is not None and not d.deleted_file and not d.renamed and not d.new_file)] def add_to_commit(patterns, path, repo, relpath): progname = os.path.basename(sys.argv[0]) logger = logging.getLogger(progname) for pattern in patterns: if fnmatch(path, os.path.join(relpath, pattern)): logger.debug("%s: Adding %s to commit" % (progname, path)) repo.index.add([path]) return True return False def parse_options(): config = ConfigParser.SafeConfigParser(DEFAULTS) config.read(CONFIG) optinfo = dict( profile=Bcfg2.Options.CLIENT_PROFILE, dryrun=Bcfg2.Options.CLIENT_DRYRUN, groups=Bcfg2.Options.Option("Groups", default=[], cmd="-g", odesc=':', cook=Bcfg2.Options.colon_split)) optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) argv = [Bcfg2.Options.CFILE.cmd, config.get("global", "config")] argv.extend(sys.argv[1:]) setup = Bcfg2.Options.OptionParser(optinfo, argv=argv) setup.parse(argv) setup['commit'] = Bcfg2.Options.list_split(config.get("global", "commit")) for opt in ['debug', 'verbose']: try: setup[opt] = config.getboolean("global", opt) except ConfigParser.NoOptionError: pass try: hostname = setup['args'][0] except IndexError: print(setup.hm) raise SystemExit(1) return (setup, hostname) def setup_logging(setup): progname = os.path.basename(sys.argv[0]) log_args = dict(to_syslog=setup['syslog'], to_console=sys.stdout.isatty(), to_file=setup['logging'], level=logging.WARNING) if setup['debug']: log_args['level'] = logging.DEBUG elif setup['verbose']: log_args['level'] = logging.INFO Bcfg2.Logger.setup_logging(progname, **log_args) return logging.getLogger(progname) def main(): progname = os.path.basename(sys.argv[0]) setup, hostname = parse_options() logger = setup_logging(setup) if setup['dryrun']: logger.info("%s: In dry-run mode, changes will not be committed" % progname) if setup['vcs_root']: gitroot = os.path.realpath(setup['vcs_root']) else: gitroot = os.path.realpath(setup['repo']) logger.info("%s: Using Git repo at %s" % (progname, gitroot)) try: repo = git.Repo(gitroot) except: # pylint: disable=W0702 logger.error("%s: Error setting up Git repo at %s: %s" % (progname, gitroot, sys.exc_info()[1])) return 1 # canonicalize the repo path so that git will recognize it as # being inside the git repo bcfg2root = os.path.realpath(setup['repo']) if not bcfg2root.startswith(gitroot): logger.error("%s: Bcfg2 repo %s is not inside Git repo %s" % (progname, bcfg2root, gitroot)) return 1 # relative path to Bcfg2 root from VCS root if gitroot == bcfg2root: relpath = '' else: relpath = bcfg2root[len(gitroot) + 1:] new = 0 changed = 0 logger.debug("%s: Untracked files: %s" % (progname, repo.untracked_files)) for path in repo.untracked_files: if add_to_commit(setup['commit'], path, repo, relpath): new += 1 else: logger.debug("%s: Not adding %s to commit" % (progname, path)) logger.debug("%s: Untracked files after building commit: %s" % (progname, repo.untracked_files)) changes = list_changed_files(repo) logger.info("%s: Changed files: %s" % (progname, [d.a_blob.path for d in changes])) for diff in changes: if add_to_commit(setup['commit'], diff.a_blob.path, repo, relpath): changed += 1 else: logger.debug("%s: Not adding %s to commit" % (progname, diff.a_blob.path)) logger.info("%s: Changed files after building commit: %s" % (progname, [d.a_blob.path for d 
in list_changed_files(repo)])) if new + changed > 0: logger.debug("%s: Committing %s new files and %s changed files" % (progname, new, changed)) if setup['dryrun']: logger.warning("%s: In dry-run mode, skipping commit and push" % progname) else: output = repo.index.commit("Auto-commit with %s from %s run" % (progname, hostname)) if output: logger.debug("%s: %s" % (progname, output)) remote = repo.remote() logger.debug("%s: Pushing to remote %s at %s" % (progname, remote, remote.url)) output = remote.push() if output: logger.debug("%s: %s" % (progname, output)) else: logger.info("%s: No changes to commit" % progname) if __name__ == '__main__': sys.exit(main()) tools/pkgmgr_gen.py000077500000000000000000000532101303523157100147140ustar00rootroot00000000000000#!/usr/bin/python """Program to generate a bcfg2 Pkgmgr configuration file from a list of directories that contain RPMS. All versions or only the latest may be included in the output. rpm.labelCompare is used to compare the package versions, so that a proper rpm version comparison is done (epoch:version-release). The output file may be formated for use with the RPM or Yum bcfg2 client drivers. The output can also contain the PackageList and nested group headers. """ import collections import datetime import glob import gzip import optparse import os import rpm import sys from lxml.etree import parse import xml.sax from xml.sax.handler import ContentHandler # Compatibility imports from Bcfg2.Compat import urljoin def info(object, spacing=10, collapse=1): """Print methods and doc strings. Takes module, class, list, dictionary, or string. """ methodList = [method for method in dir(object) if isinstance(getattr(object, method), collections.Callable)] processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s) print("\n".join(["%s %s" % (method.ljust(spacing), processFunc(str(getattr(object, method).__doc__))) for method in methodList])) def readRpmHeader(ts, filename): """ Read an rpm header from an RPM file. """ try: fd = os.open(filename, os.O_RDONLY) except: print("Failed to open RPM file %s" % filename) h = ts.hdrFromFdno(fd) os.close(fd) return h def sortedDictValues(adict): """ Sort a dictionary by its keys and return the items in sorted key order. """ keys = list(adict.keys()) keys.sort() return list(map(adict.get, keys)) def cmpRpmHeader(a, b): """ cmp() implemetation suitable for use with sort. a and b are dictionaries as created by loadRpms(). Comparison is made by package name and then by the full rpm version (epoch, version, release). rpm.labelCompare is used for the version part of the comparison. """ n1 = str(a['name']) e1 = str(a['epoch']) v1 = str(a['version']) r1 = str(a['release']) n2 = str(b['name']) e2 = str(b['epoch']) v2 = str(b['version']) r2 = str(b['release']) ret = cmp(n1, n2) if ret == 0: ret = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) return ret def loadRpms(dirs): """ dirs is a list of directories to search for rpms. Builds a dictionary keyed by the package name. Dictionary item is a list, one entry per package instance found. The list entries are dictionaries. Keys are 'filename', 'mtime' 'name', 'arch', 'epoch', 'version' and 'release'. e.g. 
packages = { 'bcfg2' : [ {'filename':'bcfg2-0.9.2-0.0rc1.noarch.rpm', 'mtime':'' 'name':"bcfg2', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-0.9.2-0.0rc5.noarch.rpm', 'mtime':'' 'name':"bcfg2', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}], 'bcfg2-server' : [ {'filename':'bcfg2-server-0.9.2-0.0rc1.noarch.rpm', 'mtime':'' 'name':"bcfg2-server', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-server-0.9.2-0.0rc5.noarch.rpm', 'mtime':'' 'name':"bcfg2-server', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}], } """ packages = {} ts = rpm.TransactionSet() vsflags = 0 vsflags |= rpm._RPMVSF_NODIGESTS vsflags |= rpm._RPMVSF_NOSIGNATURES ovsflags = ts.setVSFlags(vsflags) for dir in dirs: if options.verbose: print("Scanning directory: %s" % dir) for file in [files for files in os.listdir(dir) if files.endswith('.rpm')]: filename = os.path.join(dir, file) # Get the mtime of the RPM file. file_mtime = datetime.date.fromtimestamp(os.stat(filename).st_mtime) # Get the RPM header header = readRpmHeader(ts, filename) # Get what we are interesting in out of the header. name = header[rpm.RPMTAG_NAME] epoch = header[rpm.RPMTAG_EPOCH] version = header[rpm.RPMTAG_VERSION] release = header[rpm.RPMTAG_RELEASE] subarch = header[rpm.RPMTAG_ARCH] # Only load RPMs with subarchitectures as calculated from the --archs option. if subarch in subarchs or 'all' in subarchs: # Store what we want in our structure. packages.setdefault(name, []).append({'filename': file, 'mtime': file_mtime, 'name': name, 'arch': subarch, 'epoch': epoch, 'version': version, 'release': release}) # Print '.' for each package. stdio is line buffered, so have to flush it. if options.verbose: sys.stdout.write('.') sys.stdout.flush() if options.verbose: sys.stdout.write('\n') return packages class pkgmgr_URLopener(urllib.FancyURLopener): """ Override default error handling so that we can see what the errors are. """ def http_error_default(self, url, fp, errcode, errmsg, headers): """ Override default error handling so that we can see what the errors are. """ print("ERROR %s: Unable to retrieve %s" % (errcode, url)) class PrimaryParser(ContentHandler): def __init__(self, packages): self.inPackage = 0 self.inName = 0 self.inArch = 0 self.packages = packages def startElement(self, name, attrs): if name == "package": self.package = {'file': None, 'name': '', 'subarch': '', 'epoch': None, 'version': None, 'release': None} self.inPackage = 1 elif self.inPackage: if name == "name": self.inName = 1 elif name == "arch": self.inArch = 1 elif name == "version": self.package['epoch'] = attrs.getValue('epoch') self.package['version'] = attrs.getValue('ver') self.package['release'] = attrs.getValue('rel') elif name == "location": self.package['file'] = attrs.getValue('href') def endElement(self, name): if name == "package": self.inPackage = 0 # Only load RPMs with subarchitectures as calculated from the --archs option. if self.package['subarch'] in subarchs or 'all' in subarchs: self.packages.setdefault(self.package['name'], []).append( {'filename': self.package['file'], 'name': self.package['name'], 'arch': self.package['subarch'], 'epoch': self.package['epoch'], 'version': self.package['version'], 'release': self.package['release']}) # Print '.' for each package. stdio is line buffered, so have to flush it. 
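                # (Illustrative aside, not in the original: after a repository
                #  has been parsed, self.packages maps package names to lists
                #  of dicts with these keys, e.g. (made-up values)
                #      self.packages['bcfg2'] == [{'filename': 'bcfg2-1.3.0-1.noarch.rpm',
                #                                  'name': 'bcfg2', 'arch': 'noarch',
                #                                  'epoch': '0', 'version': '1.3.0',
                #                                  'release': '1'}]
                #  unlike loadRpms(), no 'mtime' key is recorded here.)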
if options.verbose: sys.stdout.write('.') sys.stdout.flush() elif self.inPackage: if name == "name": self.inName = 0 elif name == "arch": self.inArch = 0 def characters(self, content): if self.inPackage: if self.inName: self.package['name'] += content if self.inArch: self.package['subarch'] += content def loadRepos(repolist): ''' repolist is a list of urls to yum repositories. Builds a dictionary keyed by the package name. Dictionary item is a list, one entry per package instance found. The list entries are dictionaries. Keys are 'filename', 'mtime' 'name', 'arch', 'epoch', 'version' and 'release'. e.g. packages = { 'bcfg2' : [ {'filename':'bcfg2-0.9.2-0.0rc1.noarch.rpm', 'mtime':'' 'name':"bcfg2', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-0.9.2-0.0rc5.noarch.rpm', 'mtime':'' 'name':"bcfg2', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}], 'bcfg2-server' : [ {'filename':'bcfg2-server-0.9.2-0.0rc1.noarch.rpm', 'mtime':'' 'name':"bcfg2-server', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-server-0.9.2-0.0rc5.noarch.rpm', 'mtime':'' 'name':"bcfg2-server', ''arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}], } ''' packages = {} for repo in repolist: url = urljoin(repo, './repodata/repomd.xml') if options.verbose: print("Loading repo metadata : %s" % url) try: opener = pkgmgr_URLopener() file, message = opener.retrieve(url) except: sys.exit() try: tree = parse(file) except IOError: print("ERROR: Unable to parse retrieved repomd.xml.") sys.exit() repomd = tree.getroot() for element in repomd: if element.tag.endswith('data') and element.get('type') == 'primary': for property in element: if property.tag.endswith('location'): primaryhref = property.get('href') url = urljoin(repo, './' + primaryhref) if options.verbose: print("Loading : %s" % url) try: opener = pkgmgr_URLopener() file, message = opener.retrieve(url) except: sys.exit() try: repo_file = gzip.open(file) except IOError: print("ERROR: Unable to parse retrieved file.") sys.exit() parser = xml.sax.make_parser() parser.setContentHandler(PrimaryParser(packages)) parser.parse(repo_file) if options.verbose: sys.stdout.write('\n') repo_file.close() return packages def printInstance(instance, group_count): """ Print the details for a package instance with the appropriate indentation and in the specified format (rpm or yum). """ group_count = group_count + 1 name = instance['name'] epoch = instance['epoch'] version = instance['version'] release = instance['release'] arch = instance['arch'] output_line = '' if options.format == 'rpm': output_line = '%s\n' % (version, release, arch) output.write(output_line) def printPackage(entry, group_count): """ Print the details of a package with the appropriate indentation. Only the specified (all or latest) release(s) is printed. entry is a single package entry as created in loadRpms(). """ output.write('%s\n' \ % (group_count * indent, entry[0]['name'], options.format)) subarch_dict = {} arch_dict = {} # Split instances of this package into subarchitectures. for instance in entry: if instance['arch'] == 'src': continue if instance['arch'] in subarch_dict: subarch_dict[instance['arch']].append(instance) else: subarch_dict[instance['arch']] = [instance] # Keep track of the subarchitectures we have found in each architecture. 
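        # (Illustrative aside, not in the original: for a package shipped as
        #  i386, i686 and x86_64 builds, subarch_dict is keyed by 'i386',
        #  'i686' and 'x86_64', while arch_dict collapses to something like
        #  {'x86': ['i386', 'i686'], 'x86_64': ['x86_64']}; the pruning step
        #  below then keeps only the highest-sorting subarchitecture per
        #  architecture, 'i686' in this example.)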
if subarch_mapping[instance['arch']] in arch_dict: if instance['arch'] not in arch_dict[subarch_mapping[instance['arch']]]: arch_dict[subarch_mapping[instance['arch']]].append(instance['arch']) else: arch_dict[subarch_mapping[instance['arch']]] = [instance['arch']] # Only keep the 'highest' subarchitecture in each architecture. for arch in list(arch_dict.keys()): if len(arch_dict[arch]) > 1: arch_dict[arch].sort() for s in arch_dict[arch][:-1]: del subarch_dict[s] # Sort packages within each architecture into version order for arch in subarch_dict: subarch_dict[arch].sort(cmpRpmHeader) if options.release == 'all': # Output all instances for header in subarch_dict[arch]: printInstance(header, group_count) else: # Output the latest printInstance(subarch_dict[arch][-1], group_count) output.write('%s\n' % (group_count * indent)) def main(): if options.verbose: print("Loading package headers") if options.rpmdirs: package_dict = loadRpms(search_dirs) elif options.yumrepos: package_dict = loadRepos(repos) if options.verbose: print("Processing package headers") if options.pkgmgrhdr: if options.format == "rpm": output.write("\n" % (options.uri, options.priority)) else: output.write("\n" % (options.priority)) group_count = 1 if groups_list: for group in groups_list: output.write("%s\n" % (indent * group_count, group)) group_count = group_count + 1 # Process packages in name order for package_entry in sortedDictValues(package_dict): printPackage(package_entry, group_count) if groups_list: group_count = group_count - 1 while group_count: output.write('%s\n' % (indent * group_count)) group_count = group_count - 1 if options.pkgmgrhdr: output.write('\n') if options.verbose: print("%i package instances were processed" % len(package_dict)) if __name__ == "__main__": p = optparse.OptionParser() p.add_option('--archs', '-a', action='store', \ default='all', \ type='string', \ help='''Comma separated list of subarchitectures to include. The highest subarichitecture required in an architecture group should specified. Lower subarchitecture packages will be loaded if that is all that is available. e.g. The higher of i386, i486 and i586 packages will be loaded if -a i586 is specified. (Default: all). ''') p.add_option('--rpmdirs', '-d', action='store', type='string', \ help='''Comma separated list of directories to scan for RPMS. Wilcards are permitted. ''') p.add_option('--enddate', '-e', action='store', \ type='string', \ help='End date for RPM file selection.') p.add_option('--format', '-f', action='store', \ default='yum', \ type='choice', \ choices=('yum', 'rpm'), \ help='''Format of the Output. Choices are yum or rpm. (Default: yum) ''') p.add_option('--groups', '-g', action='store', \ type='string', \ help='''List of comma separated groups to nest Package entities in. ''') p.add_option('--indent', '-i', action='store', \ default=4, \ type='int', \ help='''Number of leading spaces to indent nested entries in the output. (Default:4) ''') p.add_option('--outfile', '-o', action='store', \ type='string', \ help='Output file name.') p.add_option('--pkgmgrhdr', '-P', action='store_true', \ help='Include PackageList header in output.') p.add_option('--priority', '-p', action='store', \ default=0, \ type='int', \ help='''Value to set priority attribute in the PackageList Tag. (Default: 0) ''') p.add_option('--release', '-r', action='store', \ default='latest', \ type='choice', \ choices=('all', 'latest'), \ help='''Which releases to include in the output. Choices are all or latest. 
(Default: latest).''') p.add_option('--startdate', '-s', action='store', \ type='string', \ help='Start date for RPM file selection.') p.add_option('--uri', '-u', action='store', \ type='string', \ help='URI for PackageList header required for RPM format ouput.') p.add_option('--verbose', '-v', action='store_true', \ help='Enable verbose output.') p.add_option('--yumrepos', '-y', action='store', type='string', \ help='''Comma separated list of YUM repository URLs to load. NOTE: Each URL must end in a '/' character.''') options, arguments = p.parse_args() if options.pkgmgrhdr and options.format == 'rpm' and not options.uri: print("Option --uri must be specified to produce a PackageList Tag " "for rpm formatted files.") sys.exit(1) if not options.rpmdirs and not options.yumrepos: print("One of --rpmdirs and --yumrepos must be specified") sys.exit(1) # Set up list of directories to search if options.rpmdirs: search_dirs = [] for d in options.rpmdirs.split(','): search_dirs += glob.glob(d) if options.verbose: print("The following directories will be scanned:") for d in search_dirs: print(" %s" % d) # Setup list of repos if options.yumrepos: repos = [] for r in options.yumrepos.split(','): repos.append(r) if options.verbose: print("The following repositories will be scanned:") for d in repos: print(" %s" % d) # Set up list of architectures to include and some mappings # to use later. arch_mapping = {'x86': ['i686', 'i586', 'i486', 'i386', 'athlon'], 'x86_64': ['x86_64'], 'ia64': ['ia64'], 'ppc': ['ppc'], 'ppc64': ['ppc64'], 'sparc': ['sparc'], 'noarch': ['noarch']} subarch_mapping = {'i686': 'x86', 'i586': 'x86', 'i486': 'x86', 'i386': 'x86', 'athlon': 'x86', 'x86_64': 'x86_64', 'ia64': 'ia64', 'ppc': 'ppc', 'ppc64': 'ppc64', 'sparc': 'sparc', 'noarch': 'noarch'} commandline_subarchs = options.archs.split(',') arch_list = [] subarchs = [] if 'all' in commandline_subarchs: subarchs.append('all') else: for s in commandline_subarchs: if s not in subarch_mapping: print("Error: Invalid subarchitecture specified: ", s) sys.exit(1) # Only allow one subarchitecture per architecture to be specified. if s not in arch_list: arch_list.append(s) # Add subarchitectures lower than the one specified to the list. # e.g. If i486 is specified this will add i386 to the list of # subarchitectures to load. i = arch_mapping[subarch_mapping[s]].index(s) #if i != len(arch_mapping[subarch_mapping[s]]): subarchs += arch_mapping[subarch_mapping[s]][i:] else: print("Error: Multiple subarchitecutes of the same " "architecture specified.") sys.exit(1) indent = ' ' * options.indent if options.groups: groups_list = options.groups.split(',') else: groups_list = None if options.outfile: output = file(options.outfile, "w") else: output = sys.stdout main() tools/pkgmgr_update.py000077500000000000000000000365741303523157100154430ustar00rootroot00000000000000#!/usr/bin/python """ Program to update an existing bcfg2 Pkgmgr configuration file from a list of directories that contain RPMS. Only the epoch, version, release and simplefiles attributes are updated in existing entries. All other entries and attributes are preserved. This is a total hack until a proper more generalised system for managing Pkgmgr configuation files is developed. 
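An illustrative invocation (every file and directory name below is
hypothetical):

    pkgmgr_update.py -c Pkgmgr/packages.xml -d /srv/mirror/updates \
        -o Pkgmgr/packages.new.xml -v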
""" __version__ = '0.1' import datetime import glob import gzip import optparse import os import rpm import sys # Compatibility imports from Bcfg2.Compat import urljoin try: from lxml.etree import parse, tostring except: from elementtree.ElementTree import parse, tostring installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-source', 'kernel-devel', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'] def readRpmHeader(ts, filename): """ Read an rpm header from an RPM file. """ try: fd = os.open(filename, os.O_RDONLY) except: print("Failed to open RPM file %s" % filename) h = ts.hdrFromFdno(fd) os.close(fd) return h def sortedDictValues(adict): """ Sort a dictionary by its keys and return the items in sorted key order. """ keys = list(adict.keys()) keys.sort() return list(map(adict.get, keys)) def cmpRpmHeader(a, b): """ cmp() implemetation suitable for use with sort. """ n1 = str(a.get('name')) e1 = str(a.get('epoch')) v1 = str(a.get('version')) r1 = str(a.get('release')) n2 = str(b.get('name')) e2 = str(b.get('epoch')) v2 = str(b.get('version')) r2 = str(b.get('release')) return rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) def loadRpms(dirs): """ dirs is a list of directories to search for rpms. Builds a multilevel dictionary keyed by the package name and arch. Arch dictionary item is a list, one entry per package instance found. The list entries are dictionaries. Keys are 'filename', 'mtime' 'name', 'arch', 'epoch', 'version' and 'release'. e.g. packages = { 'bcfg2' : { 'noarch' : [ {'filename':'bcfg2-0.9.2-0.0rc1.noarch.rpm', 'mtime':'', 'name':'bcfg2', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-0.9.2-0.0rc5.noarch.rpm', 'mtime':'', 'name':'bcfg2', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}]}, 'bcfg2-server' { 'noarch' : [ {'filename':'bcfg2-server-0.9.2-0.0rc1.noarch.rpm', 'mtime':'', 'name':'bcfg2-server', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-server-0.9.2-0.0rc5.noarch.rpm', 'mtime':'', 'name':"bcfg2-server', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}]}, } """ packages = {} ts = rpm.TransactionSet() vsflags = 0 vsflags |= rpm._RPMVSF_NODIGESTS vsflags |= rpm._RPMVSF_NOSIGNATURES ovsflags = ts.setVSFlags(vsflags) for dir in dirs: if options.verbose: print("Scanning directory: %s" % dir) for file in [files for files in os.listdir(dir) if files.endswith('.rpm')]: filename = os.path.join(dir, file) # Get the mtime of the RPM file. file_mtime = datetime.date.fromtimestamp(os.stat(filename).st_mtime) # Get the RPM header header = readRpmHeader(ts, filename) # Get what we are interesting in out of the header. name = header[rpm.RPMTAG_NAME] epoch = header[rpm.RPMTAG_EPOCH] version = header[rpm.RPMTAG_VERSION] release = header[rpm.RPMTAG_RELEASE] subarch = header[rpm.RPMTAG_ARCH] if name not in installOnlyPkgs: packages.setdefault(name, {}).setdefault(subarch, []).append({'filename': file, 'mtime': file_mtime, 'name': name, 'arch': subarch, 'epoch': epoch, 'version': version, 'release': release}) if options.verbose: sys.stdout.write('.') sys.stdout.flush() if options.verbose: sys.stdout.write('\n') return packages class pkgmgr_URLopener(urllib.FancyURLopener): """ Override default error handling so that we can see what the errors are. 
""" def http_error_default(self, url, fp, errcode, errmsg, headers): """ Override default error handling so that we can see what the errors are. """ print("ERROR %s: Unable to retrieve %s" % (errcode, url)) def loadRepos(repolist): """ repolist is a list of urls to yum repositories. Builds a multilevel dictionary keyed by the package name and arch. Arch dictionary item is a list, one entry per package instance found. The list entries are dictionaries. Keys are 'filename', 'mtime' 'name', 'arch', 'epoch', 'version' and 'release'. e.g. packages = { 'bcfg2' : { 'noarch' : [ {'filename':'bcfg2-0.9.2-0.0rc1.noarch.rpm', 'mtime':'', 'name':'bcfg2', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-0.9.2-0.0rc5.noarch.rpm', 'mtime':'', 'name':'bcfg2', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}]}, 'bcfg2-server' { 'noarch' : [ {'filename':'bcfg2-server-0.9.2-0.0rc1.noarch.rpm', 'mtime':'', 'name':'bcfg2-server', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc1'} {'filename':'bcfg2-server-0.9.2-0.0rc5.noarch.rpm', 'mtime':'', 'name':"bcfg2-server', 'arch':'noarch', 'epoch':None, 'version':'0.9.2', 'release':'0.0rc5'}]}, } """ packages = {} for repo in repolist: url = urljoin(repo, './repodata/repomd.xml') try: opener = pkgmgr_URLopener() file, message = opener.retrieve(url) except: sys.exit() try: tree = parse(file) except IOError: print("ERROR: Unable to parse retrieved repomd.xml.") sys.exit() repomd = tree.getroot() for element in repomd: if element.tag.endswith('data') and element.attrib['type'] == 'primary': for property in element: if property.tag.endswith('location'): primaryhref = property.attrib['href'] url = urljoin(repo, './' + primaryhref) if options.verbose: print("Loading : %s" % url) try: opener = pkgmgr_URLopener() file, message = opener.retrieve(url) except: sys.exit() try: repo_file = gzip.open(file) tree = parse(repo_file) except IOError: print("ERROR: Unable to parse retrieved file.") sys.exit() root = tree.getroot() for element in root: if element.tag.endswith('package'): for property in element: if property.tag.endswith('name'): name = property.text elif property.tag.endswith('arch'): subarch = property.text elif property.tag.endswith('version'): version = property.get('ver') epoch = property.get('epoch') release = property.get('rel') elif property.tag.endswith('location'): file = property.get('href') if name not in installOnlyPkgs: packages.setdefault(name, {}).setdefault(subarch, []).append({'filename': file, 'name': name, 'arch': subarch, 'epoch': epoch, 'version': version, 'release': release}) if options.verbose: sys.stdout.write('.') sys.stdout.flush() if options.verbose: sys.stdout.write('\n') return packages def str_evra(instance): """ Convert evra dict entries to a string. 
""" if instance.get('epoch', '*') == '*' or instance.get('epoch', '*') == None: return '%s-%s.%s' % (instance.get('version', '*'), instance.get('release', '*'), instance.get('arch', '*')) else: return '%s:%s-%s.%s' % (instance.get('epoch', '*'), instance.get('version', '*'), instance.get('release', '*'), instance.get('arch', '*')) def updatepkg(pkg): """ """ global package_dict name = pkg.get('name') if name not in installOnlyPkgs: for inst in [inst for inst in pkg if inst.tag == 'Instance']: arch = inst.get('arch') if name in package_dict: if arch in package_dict[name]: package_dict[name][arch].sort(cmpRpmHeader) latest = package_dict[name][arch][-1] if cmpRpmHeader(inst, latest) == -1: if options.verbose: print("Found newer version of package %s" % name) print(" Updating %s to %s" % (str_evra(inst), str_evra(latest))) if latest['epoch'] != None: inst.attrib['epoch'] = str(latest['epoch']) inst.attrib['version'] = latest['version'] inst.attrib['release'] = latest['release'] if inst.get('simplefile', False): inst.attrib['simplefile'] = latest['filename'] if options.altconfigfile: ignoretags = pkg.xpath(".//Ignore") # if we find Ignore tags, then assume they're correct; # otherwise, check the altconfigfile if not ignoretags: altpkgs = alttree.xpath(".//Package[@name='%s'][Ignore]" % name) if (len(altpkgs) == 1): for ignoretag in altpkgs[0].xpath(".//Ignore"): if options.verbose: print(" Found Ignore tag in altconfigfile for package %s" % name) pkg.append(ignoretag) def main(): global package_dict global alttree if options.verbose: print("Loading Pkgmgr config file %s." % (options.configfile)) tree = parse(options.configfile) config = tree.getroot() if options.altconfigfile: if options.verbose: print("Loading Pkgmgr alternate config file %s." % (options.altconfigfile)) alttree = parse(options.altconfigfile) if options.verbose: print("Loading package headers") if options.rpmdirs: package_dict = loadRpms(search_dirs) elif options.yumrepos: package_dict = loadRepos(repos) if options.verbose: print("Processing package headers") for pkg in config.getiterator('Package'): updatepkg(pkg) output.write(tostring(config)) if __name__ == "__main__": p = optparse.OptionParser() p.add_option('--configfile', '-c', action='store', \ type='string', \ help='Existing Pkgmgr configuration file name.') p.add_option('--altconfigfile', '-a', action='store', \ type='string', \ help='''Alternate, existing Pkgmgr configuration file name to read Ignore tags from (used for upgrades).''') p.add_option('--rpmdirs', '-d', action='store', type='string', \ help='''Comma separated list of directories to scan for RPMS. Wilcards are permitted.''') p.add_option('--outfile', '-o', action='store', \ type='string', \ help='Output file name or new Pkgrmgr file.') p.add_option('--verbose', '-v', action='store_true', \ help='Enable verbose output.') p.add_option('--yumrepos', '-y', action='store', type='string', \ help='''Comma separated list of YUM repository URLs to load. 
NOTE: Each URL must end in a '/' character.''') options, arguments = p.parse_args() if not options.configfile: print("An existing Pkgmgr configuration file must be specified with " "the -c option.") sys.exit() if not options.rpmdirs and not options.yumrepos: print("One of --rpmdirs and --yumrepos must be specified") sys.exit(1) # Set up list of directories to search if options.rpmdirs: search_dirs = [] for d in options.rpmdirs.split(','): search_dirs += glob.glob(d) if options.verbose: print("The following directories will be scanned:") for d in search_dirs: print(" %s" % d) # Setup list of repos if options.yumrepos: repos = [] for r in options.yumrepos.split(','): repos.append(r) if options.verbose: print("The following repositories will be scanned:") for d in repos: print(" %s" % d) if options.outfile: output = file(options.outfile, "w") else: output = sys.stdout package_dict = {} main() tools/posixusers_baseline.py000077500000000000000000000040231303523157100166600ustar00rootroot00000000000000#!/usr/bin/env python import grp import sys import logging import lxml.etree import Bcfg2.Logger import Bcfg2.Options from Bcfg2.Client.Tools.POSIXUsers import POSIXUsers class CLI(object): options = [ Bcfg2.Options.BooleanOption( "--no-uids", help="Do not include UID numbers for users"), Bcfg2.Options.BooleanOption( "--no-gids", help="Do not include GID numbers for groups")] def __init__(self): Bcfg2.Options.get_parser( description="Generate a bundle with a baseline of POSIX users and " "groups", components=[self, POSIXUsers]).parse() config = lxml.etree.Element("Configuration") self.users = POSIXUsers(config) self.logger = logging.getLogger('posixusers_baseline.py') def run(self): baseline = lxml.etree.Element("Bundle", name="posixusers_baseline") for entry in self.users.FindExtra(): data = self.users.existing[entry.tag][entry.get("name")] for attr, idx in self.users.attr_mapping[entry.tag].items(): if (entry.get(attr) or (attr == 'uid' and Bcfg2.Options.setup.no_uids) or (attr == 'gid' and Bcfg2.Options.setup.no_gids)): continue entry.set(attr, str(data[idx])) if entry.tag == 'POSIXUser': try: entry.set("group", grp.getgrgid(data[3])[0]) except KeyError: self.logger.warning( "User %s is a member of nonexistent group %s" % (entry.get("name"), data[3])) entry.set("group", str(data[3])) for group in self.users.user_supplementary_groups(entry): lxml.etree.SubElement(entry, "MemberOf", group=group[0]) entry.tag = "Bound" + entry.tag baseline.append(entry) print(lxml.etree.tostring(baseline, pretty_print=True)) if __name__ == "__main__": sys.exit(CLI().run()) tools/rpmlisting.py000066400000000000000000000336131303523157100147660ustar00rootroot00000000000000#!/usr/bin/python -u import os import sys import subprocess import getopt import re import datetime from socket import gethostname def run_or_die(command): """run a command, returning output. 
raise an exception if it fails.""" (status, stdio) = subprocess.getstatusoutput(command) if status != 0: raise Exception("command '%s' failed with exit status %d and output '%s'" % (command, status, stdio)) return stdio def rpmblob_cmp(a, b): """cmp() implementation for rpmblobs, suitable for use with sort().""" ret = cmp(a['name'], b['name']) if ret == 0: ret = verstr_cmp(a['version'], b['version']) if ret == 0: ret = verstr_cmp(a['release'], b['release']) return ret def verstr_cmp(a, b): """cmp() implementation for version strings, suitable for use with sort().""" ret = 0 index = 0 a_parts = subdivide(a) b_parts = subdivide(b) prerelease_pattern = re.compile('rc|pre') while ret == 0 and index < min(len(a_parts), len(b_parts)): subindex = 0 a_subparts = a_parts[index] b_subparts = b_parts[index] while ret == 0 and subindex < min(len(a_subparts), len(b_subparts)): ret = cmp(a_subparts[subindex], b_subparts[subindex]) if ret != 0: return ret subindex = subindex + 1 if len(a_subparts) != len(b_subparts): # handle prerelease special case at subpart level (ie, '4.0.2rc5'). if len(a_subparts) > len(b_subparts) and prerelease_pattern.match(str(a_subparts[subindex])): return -1 elif len(a_subparts) < len(b_subparts) and prerelease_pattern.match(str(b_subparts[subindex])): return 1 else: return len(a_subparts) - len(b_subparts) index = index + 1 if len(a_parts) != len(b_parts): # handle prerelease special case at part level (ie, '4.0.2.rc5). if len(a_parts) > len(b_parts) and prerelease_pattern.match(str(a_parts[index][0])): return -1 elif len(a_parts) < len(b_parts) and prerelease_pattern.match(str(b_parts[index][0])): return 1 else: return len(a_parts) - len(b_parts) return ret def subdivide(verstr): """subdivide takes a version or release string and attempts to subdivide it into components to facilitate sorting. The string is divided into a two level hierarchy of sub-parts. The upper level is subdivided by periods, and the lower level is subdivided by boundaries between digit, alpha, and other character groupings. """ parts = [] # parts is a list of lists representing the subsections which make up a version string. # example: # 4.0.2b3 would be represented as [[4],[0],[2,'b',3]]. major_parts = verstr.split('.') for major_part in major_parts: minor_parts = [] index = 0 while index < len(major_part): # handle digit subsection if major_part[index].isdigit(): digit_str_part = "" while index < len(major_part) and major_part[index].isdigit(): digit_str_part = digit_str_part + major_part[index] index = index + 1 digit_part = int(digit_str_part) minor_parts.append(digit_part) # handle alpha subsection elif major_part[index].isalpha(): alpha_part = "" while index < len(major_part) and major_part[index].isalpha(): alpha_part = alpha_part + major_part[index] index = index + 1 minor_parts.append(alpha_part) # handle other characters. this should only be '_', but we will treat is as a subsection to keep it general. elif not major_part[index].isalnum(): other_part = "" while index < len(major_part) and not major_part[index].isalnum(): other_part = other_part + major_part[index] index = index + 1 minor_parts.append(other_part) parts.append(minor_parts) return parts subarch_mapping = {'athlon': 'x86', 'i686': 'x86', 'i586': 'x86', 'i486': 'x86', 'i386': 'x86', 'x86_64': 'x86_64', 'noarch': 'noarch'} arch_mapping = {'x86': ['athlon', 'i686', 'i586', 'i486', 'i386'], 'x86_64': ['x86_64'], 'noarch': ['noarch']} def parse_rpm(path, filename): """read the name, version, release, and subarch of an rpm. 
this version reads the rpm headers. """ cmd = 'rpm --nosignature --queryformat \'%%{NAME} %%{VERSION} %%{RELEASE} %%{ARCH}\' -q -p %s/%s' % (path, filename) output = run_or_die(cmd) (name, version, release, subarch) = output.split() if subarch not in list(subarch_mapping.keys()): raise Exception("%s/%s has invalid subarch %s" % (path, filename, subarch)) return (name, version, release, subarch) def parse_rpm_filename(path, filename): """read the name, version, release, and subarch of an rpm. this version tries to parse the filename directly, and calls 'parse_rpm' as a fallback. """ name, version, release, subarch = None, None, None, None try: (major, minor) = sys.version_info[:2] if major >= 2 and minor >= 4: (blob, subarch, extension) = filename.rsplit('.', 2) (name, version, release) = blob.rsplit('-', 2) else: (rextension, rsubarch, rblob) = filename[::-1].split('.', 2) (blob, subarch, extension) = (rblob[::-1], rsubarch[::-1], rextension[::-1]) (rrelease, rversion, rname) = blob[::-1].split('-', 2) (name, version, release) = (rname[::-1], rversion[::-1], rrelease[::-1]) if subarch not in list(subarch_mapping.keys()): raise "%s/%s has invalid subarch %s." % (path, filename, subarch) except: # for incorrectly named rpms (ie, sun's java rpms) we fall back to reading the rpm headers. sys.stderr.write("Warning: could not parse filename %s/%s. Attempting to parse rpm headers.\n" % (path, filename)) (name, version, release, subarch) = parse_rpm(path, filename) return (name, version, release, subarch) def get_pkgs(rpmdir): """scan a dir of rpms and generate a pkgs structure. first try parsing the filename. if that fails, try parsing the rpm headers. """ pkgs = {} """ pkgs structure: * pkgs is a dict of package name, rpmblob list pairs: pkgs = {name:[rpmblob,rpmblob...], name:[rpmblob,rpmblob...]} * rpmblob is a dict describing an rpm file: rpmblob = {'file':'foo-0.1-5.i386.rpm', 'name':'foo', 'version':'0.1', 'release':'5', 'subarch':'i386'}, example: pkgs = { 'foo' : [ {'file':'foo-0.1-5.i386.rpm', 'name':'foo', 'version':'0.1', 'release':'5', 'subarch':'i386'}, {'file':'foo-0.2-3.i386.rpm', 'name':'foo', 'version':'0.2', 'release':'3', 'subarch':'i386'}], 'bar' : [ {'file':'bar-3.2a-12.mips.rpm', 'name':'bar', 'version':'3.2a', 'release':'12', 'subarch':'mips'}, {'file':'bar-3.7j-4.mips.rpm', 'name':'bar', 'version':'3.7j', 'release':'4', 'subarch':'mips'}] } """ rpms = [item for item in os.listdir(rpmdir) if item.endswith('.rpm')] for filename in rpms: (name, version, release, subarch) = parse_rpm_filename(rpmdir, filename) rpmblob = {'file': filename, 'name': name, 'version': version, 'release': release, 'subarch': subarch} if name in pkgs: pkgs[name].append(rpmblob) else: pkgs[name] = [rpmblob] return pkgs def prune_pkgs_latest(pkgs): """prune a pkgs structure to contain only the latest version of each package (includes multiarch results). """ latest_pkgs = {} for rpmblobs in list(pkgs.values()): (major, minor) = sys.version_info[:2] if major >= 2 and minor >= 4: rpmblobs.sort(rpmblob_cmp, reverse=True) else: rpmblobs.sort(rpmblob_cmp) rpmblobs.reverse() pkg_name = rpmblobs[0]['name'] all_archs = [blob for blob in rpmblobs if blob['version'] == rpmblobs[0]['version'] and blob['release'] == rpmblobs[0]['release']] latest_pkgs[pkg_name] = all_archs return latest_pkgs def prune_pkgs_archs(pkgs): """prune a pkgs structure to contain no more than one subarch per architecture for each set of packages. 
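For illustration (hypothetical data, not taken from a real repository): if a package is present as i386, i686, and x86_64 blobs, the pruned result keeps the x86_64 blob plus only the i686 blob for the 'x86' architecture, because 'i686' precedes 'i386' in arch_mapping['x86'].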
""" pruned_pkgs = {} for rpmblobs in list(pkgs.values()): pkg_name = rpmblobs[0]['name'] arch_sifter = {} for challenger in rpmblobs: arch = subarch_mapping[challenger['subarch']] incumbent = arch_sifter.get(arch) if incumbent == None: arch_sifter[arch] = challenger else: subarchs = arch_mapping[arch] challenger_index = subarchs.index(challenger['subarch']) incumbent_index = subarchs.index(incumbent['subarch']) if challenger_index < incumbent_index: arch_sifter[arch] = challenger pruned_pkgs[pkg_name] = list(arch_sifter.values()) return pruned_pkgs def get_date_from_desc(date_desc): """calls the unix 'date' command to turn a date description into a python date object. example: get_date_from_desc("last sunday 1 week ago") """ stdio = run_or_die('date -d "' + date_desc + '" "+%Y %m %d"') (year_str, month_str, day_str) = stdio.split() year = int(year_str) month = int(month_str) day = int(day_str) date_obj = datetime.date(year, month, day) return date_obj def get_mtime_date(path): """return a naive date object based on the file's mtime.""" return datetime.date.fromtimestamp(os.stat(path).st_mtime) def prune_pkgs_timely(pkgs, start_date_desc=None, end_date_desc=None, rpmdir='.'): """prune a pkgs structure to contain only rpms with an mtime within a certain temporal window. """ start_date = None if start_date_desc != None: start_date = get_date_from_desc(start_date_desc) end_date = None if end_date_desc != None: end_date = get_date_from_desc(end_date_desc) if start_date == None and end_date == None: return pkgs if start_date != None: for rpmblobs in list(pkgs.values()): pkg_name = rpmblobs[0]['name'] timely_blobs = [blob for blob in rpmblobs if start_date < get_mtime_date(rpmdir + '/' + blob['file'])] if len(timely_blobs) == 0: del pkgs[pkg_name] else: pkgs[pkg_name] = timely_blobs if end_date != None: for rpmblobs in list(pkgs.values()): pkg_name = rpmblobs[0]['name'] timely_blobs = [blob for blob in rpmblobs if get_mtime_date(rpmdir + '/' + blob['file']) <= end_date] if len(timely_blobs) == 0: del pkgs[pkg_name] else: pkgs[pkg_name] = timely_blobs return pkgs # from http://aspn.activestate.com/ASPN/Python/Cookbook/Recipe/52306 def sorted_values(adict): """return a list of values from a dict, sorted by key.""" items = list(adict.items()) items.sort() return [value for key, value in items] def scan_rpm_dir(rpmdir, uri, group, priority=0, output=sys.stdout, start_date_desc=None, end_date_desc=None): """the meat of this library.""" output.write('\n' % (uri, priority)) output.write(' \n' % group) pkgs = prune_pkgs_archs(prune_pkgs_latest(prune_pkgs_timely(get_pkgs(rpmdir), start_date_desc, end_date_desc, rpmdir))) for rpmblobs in sorted_values(pkgs): if len(rpmblobs) == 1: # regular pkgmgr entry rpmblob = rpmblobs[0] output.write(' \n' % (rpmblob['name'], rpmblob['file'], rpmblob['version'], rpmblob['release'])) else: # multiarch pkgmgr entry rpmblob = rpmblobs[0] subarchs = [blob['subarch'] for blob in rpmblobs] subarchs.sort() multiarch_string = ' '.join(subarchs) pattern_string = '\.(%s)\.rpm$' % '|'.join(subarchs) # e.g., '\.(i386|x86_64)\.rpm$' pattern = re.compile(pattern_string) multiarch_file = pattern.sub('.%(arch)s.rpm', rpmblob['file']) # e.g., 'foo-1.0-1.%(arch)s.rpm' output.write(' \n' % (rpmblob['name'], multiarch_file, rpmblob['version'], rpmblob['release'], multiarch_string)) output.write(' \n') output.write('\n') def usage(output=sys.stdout): output.write("Usage: %s [-g ] [-u ] [-d ] [-p ] [-o ]\n" % sys.argv[0]) if __name__ == "__main__": try: opts, args = 
getopt.getopt(sys.argv[1:], "g:u:d:p:o:", ["group=", "uir=", "dir=", "priority=", "output="]) except getopt.GetoptError: usage(sys.stderr) sys.exit(1) group = "base" uri = "http://" + gethostname() + "/rpms" rpmdir = "." priority = "0" output = None for opt, arg in opts: if opt in ['-g', '--group']: group = arg elif opt in ['-u', '--uri']: uri = arg elif opt in ['-d', '--dir']: rpmdir = arg elif opt in ['-p', '--priority']: priority = arg elif opt in ['-o', '--output']: output = arg if output == None: output = sys.stdout else: output = file(output, "w") scan_rpm_dir(rpmdir, uri, group, priority, output) tools/selinux_baseline.py000077500000000000000000000016031303523157100161240ustar00rootroot00000000000000#!/usr/bin/env python import sys import lxml.etree import Bcfg2.Logger import Bcfg2.Options from Bcfg2.Client.Tools.SELinux import SELinux def main(): Bcfg2.Options.get_parser( description="Get a baseline bundle of SELinux entries", components=[SELinux]).parse() config = lxml.etree.Element("Configuration") selinux = SELinux(config) baseline = lxml.etree.Element("Bundle", name="selinux_baseline") for etype, handler in selinux.handlers.items(): baseline.append(lxml.etree.Comment("%s entries" % etype)) extra = handler.FindExtra() for entry in extra: if etype != "SEModule": entry.tag = "Bound%s" % etype else: entry.tag = "%s" % etype baseline.extend(extra) print(lxml.etree.tostring(baseline, pretty_print=True)) if __name__ == "__main__": sys.exit(main()) tools/upgrade/000077500000000000000000000000001303523157100136455ustar00rootroot00000000000000tools/upgrade/1.1/000077500000000000000000000000001303523157100141445ustar00rootroot00000000000000tools/upgrade/1.1/README000066400000000000000000000003661303523157100150310ustar00rootroot00000000000000This directory contains scripts to help upgrading from Bcfg2 1.0 to 1.1. posixunified.py - Change all ConfigFile, Directory, Directory, Permissions, and Symlink entries in Base and Bundler to Path type="" entries in Rules. tools/upgrade/1.1/posixunified.py000077500000000000000000000110611303523157100172260ustar00rootroot00000000000000#!/usr/bin/env python from copy import deepcopy import lxml.etree import os import sys import Bcfg2.Options """ NOTE: This script takes a conservative approach when it comes to updating your Rules. It creates a new unified-rules.xml file without the attributes you have defined in your current rules. The reason for this is to keep this script simple so we don't have to go through and determine the priorities associated with your current rules definitions. 
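For example (illustrative paths only), a Bundler entry such as <Directory name='/var/log/foo'/> is rewritten in the new bundle as <Path name='/var/log/foo'/>, and a matching <Path type='directory' name='/var/log/foo'/> entry is appended to Rules/unified-rules.xml.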
""" if __name__ == '__main__': parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.0-style POSIX entries to 1.1-style " "unified Path entries") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() repo = Bcfg2.Options.setup.repository unifiedposixrules = "%s/Rules/unified-rules.xml" % repo rulesroot = lxml.etree.Element("Rules") for plug in ['Base', 'Bundler']: for root, dirs, files in os.walk('%s/%s' % (repo, plug)): if '.svn' in dirs: dirs.remove('.svn') for filename in files: if filename.startswith('new'): continue xdata = lxml.etree.parse(os.path.join(root, filename)) # replace ConfigFile elements for c in xdata.findall('//ConfigFile'): parent = c.getparent() oldc = c c.tag = 'Path' parent.replace(oldc, c) # replace Directory elements for d in xdata.findall('//Directory'): parent = d.getparent() oldd = d d.tag = 'Path' parent.replace(oldd, d) # Create new-style Rules entry newd = deepcopy(d) newd.set('type', 'directory') rulesroot.append(newd) # replace BoundDirectory elements for d in xdata.findall('//BoundDirectory'): parent = d.getparent() oldd = d d.tag = 'BoundPath' parent.replace(oldd, d) # Create new-style entry newd = deepcopy(d) newd.set('type', 'directory') # replace Permissions elements for p in xdata.findall('//Permissions'): parent = p.getparent() oldp = p p.tag = 'Path' parent.replace(oldp, p) # Create new-style Rules entry newp = deepcopy(p) newp.set('type', 'permissions') rulesroot.append(newp) # replace BoundPermissions elements for p in xdata.findall('//BoundPermissions'): parent = p.getparent() oldp = p p.tag = 'BoundPath' parent.replace(oldp, p) # Create new-style entry newp = deepcopy(p) newp.set('type', 'permissions') # replace SymLink elements for s in xdata.findall('//SymLink'): parent = s.getparent() olds = s s.tag = 'Path' parent.replace(olds, s) # Create new-style Rules entry news = deepcopy(s) news.set('type', 'symlink') rulesroot.append(news) # replace BoundSymLink elements for s in xdata.findall('//BoundSymLink'): parent = s.getparent() olds = s s.tag = 'BoundPath' parent.replace(olds, s) # Create new-style entry news = deepcopy(s) news.set('type', 'symlink') # write out the new bundle try: newbundle = open("%s/%s/new%s" % (repo, plug, filename), 'w') except IOError: print("Failed to write %s" % filename) continue newbundle.write(lxml.etree.tostring(xdata, pretty_print=True)) newbundle.close() try: newrules = open(unifiedposixrules, 'w') rulesroot.set('priority', '1') newrules.write(lxml.etree.tostring(rulesroot, pretty_print=True)) newrules.close() except IOError: print("Failed to write %s" % unifiedposixrules) tools/upgrade/1.2/000077500000000000000000000000001303523157100141455ustar00rootroot00000000000000tools/upgrade/1.2/README000066400000000000000000000005241303523157100150260ustar00rootroot00000000000000This directory contains scripts to help upgrading from Bcfg2 1.1 to 1.2. 
nagiosgen-convert.py - Convert configuration from Properties/NagiosGen.xml to NagiosGen/config.xml and NagiosGen/parents.xml packages-convert.py - Convert configuration from Packages/config.xml to Packages/packages.conf and Packages/sources.xml tools/upgrade/1.2/nagiosgen-convert.py000077500000000000000000000054401303523157100201550ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import lxml.etree import Bcfg2.Options def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.1-style Properties-based NagiosGen " "configuration to standalone 1.2-style") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() repo = Bcfg2.Options.setup.repository oldconfigfile = os.path.join(repo, 'Properties', 'NagiosGen.xml') newconfigpath = os.path.join(repo, 'NagiosGen') newconfigfile = os.path.join(newconfigpath, 'config.xml') parentsfile = os.path.join(newconfigpath, 'parents.xml') if not os.path.exists(oldconfigfile): print("%s does not exist, nothing to do" % oldconfigfile) return 1 if not os.path.exists(newconfigpath): print("%s does not exist, cannot write %s" % (newconfigpath, newconfigfile)) return 2 newconfig = lxml.etree.XML("") oldconfig = lxml.etree.parse(oldconfigfile) for host in oldconfig.getroot().getchildren(): if host.tag == lxml.etree.Comment: # skip comments continue if host.tag == 'default': print("default tag will not be converted; use a suitable Group tag instead") continue newhost = lxml.etree.Element("Client", name=host.tag) for opt in host: newopt = lxml.etree.Element("Option", name=opt.tag) newopt.text = opt.text newhost.append(newopt) newconfig.append(newhost) # parse the parents config, if it exists if os.path.exists(parentsfile): parentsconfig = lxml.etree.parse(parentsfile) for el in parentsconfig.xpath("//Depend"): newhost = newconfig.find("Client[@name='%s']" % el.get("name")) if newhost is not None: newparents = newhost.find("Option[@name='parents']") if newparents is not None: newparents.text += "," + el.get("on") else: newparents = lxml.etree.Element("Option", name="parents") newparents.text = el.get("on") newhost.append(newparents) else: newhost = lxml.etree.Element("Client", name=el.get("name")) newparents = lxml.etree.Element("Option", name="parents") newparents.text = el.get("on") newhost.append(newparents) newconfig.append(newhost) try: open(newconfigfile, 'w').write(lxml.etree.tostring(newconfig, pretty_print=True)) print("%s written" % newconfigfile) except IOError: print("Failed to write %s" % newconfigfile) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.2/packages-convert.py000077500000000000000000000102541303523157100177600ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import lxml.etree from Bcfg2.Compat import ConfigParser import Bcfg2.Options XI_NAMESPACE = "http://www.w3.org/2001/XInclude" XI = "{%s}" % XI_NAMESPACE def place_source(xdata, source, groups): """ given a source's group memberships, place it appropriately within the given XML document tree """ if not groups: xdata.append(source) else: for group in groups: match = xdata.xpath("Group[@name='%s']" % group) if match: groups.remove(group) xdata.replace(match[0], place_source(match[0], source, groups)) return xdata # no group found to put this source into group = groups.pop() xdata.append(place_source(lxml.etree.Element("Group", name=group), source, groups)) return xdata def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.1-style Packages configuration to " "1.2-style") 
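# The repository option added below is what lets this tool locate Packages/config.xml and write packages.conf and sources.xml under the Bcfg2 repository root.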
parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() repo = Bcfg2.Options.setup.repository configpath = os.path.join(repo, 'Packages') oldconfigfile = os.path.join(configpath, 'config.xml') newconfigfile = os.path.join(configpath, 'packages.conf') newsourcesfile = os.path.join(configpath, 'sources.xml') if not os.path.exists(oldconfigfile): print("%s does not exist, nothing to do" % oldconfigfile) return 1 if not os.path.exists(configpath): print("%s does not exist, cannot write %s" % (configpath, newconfigfile)) return 2 newconfig = ConfigParser.SafeConfigParser() newconfig.add_section("global") oldconfig = lxml.etree.parse(oldconfigfile).getroot() config = oldconfig.xpath('//Sources/Config') if config: if config[0].get("resolver", "enabled").lower() == "disabled": newconfig.add_option("global", "resolver", "disabled") if config[0].get("metadata", "enabled").lower() == "disabled": newconfig.add_option("global", "metadata", "disabled") newconfig.write(open(newconfigfile, "w")) print("%s written" % newconfigfile) oldsources = [oldconfigfile] while oldsources: oldfile = oldsources.pop() oldsource = lxml.etree.parse(oldfile).getroot() if oldfile == oldconfigfile: newfile = newsourcesfile else: newfile = os.path.join(configpath, oldfile.replace("%s/" % configpath, '')) newsource = lxml.etree.Element("Sources", nsmap=oldsource.nsmap) for el in oldsource.getchildren(): if el.tag == lxml.etree.Comment or el.tag == 'Config': # skip comments and Config continue if el.tag == XI + 'include': oldsources.append(os.path.join(configpath, el.get('href'))) newsource.append(el) continue # element must be a *Source newel = lxml.etree.Element("Source", type=el.tag.replace("Source", "").lower()) try: newel.set('recommended', el.find('Recommended').text.lower()) except AttributeError: pass for tag in ['RawURL', 'URL', 'Version']: try: newel.set(tag.lower(), el.find(tag).text) except AttributeError: pass for child in el.getchildren(): if child.tag in ['Component', 'Blacklist', 'Whitelist', 'Arch']: newel.append(child) groups = [e.text for e in el.findall("Group")] newsource = place_source(newsource, newel, groups) try: open(newfile, 'w').write(lxml.etree.tostring(newsource, pretty_print=True)) print("%s written" % newfile) except IOError: print("Failed to write %s" % newfile) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/000077500000000000000000000000001303523157100141465ustar00rootroot00000000000000tools/upgrade/1.3/README000066400000000000000000000021201303523157100150210ustar00rootroot00000000000000This directory contains scripts to help with upgrading from Bcfg2 1.2 to 1.3. migrate_info.py - Convert info and :info files to info.xml files migrate_configs.py - Migrate configuration in Rules/rules.conf and Packages/packages.conf to bcfg2.conf; move database configuration from [statistics] to [database] service_modes.py - Convert Service tags from "mode" attribute to separate "restart" and "install" attributes migrate_dbstats.py - Convert old DBStats entries to the new Reporting system migrate_perms_to_mode.py - Convert perms attribute to mode (note that if you have info/:info files, you should run migrate_info.py first) migrate_probe_groups_to_db.py - Migrate Probe host and group data from XML to DB backend for Metadata and Probe plugins. Does not migrate individual probe return data. Assumes migration to BOTH Metadata and Probe to database backends. 
migrate_sysv_simplename.py - Migrate any Pkgmgr entries which may have been using the simplename attribute introduced in 1.3.5 to the simplefile attribute tools/upgrade/1.3/migrate_configs.py000077500000000000000000000065611303523157100176730ustar00rootroot00000000000000#!/usr/bin/env python import os import sys from Bcfg2.Compat import ConfigParser import Bcfg2.Options def copy_section(src_file, tgt_cfg, section, newsection=None): if newsection is None: newsection = section cfg = ConfigParser.ConfigParser() if len(cfg.read(src_file)) == 1: if cfg.has_section(section): try: tgt_cfg.add_section(newsection) except ConfigParser.DuplicateSectionError: print("[%s] section already exists in %s, adding options" % (newsection, Bcfg2.Options.setup.config)) for opt in cfg.options(section): val = cfg.get(section, opt) if tgt_cfg.has_option(newsection, opt): print("%s in [%s] already populated in %s, skipping" % (opt, newsection, Bcfg2.Options.setup.config)) print(" %s: %s" % (Bcfg2.Options.setup.config, tgt_cfg.get(newsection, opt))) print(" %s: %s" % (src_file, val)) else: print("Set %s in [%s] to %s" % (opt, newsection, val)) tgt_cfg.set(newsection, opt, val) def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.2 per-plugin config files to 1.3 " "unified config file") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() repo = Bcfg2.Options.setup.repository cfp = ConfigParser.ConfigParser() cfp.read(Bcfg2.Options.setup.config) # files that you should remove manually remove = [] # move rules config out of rules.conf and into bcfg2.conf rules_conf = os.path.join(repo, 'Rules', 'rules.conf') if os.path.exists(rules_conf): remove.append(rules_conf) copy_section(rules_conf, cfp, "rules") # move packages config out of packages.conf and into bcfg2.conf pkgs_conf = os.path.join(repo, 'Packages', 'packages.conf') if os.path.exists(pkgs_conf): remove.append(pkgs_conf) copy_section(pkgs_conf, cfp, "global", newsection="packages") for section in ["apt", "yum", "pulp"]: copy_section(pkgs_conf, cfp, section, newsection="packages:" + section) # move reports database config into [database] section if cfp.has_section("statistics"): if not cfp.has_section("database"): cfp.add_section("database") for opt in cfp.options("statistics"): if opt.startswith("database_"): newopt = opt[9:] if cfp.has_option("database", newopt): print("%s in [database] already populated, skipping" % newopt) else: cfp.set("database", newopt, cfp.get("statistics", opt)) cfp.remove_option("statistics", opt) print("Writing %s" % Bcfg2.Options.setup.config) try: cfp.write(open(Bcfg2.Options.setup.config, "w")) if len(remove): print("Settings were migrated, but you must remove these files " "manually:") for path in remove: print(" %s" % path) except IOError: err = sys.exc_info()[1] print("Could not write %s: %s" % (Bcfg2.Options.setup.config, err)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/migrate_dbstats.py000077500000000000000000000236201303523157100177020ustar00rootroot00000000000000#!/usr/bin/env python import os os.environ['BCFG2_LEGACY_MODELS'] = '1' import sys import logging import time import Bcfg2.Logger import Bcfg2.Options from Bcfg2.DBSettings import get_db_label import django from django.db import transaction, connections from Bcfg2.Server.Admin import UpdateReports from Bcfg2.Reporting.utils import BatchFetch from Bcfg2.Reporting.Compat import transaction logger = logging.getLogger(__name__) _our_backend = None def _quote(value): """ Quote a string to use as a table name 
or column Newer versions and various drivers require an argument https://code.djangoproject.com/ticket/13630 """ global _our_backend if not _our_backend: if django.VERSION[0] == 1 and django.VERSION[1] >= 7: _our_backend = connections[get_db_label('Reporting')].ops else: from django.db import backend try: _our_backend = backend.DatabaseOperations( connections[get_db_label('Reporting')]) except TypeError: _our_backend = backend.DatabaseOperations() return _our_backend.quote_name(value) @transaction.atomic def _migrate_perms(): """helper""" fperms = {} logger.info("Creating FilePerms objects") for data in (('owner', 'group', 'perms'), ('current_owner', 'current_group', 'current_perms')): for grp in legacy_models.Reason.objects.values_list(*data).distinct(): if grp in fperms: continue fp = new_models.FilePerms(owner=grp[0], group=grp[1], mode=grp[2]) fp.save() fperms[grp] = fp return fperms @transaction.atomic def _migrate_transaction(inter, entries, fperms): """helper""" logger.debug("Migrating interaction %s for %s" % (inter.id, inter.client.name)) newint = new_models.Interaction(id=inter.id, client_id=inter.client_id, timestamp=inter.timestamp, state=inter.state, repo_rev_code=inter.repo_rev_code, server=inter.server, good_count=inter.goodcount, total_count=inter.totalcount, bad_count=inter.bad_entries, modified_count=inter.modified_entries, extra_count=inter.extra_entries) groups = [] bundles = [] try: if inter.metadata: newint.profile_id = inter.metadata.profile.id groups = [grp.pk for grp in inter.metadata.groups.all()] bundles = [bun.pk for bun in inter.metadata.bundles.all()] except ObjectDoesNotExist: pass super(new_models.Interaction, newint).save() if bundles: newint.bundles.add(*bundles) if groups: newint.groups.add(*groups) updates = dict(paths=[], packages=[], actions=[], services=[]) for ei in legacy_models.Entries_interactions.objects.select_related('reason')\ .filter(interaction=inter): ent = entries[ei.entry_id] name = ent.name act_dict = dict(name=name, exists=ei.reason.current_exists, state=ei.type) if ent.kind == 'Action': act_dict['status'] = ei.reason.status if not act_dict['status']: act_dict['status'] = "check" act_dict['output'] = -1 logger.debug("Adding action %s" % name) updates['actions'].append(new_models.ActionEntry.entry_get_or_create(act_dict)) elif ent.kind == 'Package': act_dict['target_version'] = ei.reason.version act_dict['current_version'] = ei.reason.current_version logger.debug("Adding package %s %s" % (name, act_dict['target_version'])) updates['packages'].append(new_models.PackageEntry.entry_get_or_create(act_dict)) elif ent.kind == 'Path': # these might be hard.. 
they aren't one to one with the old model act_dict['path_type'] = 'file' act_dict['target_perms'] = fperms[( ei.reason.owner, ei.reason.group, ei.reason.perms )] act_dict['current_perms'] = fperms[( ei.reason.current_owner, ei.reason.current_group, ei.reason.current_perms )] if ei.reason.to: act_dict['path_type'] = 'symlink' act_dict['target_path'] = ei.reason.to act_dict['current_path'] = ei.reason.current_to logger.debug("Adding link %s" % name) updates['paths'].append(new_models.LinkEntry.entry_get_or_create(act_dict)) continue act_dict['detail_type'] = new_models.PathEntry.DETAIL_UNUSED if ei.reason.unpruned: # this is the only other case we know what the type really is act_dict['path_type'] = 'directory' act_dict['detail_type'] = new_models.PathEntry.DETAIL_PRUNED act_dict['details'] = ei.reason.unpruned if ei.reason.is_sensitive: act_dict['detail_type'] = new_models.PathEntry.DETAIL_SENSITIVE elif ei.reason.is_binary: act_dict['detail_type'] = new_models.PathEntry.DETAIL_BINARY act_dict['details'] = ei.reason.current_diff elif ei.reason.current_diff: act_dict['detail_type'] = new_models.PathEntry.DETAIL_DIFF act_dict['details'] = ei.reason.current_diff logger.debug("Adding path %s" % name) updates['paths'].append(new_models.PathEntry.entry_get_or_create(act_dict)) elif ent.kind == 'Service': act_dict['target_status'] = ei.reason.status act_dict['current_status'] = ei.reason.current_status logger.debug("Adding service %s" % name) updates['services'].append(new_models.ServiceEntry.entry_get_or_create(act_dict)) else: logger.warn("Skipping type %s" % ent.kind) for entry_type in updates.keys(): i = 0 while(i < len(updates[entry_type])): getattr(newint, entry_type).add(*updates[entry_type][i:i + 100]) i += 100 for perf in inter.performance_items.all(): new_models.Performance( interaction=newint, metric=perf.metric, value=perf.value).save() def _shove(old_table, new_table, columns): cols = ",".join([_quote(f) for f in columns]) sql = "insert into %s(%s) select %s from %s" % ( _quote(new_table), cols, cols, _quote(old_table)) cursor = connections[get_db_label('Reporting')].cursor() cursor.execute(sql) cursor.close() @transaction.atomic def migrate_stage1(): logger.info("Migrating clients") try: _shove(legacy_models.Client._meta.db_table, new_models.Client._meta.db_table, ('id', 'name', 'creation', 'expiration')) except: logger.error("Failed to migrate clients", exc_info=1) return False logger.info("Migrating Bundles") try: _shove(legacy_models.Bundle._meta.db_table, new_models.Bundle._meta.db_table, ('id', 'name')) except: logger.error("Failed to migrate bundles", exc_info=1) return False logger.info("Migrating Groups") try: _shove(legacy_models.Group._meta.db_table, new_models.Group._meta.db_table, ('id', 'name', 'profile', 'public', 'category', 'comment')) except: logger.error("Failed to migrate groups", exc_info=1) return False return True def _restructure(): """major restructure of reporting data""" # run any migrations from the previous schema try: from Bcfg2.Server.Reports.updatefix import update_database update_database() except: logger.error("Failed to run legacy schema updates", exc_info=1) return False # try to avoid dangling transactions if not migrate_stage1(): return try: entries = {} for ent in legacy_models.Entries.objects.all(): entries[ent.id] = ent except: logger.error("Failed to populate entries dict", exc_info=1) return False try: fperms = _migrate_perms() except: logger.error("Failed create FilePerms objects", exc_info=1) return False failures = [] int_count = 
legacy_models.Interaction.objects.count() if int_count == 0: logger.error("Found no legacy interactions") return False int_ctr = 0 start_time = 0 for inter in BatchFetch(legacy_models.Interaction.objects.\ select_related('metadata', 'client').all()): if int_ctr % 1000 == 0: if int_ctr > 0: logger.info("Migrated %s of %s interactions in %ss" % \ (int_ctr, int_count, time.time() - start_time)) else: logger.info("Migrating interactions") start_time = time.time() try: _migrate_transaction(inter, entries, fperms) except: logger.error("Failed to migrate interaction %s for %s" % (inter.id, inter.client.name), exc_info=1) failures.append(inter.id) int_ctr += 1 if not failures: logger.info("Successfully restructured reason data") return True logger.info("Updating recent interactions") for newint in new_models.Interaction.objects.recent(): try: newint.save() except: logger.error("Failed to set current interaction %s for %s" % (newint.id, newint.client.name), exc_info=1) if __name__ == '__main__': parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.2 DBStats plugin to 1.3 Reporting " "subsystem", components=[UpdateReports]) parser.parse() from Bcfg2.Reporting import models as new_models from Bcfg2.Server.Reports.reports import models as legacy_models UpdateReports().run(Bcfg2.Options.setup) _restructure() tools/upgrade/1.3/migrate_info.py000077500000000000000000000042621303523157100171720ustar00rootroot00000000000000#!/usr/bin/env python import os import re import sys import lxml.etree import Bcfg2.Options INFO_REGEX = re.compile(r'owner:\s*(?P\S+)|' + r'group:\s*(?P\S+)|' + r'mode:\s*(?P\w+)|' + r'secontext:\s*(?P\S+)|' + r'paranoid:\s*(?P\S+)|' + r'sensitive:\s*(?P\S+)|' + r'encoding:\s*(?P\S+)|' + r'important:\s*(?P\S+)|' + r'mtime:\s*(?P\w+)') PERMS_REGEX = re.compile(r'perms:\s*(?P\w+)') def convert(info_file): info_xml = os.path.join(os.path.dirname(info_file), "info.xml") if os.path.exists(info_xml): print("%s already exists, not converting %s" % (info_xml, info_file)) return print("Converting %s to %s" % (info_file, info_xml)) fileinfo = lxml.etree.Element("FileInfo") info = lxml.etree.SubElement(fileinfo, "Info") for line in open(info_file).readlines(): match = INFO_REGEX.match(line) or PERMS_REGEX.match(line) if match: mgd = match.groupdict() for key, value in list(mgd.items()): if value: info.set(key, value) open(info_xml, "w").write(lxml.etree.tostring(fileinfo, pretty_print=True)) os.unlink(info_file) def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.2 info/:info files to 1.3 info.xml") parser.add_options([Bcfg2.Options.Common.repository, Bcfg2.Options.Common.plugins]) parser.parse() for plugin in Bcfg2.Options.setup.plugins: plugin_name = plugin.__name__ if plugin_name not in ['SSLCA', 'Cfg', 'TGenshi', 'TCheetah', 'SSHbase']: continue datastore = os.path.join(Bcfg2.Options.setup.repository, plugin_name) for root, dirs, files in os.walk(datastore): for fname in files: if fname in [":info", "info"]: convert(os.path.join(root, fname)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/migrate_perms_to_mode.py000077500000000000000000000055251303523157100210760ustar00rootroot00000000000000#!/usr/bin/env python import lxml.etree import os import sys from fnmatch import fnmatch from Bcfg2.Compat import any # pylint: disable=W0622 from Bcfg2.Server.FileMonitor import FileMonitor import Bcfg2.Options def setmodeattr(elem): """Set the mode attribute for a given element.""" if 'perms' in elem.attrib: elem.set('mode', elem.get('perms')) 
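# Copy the legacy value into the new 'mode' attribute first; the 'perms' attribute is removed immediately below.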
del elem.attrib['perms'] return True return False def writefile(f, xdata): """Write xml data to a file""" newfile = open(f, 'w') newfile.write(lxml.etree.tostring(xdata, pretty_print=True)) newfile.close() def convertinfo(ifile): """Do perms -> mode conversion for info.xml files.""" try: xdata = lxml.etree.parse(ifile) except lxml.etree.XMLSyntaxError: err = sys.exc_info()[1] print("Could not parse %s, skipping: %s" % (ifile, err)) return found = False for i in xdata.findall('//Info'): found |= setmodeattr(i) if found: writefile(ifile, xdata) def convertstructure(structfile): """Do perms -> mode conversion for structure files.""" try: xdata = lxml.etree.parse(structfile) except lxml.etree.XMLSyntaxError: err = sys.exc_info()[1] print("Could not parse %s, skipping: %s" % (structfile, err)) return found = False for path in xdata.xpath('//BoundPath|//Path'): found |= setmodeattr(path) if found: writefile(structfile, xdata) def skip_path(path): return any(fnmatch(path, p) or fnmatch(os.path.basename(path), p) for p in Bcfg2.Options.setup.ignore_files) def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.2 'perms' attribute to 1.3 'mode' " "attribute", components=[FileMonitor]) parser.add_options([Bcfg2.Options.Common.repository, Bcfg2.Options.Common.plugins]) parser.parse() repo = Bcfg2.Options.setup.repository for plugin in Bcfg2.Options.setup.plugins: plugin_name = plugin.__name__ if plugin_name in ['Base', 'Bundler', 'Rules']: for root, _, files in os.walk(os.path.join(repo, plugin_name)): if skip_path(root): continue for fname in files: if skip_path(fname): continue convertstructure(os.path.join(root, fname)) if plugin_name not in ['Cfg', 'TGenshi', 'TCheetah', 'SSHbase', 'SSLCA']: continue for root, dirs, files in os.walk(os.path.join(repo, plugin_name)): if skip_path(root): continue for fname in files: if fname == 'info.xml': convertinfo(os.path.join(root, fname)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/migrate_probe_groups_to_db.py000077500000000000000000000045741303523157100221220ustar00rootroot00000000000000#!/bin/env python """ Migrate Probe host and group data from XML to DB backend for Metadata and Probe plugins. Does not migrate individual probe return data. Assumes migration to BOTH Metadata and Probe to database backends. 
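Typically this is run once, right after both plugins have been pointed at the database backend: every Client element in Probes/probed.xml (and its Group children) is mirrored into the database, and stale group rows for that client are deleted.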
""" import os import sys import lxml.etree import Bcfg2.Options import Bcfg2.DBSettings def migrate(xclient, MetadataClientModel, ProbesGroupsModel): """ Helper to do the migration given a XML element """ client_name = xclient.get('name') try: try: client = MetadataClientModel.objects.get(hostname=client_name) except MetadataClientModel.DoesNotExist: client = MetadataClientModel(hostname=client_name) client.save() except: print("Failed to migrate client %s" % (client)) return False try: cgroups = [] for xgroup in xclient.findall('Group'): group_name = xgroup.get('name') cgroups.append(group_name) try: group = ProbesGroupsModel.objects.get(hostname=client_name, group=group_name) except ProbesGroupsModel.DoesNotExist: group = ProbesGroupsModel(hostname=client_name, group=group_name) group.save() ProbesGroupsModel.objects.filter( hostname=client.hostname).exclude( group__in=cgroups).delete() except: print("Failed to migrate groups") return False return True def main(): """ Main """ opts = dict(repo=Bcfg2.Options.SERVER_REPOSITORY) setup = Bcfg2.Options.OptionParser(opts) setup.parse(sys.argv[1:]) probefile = os.path.join(setup['repo'], 'Probes', "probed.xml") try: xdata = lxml.etree.parse(probefile) except lxml.etree.XMLSyntaxError: err = sys.exc_info()[1] print("Could not parse %s, skipping: %s" % (probefile, err)) # these must be loaded after option parsing is complete from Bcfg2.Server.Plugins.Metadata import MetadataClientModel from Bcfg2.Server.Plugins.Probes import ProbesGroupsModel for xclient in xdata.findall('Client'): print("Migrating Metadata and Probe groups for %s" % xclient.get('name')) migrate(xclient, MetadataClientModel, ProbesGroupsModel) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/migrate_sysv_simplename.py000077500000000000000000000027601303523157100214560ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import glob import lxml.etree import Bcfg2.Options def main(): opts = dict(repo=Bcfg2.Options.SERVER_REPOSITORY) setup = Bcfg2.Options.OptionParser(opts) setup.parse(sys.argv[1:]) files = [] for plugin in ['Pkgmgr']: files.extend(glob.glob(os.path.join(setup['repo'], plugin, "*"))) for bfile in files: bdata = lxml.etree.parse(bfile) changed = False if not bdata.xpath("//@type='sysv'"): print("%s doesn't contain any sysv packages, skipping" % bfile) continue pkglist = bdata.getroot() if pkglist.tag != "PackageList": print("%s doesn't look like a PackageList, skipping" % bfile) continue for pkg in bdata.xpath("//Package"): if "simplename" in pkg.attrib: pkg.set("simplefile", pkg.get("simplename")) del pkg.attrib["simplename"] changed = True # if we switched to simplefile, we also need to switch to uri if changed and "url" in pkglist.attrib: pkglist.set("uri", pkglist.get("url")) del pkglist.attrib["url"] if changed: print("Writing %s" % bfile) try: open(bfile, "w").write(lxml.etree.tostring(bdata)) except IOError: err = sys.exc_info()[1] print("Could not write %s: %s" % (bfile, err)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.3/service_modes.py000077500000000000000000000035001303523157100173500ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import glob import lxml.etree import Bcfg2.Options def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.2 Service modes to 1.3-style " "granular Service specification") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() files = [] for plugin in ['Bundler', 'Rules', 'Default']: 
files.extend(glob.glob(os.path.join(Bcfg2.Options.setup.repository, plugin, "*"))) for bfile in files: bdata = lxml.etree.parse(bfile) changed = False for svc in bdata.xpath("//Service|//BoundService"): if "mode" not in svc.attrib: continue mode = svc.get("mode") del svc.attrib["mode"] if mode not in ["default", "supervised", "interactive_only", "manual"]: print("Unrecognized mode on Service:%s: %s. Assuming default" % (svc.get("name"), mode)) mode = "default" if mode == "default" or mode == "supervised": svc.set("restart", "true") svc.set("install", "true") elif mode == "interactive_only": svc.set("restart", "interactive") svc.set("install", "true") elif mode == "manual": svc.set("restart", "false") svc.set("install", "false") changed = True if changed: print("Writing %s" % bfile) try: open(bfile, "w").write(lxml.etree.tostring(bdata)) except IOError: err = sys.exc_info()[1] print("Could not write %s: %s" % (bfile, err)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.4/000077500000000000000000000000001303523157100141475ustar00rootroot00000000000000tools/upgrade/1.4/README000066400000000000000000000006741303523157100150360ustar00rootroot00000000000000This directory contains scripts to help with upgrading from Bcfg2 1.3 to 1.4. migrate_decisions.py - Convert old group- and host-specific whitelist and blacklist files into structured XML convert_bundles.py - Remove deprecated explicit bundle names, rename .genshi bundles to .xml migrate_sslca.py - Migrate from the standalone SSLCA plugin to the built-in SSL certificate generation abilities of the Cfg plugintools/upgrade/1.4/convert_bundles.py000077500000000000000000000017741303523157100177310ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import lxml.etree import Bcfg2.Options def main(): parser = Bcfg2.Options.get_parser("Tool to remove bundle names") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() bundler_dir = os.path.join(Bcfg2.Options.setup.repository, "Bundler") if os.path.exists(bundler_dir): for root, _, files in os.walk(bundler_dir): for fname in files: bpath = os.path.join(root, fname) newpath = bpath if newpath.endswith(".genshi"): newpath = newpath[:-6] + "xml" print("Converting %s to %s" % (bpath, newpath)) else: print("Converting %s" % bpath) xroot = lxml.etree.parse(bpath) xdata = xroot.getroot() if 'name' in xdata.attrib: del xdata.attrib['name'] xroot.write(bpath) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.4/migrate_decisions.py000077500000000000000000000056401303523157100202210ustar00rootroot00000000000000#!/usr/bin/env python import os import re import sys import glob import lxml.etree import Bcfg2.Options SPECIFIC = re.compile(r'.*\/(white|black)list' r'(\.(H_(?P.*)|G\d+_(?P.*)))?$') def convert(files, xdata): hosts = [] groups = [] for oldfile in files: spec = SPECIFIC.match(oldfile) if spec and spec.group('host'): hosts.append(spec.group('host')) elif spec and spec.group('group'): groups.append(spec.group('group')) for oldfile in files: print("Converting %s" % oldfile) spec = SPECIFIC.match(oldfile) if not spec: print("Skipping unknown file %s" % oldfile) continue parent = xdata if spec.group('host'): for host in hosts: if host != spec.group('host'): parent = lxml.etree.SubElement(parent, "Client", name=host, negate="true") parent = lxml.etree.SubElement(parent, "Client", name=spec.group('host')) elif spec.group('group'): for host in hosts: parent = lxml.etree.SubElement(parent, "Client", name=host, negate="true") for group in groups: if group != 
spec.group('group'): parent = lxml.etree.SubElement(parent, "Group", name=group, negate="true") parent = lxml.etree.SubElement(parent, "Group", name=spec.group('group')) parent.append(lxml.etree.Comment("Converted from %s" % oldfile)) olddata = lxml.etree.parse(oldfile, parser=Bcfg2.Server.XMLParser) for decision in olddata.xpath('//Decision'): parent.append(decision) return xdata def main(): parser = Bcfg2.Options.get_parser( description="Migrate from Bcfg2 1.3 Decisions list format to 1.4 " "format") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() datadir = os.path.join(Bcfg2.Options.setup.repository, 'Decisions') whitelist = lxml.etree.Element("Decisions") blacklist = lxml.etree.Element("Decisions") if os.path.exists(datadir): convert(glob.glob(os.path.join(datadir, 'whitelist*')), whitelist) convert(glob.glob(os.path.join(datadir, 'blacklist*')), blacklist) print("Writing %s" % os.path.join(datadir, "whitelist.xml")) open(os.path.join(datadir, "whitelist.xml"), 'w').write(lxml.etree.tostring(whitelist, pretty_print=True)) print("Writing %s" % os.path.join(datadir, "blacklist.xml")) open(os.path.join(datadir, "blacklist.xml"), 'w').write(lxml.etree.tostring(blacklist, pretty_print=True)) if __name__ == '__main__': sys.exit(main()) tools/upgrade/1.4/migrate_sslca.py000077500000000000000000000027451303523157100173510ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import shutil import Bcfg2.Options def main(): parser = Bcfg2.Options.get_parser( description="Migrate from the SSLCA plugin to built-in Cfg SSL cert " "generation") parser.add_options([Bcfg2.Options.Common.repository]) parser.parse() sslcadir = os.path.join(Bcfg2.Options.setup.repository, 'SSLCA') cfgdir = os.path.join(Bcfg2.Options.setup.repository, 'Cfg') for root, _, files in os.walk(sslcadir): if not files: continue newpath = cfgdir + root[len(sslcadir):] if not os.path.exists(newpath): print("Creating %s and copying contents from %s" % (newpath, root)) shutil.copytree(root, newpath) else: print("Copying contents from %s to %s" % (root, newpath)) for fname in files: newfpath = os.path.exists(os.path.join(newpath, fname)) if newfpath: print("%s already exists, skipping" % newfpath) else: shutil.copy(os.path.join(root, fname), newpath) cert = os.path.join(newpath, "cert.xml") newcert = os.path.join(newpath, "sslcert.xml") key = os.path.join(newpath, "key.xml") newkey = os.path.join(newpath, "sslkey.xml") if os.path.exists(cert): os.rename(cert, newcert) if os.path.exists(key): os.rename(key, newkey) if __name__ == '__main__': sys.exit(main()) tools/yum-listpkgs-xml.py000077500000000000000000000022711303523157100160430ustar00rootroot00000000000000#!/usr/bin/python import sys sys.path.append('/usr/bin/') sys.path.append('/usr/share/yum-cli') import yummain def mySimpleList(self, pkg): print("" % (pkg.name, pkg.printVer())) def myListPkgs(self, lst, description, outputType): """outputs based on whatever outputType is. 
Current options: 'list' - simple pkg list 'info' - similar to rpm -qi output""" if outputType in ['list', 'info']: thingslisted = 0 if len(lst) > 0: thingslisted = 1 from yum.misc import sortPkgObj lst.sort(sortPkgObj) for pkg in lst: if outputType == 'list': self.simpleList(pkg) elif outputType == 'info': self.infoOutput(pkg) else: pass if thingslisted == 0: return 1, ['No Packages to list'] yummain.cli.output.YumOutput.listPkgs = myListPkgs yummain.cli.output.YumOutput.simpleList = mySimpleList try: sys.argv = [sys.argv[0], '-d', '0', 'list'] yummain.main(sys.argv[1:]) except KeyboardInterrupt: sys.stderr.write("\n\nExiting on user cancel.") sys.exit(1)
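The upgrade scripts above all share one pattern: parse options through Bcfg2.Options, locate files under the repository, and rewrite XML with lxml. The following is a minimal, illustrative sketch of that shared skeleton only; the script name, description string, and the choice to walk the Bundler directory are arbitrary assumptions, and nothing in it ships with Bcfg2.

#!/usr/bin/env python
# Sketch only: a hypothetical migration-script skeleton reusing the
# Options/lxml conventions of the 1.3/1.4 upgrade tools above.
import os
import sys

import lxml.etree

import Bcfg2.Options


def main():
    parser = Bcfg2.Options.get_parser(
        description="Example migration skeleton (illustrative only)")
    parser.add_options([Bcfg2.Options.Common.repository])
    parser.parse()

    repo = Bcfg2.Options.setup.repository
    for root, _, files in os.walk(os.path.join(repo, "Bundler")):
        for fname in files:
            path = os.path.join(root, fname)
            try:
                xdata = lxml.etree.parse(path)
            except lxml.etree.XMLSyntaxError:
                err = sys.exc_info()[1]
                print("Could not parse %s, skipping: %s" % (path, err))
                continue
            # ... inspect or rewrite xdata here, then write it back with
            # lxml.etree.tostring(xdata, pretty_print=True) as the other
            # scripts do ...


if __name__ == '__main__':
    sys.exit(main())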