pax_global_header00006660000000000000000000000064132247735640014527gustar00rootroot0000000000000052 comment=c96b45ab617b5282e15570e1772faa7a36ab54dc WALinuxAgent-2.2.20/000077500000000000000000000000001322477356400141205ustar00rootroot00000000000000WALinuxAgent-2.2.20/.gitattributes000066400000000000000000000047261322477356400170240ustar00rootroot00000000000000############################################################################### # Set default behavior to automatically normalize line endings. ############################################################################### * text=auto ############################################################################### # Set default behavior for command prompt diff. # # This is need for earlier builds of msysgit that does not have it on by # default for csharp files. # Note: This is only used by command line ############################################################################### #*.cs diff=csharp ############################################################################### # Set the merge driver for project and solution files # # Merging from the command prompt will add diff markers to the files if there # are conflicts (Merging from VS is not affected by the settings below, in VS # the diff markers are never inserted). Diff markers may cause the following # file extensions to fail to load in VS. An alternative would be to treat # these files as binary and thus will always conflict and require user # intervention with every merge. To do so, just uncomment the entries below ############################################################################### #*.sln merge=binary #*.csproj merge=binary #*.vbproj merge=binary #*.vcxproj merge=binary #*.vcproj merge=binary #*.dbproj merge=binary #*.fsproj merge=binary #*.lsproj merge=binary #*.wixproj merge=binary #*.modelproj merge=binary #*.sqlproj merge=binary #*.wwaproj merge=binary ############################################################################### # behavior for image files # # image files are treated as binary by default. ############################################################################### #*.jpg binary #*.png binary #*.gif binary ############################################################################### # diff behavior for common document formats # # Convert binary document formats to text before diffing them. This feature # is only available from the command line. Turn it on by uncommenting the # entries below. ############################################################################### #*.doc diff=astextplain #*.DOC diff=astextplain #*.docx diff=astextplain #*.DOCX diff=astextplain #*.dot diff=astextplain #*.DOT diff=astextplain #*.pdf diff=astextplain #*.PDF diff=astextplain #*.rtf diff=astextplain #*.RTF diff=astextplain WALinuxAgent-2.2.20/.gitignore000066400000000000000000000014421322477356400161110ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # Virtualenv py3env/ # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyCharm .idea/ # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .cache nosetests.xml coverage.xml # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ waagentc *.pyproj *.sln *.suo waagentc bin/waagent2.0c # rope project .ropeproject/ WALinuxAgent-2.2.20/.travis.yml000066400000000000000000000003661322477356400162360ustar00rootroot00000000000000language: python python: - "2.6" - "2.7" #- "3.2" #- "3.3" - "3.4" # command to install dependencies install: #- pip install . #- pip install -r requirements.txt - pip install pyasn1 # command to run tests script: nosetests tests WALinuxAgent-2.2.20/Changelog000066400000000000000000000021351322477356400157330ustar00rootroot00000000000000WALinuxAgent Changelog ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 12 August 2016, v2.1.6 . Improved RDMA support . Extension state migration . Alpine Linux support . Fixes for #347, #351, #353 15 July 2016, v2.1.5 . Goal state processing extension . Multi-nic improvements . Bug fixes for #145, #141, #133, #116, #187, #169, #104, #127, #163, #190, #185, #174 09 Mar 2016, WALinuxAgent 2.1.4 . Add support for FreeBSD . Fix a bug for internal extension version resolving 29 Jan 2016, WALinuxAgent 2.1.3 . Fixed endpoint probing for Azure Stack . Multiple fixes for extension handling 07 Dec 2015, WALinuxAgent 2.1.2 . Multiple fixes for extension handling and provisioning 07 Aug 2015, WALinuxAgent 2.1.1 . Support python3 . Fixed bugs for metadata protocol . Fixed a few pylint warnings . Enabled travis-ci ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 01 Jul 2015, WALinuxAgent 2.1.0 . Divide waagent into different modules WALinuxAgent-2.2.20/LICENSE.txt000066400000000000000000000261301322477356400157450ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
WALinuxAgent-2.2.20/MAINTENANCE.md000066400000000000000000000013131322477356400161620ustar00rootroot00000000000000## Microsoft Azure Linux Agent Maintenance Guide ### Version rules * Production releases are public * Test releases are for internal use * Production versions use only [major].[minor].[revision] * Test versions use [major].[minor].[revision].[build] * Test a.b.c.0 is equivalent to Prod a.b.c * Publishing to Production requires incrementing the revision and dropping the build number * We do not use pre-release labels on any builds ### Version updates * The version of the agent can be found at https://github.com/Azure/WALinuxAgent/blob/master/azurelinuxagent/common/version.py#L53 assigned to AGENT_VERSION * Update the version here and send for PR before declaring a release via GitHub WALinuxAgent-2.2.20/MANIFEST000066400000000000000000000005701322477356400152530ustar00rootroot00000000000000# file GENERATED by distutils, do NOT edit README setup.py bin/waagent config/waagent.conf config/waagent.logrotate test/test_logger.py walinuxagent/__init__.py walinuxagent/agent.py walinuxagent/conf.py walinuxagent/envmonitor.py walinuxagent/extension.py walinuxagent/install.py walinuxagent/logger.py walinuxagent/protocol.py walinuxagent/provision.py walinuxagent/util.py WALinuxAgent-2.2.20/MANIFEST.in000066400000000000000000000001141322477356400156520ustar00rootroot00000000000000recursive-include bin * recursive-include init * recursive-include config * WALinuxAgent-2.2.20/NOTICE000066400000000000000000000002411322477356400150210ustar00rootroot00000000000000Microsoft Azure Linux Agent Copyright 2012 Microsoft Corporation This product includes software developed at Microsoft Corporation (http://www.microsoft.com/). WALinuxAgent-2.2.20/README.md000066400000000000000000000340321322477356400154010ustar00rootroot00000000000000## Microsoft Azure Linux Agent README ### INTRODUCTION The Microsoft Azure Linux Agent (waagent) manages Linux & BSD provisioning, and VM interaction with the Azure Fabric Controller. It provides the following functionality for Linux and BSD IaaS deployments: * Image Provisioning - Creation of a user account - Configuring SSH authentication types - Deployment of SSH public keys and key pairs - Setting the host name - Publishing the host name to the platform DNS - Reporting SSH host key fingerprint to the platform - Resource Disk Management - Formatting and mounting the resource disk - Configuring swap space * Networking - Manages routes to improve compatibility with platform DHCP servers - Ensures the stability of the network interface name * Kernel - Configure virtual NUMA (disable for kernel <2.6.37) - Consume Hyper-V entropy for /dev/random - Configure SCSI timeouts for the root device (which could be remote) * Diagnostics - Console redirection to the serial port * SCVMM Deployments - Detect and bootstrap the VMM agent for Linux when running in a System Center Virtual Machine Manager 2012R2 environment * VM Extension - Inject component authored by Microsoft and Partners into Linux VM (IaaS) to enable software and configuration automation - VM Extension reference implementation on https://github.com/Azure/azure-linux-extensions ### COMMUNICATION The information flow from the platform to the agent occurs via two channels: * A boot-time attached DVD for IaaS deployments. This DVD includes an OVF-compliant configuration file that includes all provisioning information other than the actual SSH keypairs. 
* A TCP endpoint exposing a REST API used to obtain deployment and topology configuration. The agent will use an HTTP proxy if provided via the `http_proxy` (for `http` requests) or `https_proxy` (for `https` requests) environment variables. The `HttpProxy.Host` and `HttpProxy.Port` configuration variables (see below), if used, will override the environment settings. Due to limitations of Python, the agent *does not* support HTTP proxies requiring authentication. ### REQUIREMENTS The following systems have been tested and are known to work with the Azure Linux Agent. Please note that this list may differ from the official list of supported systems on the Microsoft Azure Platform as described here: http://support.microsoft.com/kb/2805216 Waagent depends on some system packages in order to function properly: * Python 2.6+ * OpenSSL 1.0+ * OpenSSH 5.3+ * Filesystem utilities: sfdisk, fdisk, mkfs, parted * Password tools: chpasswd, sudo * Text processing tools: sed, grep * Network tools: ip-route ### INSTALLATION Installation via your distribution's package repository is preferred. You can also customize your own RPM or DEB packages using the configuration samples provided (see deb and rpm sections below). For more advanced installation options, such as installing to custom locations or prefixes, you can use ***setuptools*** to install from source by running: #sudo python setup.py install --register-service You can view more installation options by running: #sudo python setup.py install --help The agent's log file is kept at /var/log/waagent.log. ### UPGRADE Upgrading via your distribution's package repository is preferred. If upgrading manually, same with installation above by running: #sudo python setup.py install --force Restart waagent service,for most of linux distributions: #sudo service waagent restart For Ubuntu, use: #sudo service walinuxagent restart For CoreOS, use: #sudo systemctl restart waagent The agent's log file is kept at /var/log/waagent.log. ### COMMAND LINE OPTIONS Flags: -verbose: Increase verbosity of specified command -force: Skip interactive confirmation for some commands Commands: -help: Lists the supported commands and flags. -deprovision: Attempt to clean the system and make it suitable for re-provisioning, by deleting the following: * All SSH host keys (if Provisioning.RegenerateSshHostKeyPair is 'y' in the configuration file) * Nameserver configuration in /etc/resolv.conf * Root password from /etc/shadow (if Provisioning.DeleteRootPassword is 'y' in the configuration file) * Cached DHCP client leases * Resets host name to localhost.localdomain WARNING! Deprovision does not guarantee that the image is cleared of all sensitive information and suitable for redistribution. -deprovision+user: Performs everything under deprovision (above) and also deletes the last provisioned user account and associated data. -version: Displays the version of waagent -serialconsole: Configures GRUB to mark ttyS0 (the first serial port) as the boot console. This ensures that kernel bootup logs are sent to the serial port and made available for debugging. -daemon: Run waagent as a daemon to manage interaction with the platform. This argument is specified to waagent in the waagent init script. -start: Run waagent as a background process ### CONFIGURATION A configuration file (/etc/waagent.conf) controls the actions of waagent. Blank lines and lines whose first character is a `#` are ignored (end-of-line comments are *not* supported). 
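For reference, here is a rough standalone sketch of how such a line is split into a key and a value, modeled on the `ConfigurationProvider.load` logic in `azurelinuxagent/common/conf.py` later in this tree; the helper name `parse_config_line` is illustrative and is not part of the agent:

```python
def parse_config_line(line):
    """Return (key, value) for a waagent.conf line, or None if the line is ignored."""
    if line.startswith("#") or "=" not in line:
        return None                                # comment, blank, or invalid line
    key, _, value = line.partition("=")
    value = value.split("#")[0].strip('" ')        # drop trailing '#...' text, quotes, spaces
    return key.strip(), (None if value == "None" else value)

print(parse_config_line("ResourceDisk.Filesystem=ext4"))  # ('ResourceDisk.Filesystem', 'ext4')
print(parse_config_line("HttpProxy.Host=None"))           # ('HttpProxy.Host', None)
```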
A sample configuration file is shown below: ``` Provisioning.Enabled=y Provisioning.UseCloudInit=n Provisioning.DeleteRootPassword=n Provisioning.RegenerateSshHostKeyPair=y Provisioning.SshHostKeyPairType=rsa Provisioning.MonitorHostName=y Provisioning.DecodeCustomData=n Provisioning.ExecuteCustomData=n Provisioning.PasswordCryptId=6 Provisioning.PasswordCryptSaltLength=10 ResourceDisk.Format=y ResourceDisk.Filesystem=ext4 ResourceDisk.MountPoint=/mnt/resource ResourceDisk.MountOptions=None ResourceDisk.EnableSwap=n ResourceDisk.SwapSizeMB=0 Logs.Verbose=n OS.AllowHTTP=n OS.RootDeviceScsiTimeout=300 OS.EnableFIPS=n OS.OpensslPath=None OS.SshClientAliveInterval=180 OS.SshDir=/etc/ssh HttpProxy.Host=None HttpProxy.Port=None ``` The various configuration options are described in detail below. Configuration options are of three types : Boolean, String or Integer. The Boolean configuration options can be specified as "y" or "n". The special keyword "None" may be used for some string type configuration entries as detailed below. #### Configuration File Options * __Provisioning.Enabled__ _Type: Boolean_ _Default: y_ This allows the user to enable or disable the provisioning functionality in the agent. Valid values are "y" or "n". If provisioning is disabled, SSH host and user keys in the image are preserved and any configuration specified in the Azure provisioning API is ignored. * __Provisioning.UseCloudInit__ _Type: Boolean_ _Default: n_ This options enables / disables support for provisioning by means of cloud-init. When true ("y"), the agent will wait for cloud-init to complete before installing extensions and processing the latest goal state. _Provisioning.Enabled_ must be disabled ("n") for this option to have an effect. Setting _Provisioning.Enabled_ to true ("y") overrides this option and runs the built-in agent provisioning code. * __Provisioning.DeleteRootPassword__ _Type: Boolean_ _Default: n_ If set, the root password in the /etc/shadow file is erased during the provisioning process. * __Provisioning.RegenerateSshHostKeyPair__ _Type: Boolean_ _Default: y_ If set, all SSH host key pairs (ecdsa, dsa and rsa) are deleted during the provisioning process from /etc/ssh/. And a single fresh key pair is generated. The encryption type for the fresh key pair is configurable by the Provisioning.SshHostKeyPairType entry. Please note that some distributions will re-create SSH key pairs for any missing encryption types when the SSH daemon is restarted (for example, upon a reboot). * __Provisioning.SshHostKeyPairType__ _Type: String_ _Default: rsa_ This can be set to an encryption algorithm type that is supported by the SSH daemon on the VM. The typically supported values are "rsa", "dsa" and "ecdsa". Note that "putty.exe" on Windows does not support "ecdsa". So, if you intend to use putty.exe on Windows to connect to a Linux deployment, please use "rsa" or "dsa". * __Provisioning.MonitorHostName__ _Type: Boolean_ _Default: y_ If set, waagent will monitor the Linux VM for hostname changes (as returned by the "hostname" command) and automatically update the networking configuration in the image to reflect the change. In order to push the name change to the DNS servers, networking will be restarted in the VM. This will result in brief loss of Internet connectivity. * __Provisioning.DecodeCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will decode CustomData from Base64. 
* __Provisioning.ExecuteCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will execute CustomData after provisioning. * __Provisioning.PasswordCryptId__ _Type:String_ _Default:6_ Algorithm used by crypt when generating password hash. 1 - MD5 2a - Blowfish 5 - SHA-256 6 - SHA-512 * __Provisioning.PasswordCryptSaltLength__ _Type:String_ _Default:10_ Length of random salt used when generating password hash. * __ResourceDisk.Format__ _Type: Boolean_ _Default: y_ If set, the resource disk provided by the platform will be formatted and mounted by waagent if the filesystem type requested by the user in "ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of type Linux (83) will be made available on the disk. Note that this partition will not be formatted if it can be successfully mounted. * __ResourceDisk.Filesystem__ _Type: String_ _Default: ext4_ This specifies the filesystem type for the resource disk. Supported values vary by Linux distribution. If the string is X, then mkfs.X should be present on the Linux image. SLES 11 images should typically use 'ext3'. BSD images should use 'ufs2' here. * __ResourceDisk.MountPoint__ _Type: String_ _Default: /mnt/resource_ This specifies the path at which the resource disk is mounted. * __ResourceDisk.MountOptions__ _Type: String_ _Default: None_ Specifies disk mount options to be passed to the mount -o command. This is a comma separated list of values, ex. 'nodev,nosuid'. See mount(8) for details. * __ResourceDisk.EnableSwap__ _Type: Boolean_ _Default: n_ If set, a swap file (/swapfile) is created on the resource disk and added to the system swap space. * __ResourceDisk.SwapSizeMB__ _Type: Integer_ _Default: 0_ The size of the swap file in megabytes. * Logs.Verbose _Type: Boolean_ _Default: n_ If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. * __OS.AllowHTTP__ _Type: Boolean_ _Default: n_ If set to `y` and SSL support is not compiled into Python, the agent will fall-back to use HTTP. Otherwise, if SSL support is not compiled into Python, the agent will fail all HTTPS requests. Note: Allowing HTTP may unintentionally expose secure data. * __OS.EnableRDMA__ _Type: Boolean_ _Default: n_ If set, the agent will attempt to install and then load an RDMA kernel driver that matches the version of the firmware on the underlying hardware. * __OS.EnableFIPS__ _Type: Boolean_ _Default: n_ If set, the agent will emit into the environment "OPENSSL_FIPS=1" when executing OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libraries. Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant are installed, then enabling this option will cause all OpenSSL commands to fail._ * __OS.RootDeviceScsiTimeout__ _Type: Integer_ _Default: 300_ This configures the SCSI timeout in seconds on the root device. If not set, the system defaults are used. * __OS.OpensslPath__ _Type: String_ _Default: None_ This can be used to specify an alternate path for the openssl binary to use for cryptographic operations. * __OS.SshClientAliveInterval__ _Type: Integer_ _Default: 180_ This values sets the number of seconds the agent uses for the SSH ClientAliveInterval configuration option. * __OS.SshDir__ _Type: String_ _Default: `/etc/ssh`_ This option can be used to override the normal location of the SSH configuration directory. 
* __HttpProxy.Host, HttpProxy.Port__ _Type: String_ _Default: None_ If set, the agent will use this proxy server to access the internet. These values *will* override the `http_proxy` or `https_proxy` environment variables. Lastly, `HttpProxy.Host` is required (if to be used) and `HttpProxy.Port` is optional. ### APPENDIX We do not maintain packaging information in this repo but some samples are shown below as a reference. See the downstream distribution repositories for officially maintained packaging. #### deb packages The official Ubuntu WALinuxAgent package can be found here: https://launchpad.net/ubuntu/+source/walinuxagent Run once: 1. Install required packages: `sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper` 2. Create the pbuilder environment: `sudo pbuilder create --debootstrapopts --variant=buildd` 3. Obtain from a downstream package repo To compile the package, from the top-most directory: 1. Build the source package: `dpkg-buildpackage -S` 2. Build the package: `sudo pbuilder build ` 3. Fetch the built package, usually from `/var/cache/pbuilder/result` #### rpm packages The instructions below describe how to build an rpm package. 1. Install setuptools `curl https://bootstrap.pypa.io/ez_setup.py -o - | python` 2. The following command will build the binary and source RPMs: `python setup.py bdist_rpm` ----- This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. WALinuxAgent-2.2.20/__main__.py000066400000000000000000000012521322477356400162120ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.agent as agent agent.main() WALinuxAgent-2.2.20/azurelinuxagent/000077500000000000000000000000001322477356400173455ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/__init__.py000066400000000000000000000011651322477356400214610ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/agent.py000066400000000000000000000211471322477356400210220ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # """ Module agent """ from __future__ import print_function import os import sys import re import subprocess import traceback import azurelinuxagent.common.logger as logger import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_VERSION, \ DISTRO_NAME, DISTRO_VERSION, \ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO, GOAL_STATE_AGENT_VERSION from azurelinuxagent.common.osutil import get_osutil class Agent(object): def __init__(self, verbose, conf_file_path=None): """ Initialize agent running environment. """ self.conf_file_path = conf_file_path self.osutil = get_osutil() #Init stdout log level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.STDOUT, level) #Init config conf_file_path = self.conf_file_path \ if self.conf_file_path is not None \ else self.osutil.get_agent_conf_file_path() conf.load_conf_from_file(conf_file_path) #Init log verbose = verbose or conf.get_logs_verbose() level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.FILE, level, path="/var/log/waagent.log") logger.add_logger_appender(logger.AppenderType.CONSOLE, level, path="/dev/console") ext_log_dir = conf.get_ext_log_dir() try: if os.path.isfile(ext_log_dir): raise Exception("{0} is a file".format(ext_log_dir)) if not os.path.isdir(ext_log_dir): os.makedirs(ext_log_dir) except Exception as e: logger.error( "Exception occurred while creating extension " "log directory {0}: {1}".format(ext_log_dir, e)) #Init event reporter event.init_event_status(conf.get_lib_dir()) event_dir = os.path.join(conf.get_lib_dir(), "events") event.init_event_logger(event_dir) event.enable_unhandled_err_dump("WALA") def daemon(self): """ Run agent daemon """ child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) from azurelinuxagent.daemon import get_daemon_handler daemon_handler = get_daemon_handler() daemon_handler.run(child_args=child_args) def provision(self): """ Run provision command """ from azurelinuxagent.pa.provision import get_provision_handler provision_handler = get_provision_handler() provision_handler.run() def deprovision(self, force=False, deluser=False): """ Run deprovision command """ from azurelinuxagent.pa.deprovision import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run(force=force, deluser=deluser) def register_service(self): """ Register agent as a service """ print("Register {0} service".format(AGENT_NAME)) self.osutil.register_agent_service() print("Stop {0} 
service".format(AGENT_NAME)) self.osutil.stop_agent_service() print("Start {0} service".format(AGENT_NAME)) self.osutil.start_agent_service() def run_exthandlers(self): """ Run the update and extension handler """ from azurelinuxagent.ga.update import get_update_handler update_handler = get_update_handler() update_handler.run() def show_configuration(self): configuration = conf.get_configuration() for k in sorted(configuration.keys()): print("{0} = {1}".format(k, configuration[k])) def main(args=[]): """ Parse command line arguments, exit with usage() on error. Invoke different methods according to different command """ if len(args) <= 0: args = sys.argv[1:] command, force, verbose, conf_file_path = parse_args(args) if command == "version": version() elif command == "help": usage() elif command == "start": start(conf_file_path=conf_file_path) else: try: agent = Agent(verbose, conf_file_path=conf_file_path) if command == "deprovision+user": agent.deprovision(force, deluser=True) elif command == "deprovision": agent.deprovision(force, deluser=False) elif command == "provision": agent.provision() elif command == "register-service": agent.register_service() elif command == "daemon": agent.daemon() elif command == "run-exthandlers": agent.run_exthandlers() elif command == "show-configuration": agent.show_configuration() except Exception: logger.error(u"Failed to run '{0}': {1}", command, traceback.format_exc()) def parse_args(sys_args): """ Parse command line arguments """ cmd = "help" force = False verbose = False conf_file_path = None for a in sys_args: m = re.match("^(?:[-/]*)configuration-path:([\w/\.\-_]+)", a) if not m is None: conf_file_path = m.group(1) if not os.path.exists(conf_file_path): print("Error: Configuration file {0} does not exist".format( conf_file_path), file=sys.stderr) usage() sys.exit(1) elif re.match("^([-/]*)deprovision\\+user", a): cmd = "deprovision+user" elif re.match("^([-/]*)deprovision", a): cmd = "deprovision" elif re.match("^([-/]*)daemon", a): cmd = "daemon" elif re.match("^([-/]*)start", a): cmd = "start" elif re.match("^([-/]*)register-service", a): cmd = "register-service" elif re.match("^([-/]*)run-exthandlers", a): cmd = "run-exthandlers" elif re.match("^([-/]*)version", a): cmd = "version" elif re.match("^([-/]*)verbose", a): verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^([-/]*)show-configuration", a): cmd = "show-configuration" elif re.match("^([-/]*)(help|usage|\\?)", a): cmd = "help" else: cmd = "help" break return cmd, force, verbose, conf_file_path def version(): """ Show agent version """ print(("{0} running on {1} {2}".format(AGENT_LONG_VERSION, DISTRO_NAME, DISTRO_VERSION))) print("Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)) print("Goal state agent: {0}".format(GOAL_STATE_AGENT_VERSION)) def usage(): """ Show agent usage """ print("") print((("usage: {0} [-verbose] [-force] [-help] " "-configuration-path:" "-deprovision[+user]|-register-service|-version|-daemon|-start|" "-run-exthandlers]" "").format(sys.argv[0]))) print("") def start(conf_file_path=None): """ Start agent daemon in a background process and set stdout/stderr to /dev/null """ devnull = open(os.devnull, 'w') args = [sys.argv[0], '-daemon'] if conf_file_path is not None: args.append('-configuration-path:{0}'.format(conf_file_path)) subprocess.Popen(args, stdout=devnull, stderr=devnull) if __name__ == '__main__' : main() 
WALinuxAgent-2.2.20/azurelinuxagent/common/000077500000000000000000000000001322477356400206355ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/common/__init__.py000066400000000000000000000011661322477356400227520ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/common/conf.py000066400000000000000000000234721322477356400221440ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # """ Module conf loads and parses configuration file """ import os import os.path import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import AgentConfigError class ConfigurationProvider(object): """ Parse and store key:values in /etc/waagent.conf. 
""" def __init__(self): self.values = dict() def load(self, content): if not content: raise AgentConfigError("Can't not parse empty configuration") for line in content.split('\n'): if not line.startswith("#") and "=" in line: parts = line.split('=') if len(parts) < 2: continue key = parts[0].strip() value = parts[1].split('#')[0].strip("\" ") self.values[key] = value if value != "None" else None def get(self, key, default_val): val = self.values.get(key) return val if val is not None else default_val def get_switch(self, key, default_val): val = self.values.get(key) if val is not None and val.lower() == 'y': return True elif val is not None and val.lower() == 'n': return False return default_val def get_int(self, key, default_val): try: return int(self.values.get(key)) except TypeError: return default_val except ValueError: return default_val __conf__ = ConfigurationProvider() def load_conf_from_file(conf_file_path, conf=__conf__): """ Load conf file from: conf_file_path """ if os.path.isfile(conf_file_path) == False: raise AgentConfigError(("Missing configuration in {0}" "").format(conf_file_path)) try: content = fileutil.read_file(conf_file_path) conf.load(content) except IOError as err: raise AgentConfigError(("Failed to load conf file:{0}, {1}" "").format(conf_file_path, err)) __SWITCH_OPTIONS__ = { "OS.AllowHTTP" : False, "OS.EnableFirewall" : False, "OS.EnableFIPS" : False, "OS.EnableRDMA" : False, "OS.UpdateRdmaDriver" : False, "OS.CheckRdmaDriver" : False, "Logs.Verbose" : False, "Provisioning.Enabled" : True, "Provisioning.UseCloudInit" : False, "Provisioning.AllowResetSysUser" : False, "Provisioning.RegenerateSshHostKeyPair" : False, "Provisioning.DeleteRootPassword" : False, "Provisioning.DecodeCustomData" : False, "Provisioning.ExecuteCustomData" : False, "Provisioning.MonitorHostName" : False, "DetectScvmmEnv" : False, "ResourceDisk.Format" : False, "DetectScvmmEnv" : False, "ResourceDisk.Format" : False, "ResourceDisk.EnableSwap" : False, "AutoUpdate.Enabled" : True, "EnableOverProvisioning" : False } __STRING_OPTIONS__ = { "Lib.Dir" : "/var/lib/waagent", "DVD.MountPoint" : "/mnt/cdrom/secure", "Pid.File" : "/var/run/waagent.pid", "Extension.LogDir" : "/var/log/azure", "OS.OpensslPath" : "/usr/bin/openssl", "OS.SshDir" : "/etc/ssh", "OS.HomeDir" : "/home", "OS.PasswordPath" : "/etc/shadow", "OS.SudoersDir" : "/etc/sudoers.d", "OS.RootDeviceScsiTimeout" : None, "Provisioning.SshHostKeyPairType" : "rsa", "Provisioning.PasswordCryptId" : "6", "HttpProxy.Host" : None, "ResourceDisk.MountPoint" : "/mnt/resource", "ResourceDisk.MountOptions" : None, "ResourceDisk.Filesystem" : "ext3", "AutoUpdate.GAFamily" : "Prod" } __INTEGER_OPTIONS__ = { "OS.SshClientAliveInterval" : 180, "Provisioning.PasswordCryptSaltLength" : 10, "HttpProxy.Port" : None, "ResourceDisk.SwapSizeMB" : 0, "Autoupdate.Frequency" : 3600 } def get_configuration(conf=__conf__): options = {} for option in __SWITCH_OPTIONS__: options[option] = conf.get_switch(option, __SWITCH_OPTIONS__[option]) for option in __STRING_OPTIONS__: options[option] = conf.get(option, __STRING_OPTIONS__[option]) for option in __INTEGER_OPTIONS__: options[option] = conf.get_int(option, __INTEGER_OPTIONS__[option]) return options def enable_firewall(conf=__conf__): return conf.get_switch("OS.EnableFirewall", False) def enable_rdma(conf=__conf__): return conf.get_switch("OS.EnableRDMA", False) or \ conf.get_switch("OS.UpdateRdmaDriver", False) or \ conf.get_switch("OS.CheckRdmaDriver", False) def enable_rdma_update(conf=__conf__): return 
conf.get_switch("OS.UpdateRdmaDriver", False) def get_logs_verbose(conf=__conf__): return conf.get_switch("Logs.Verbose", False) def get_lib_dir(conf=__conf__): return conf.get("Lib.Dir", "/var/lib/waagent") def get_published_hostname(conf=__conf__): return os.path.join(get_lib_dir(conf), 'published_hostname') def get_dvd_mount_point(conf=__conf__): return conf.get("DVD.MountPoint", "/mnt/cdrom/secure") def get_agent_pid_file_path(conf=__conf__): return conf.get("Pid.File", "/var/run/waagent.pid") def get_ext_log_dir(conf=__conf__): return conf.get("Extension.LogDir", "/var/log/azure") def get_fips_enabled(conf=__conf__): return conf.get_switch("OS.EnableFIPS", False) def get_openssl_cmd(conf=__conf__): return conf.get("OS.OpensslPath", "/usr/bin/openssl") def get_ssh_client_alive_interval(conf=__conf__): return conf.get("OS.SshClientAliveInterval", 180) def get_ssh_dir(conf=__conf__): return conf.get("OS.SshDir", "/etc/ssh") def get_home_dir(conf=__conf__): return conf.get("OS.HomeDir", "/home") def get_passwd_file_path(conf=__conf__): return conf.get("OS.PasswordPath", "/etc/shadow") def get_sudoers_dir(conf=__conf__): return conf.get("OS.SudoersDir", "/etc/sudoers.d") def get_sshd_conf_file_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), "sshd_config") def get_ssh_key_glob(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_*key*') def get_ssh_key_private_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key'.format(get_ssh_host_keypair_type(conf))) def get_ssh_key_public_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key.pub'.format(get_ssh_host_keypair_type(conf))) def get_root_device_scsi_timeout(conf=__conf__): return conf.get("OS.RootDeviceScsiTimeout", None) def get_ssh_host_keypair_type(conf=__conf__): keypair_type = conf.get("Provisioning.SshHostKeyPairType", "rsa") if keypair_type == "auto": ''' auto generates all supported key types and returns the rsa thumbprint as the default. 
''' return "rsa" return keypair_type def get_ssh_host_keypair_mode(conf=__conf__): return conf.get("Provisioning.SshHostKeyPairType", "rsa") def get_provision_enabled(conf=__conf__): return conf.get_switch("Provisioning.Enabled", True) def get_provision_cloudinit(conf=__conf__): return conf.get_switch("Provisioning.UseCloudInit", False) def get_allow_reset_sys_user(conf=__conf__): return conf.get_switch("Provisioning.AllowResetSysUser", False) def get_regenerate_ssh_host_key(conf=__conf__): return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False) def get_delete_root_password(conf=__conf__): return conf.get_switch("Provisioning.DeleteRootPassword", False) def get_decode_customdata(conf=__conf__): return conf.get_switch("Provisioning.DecodeCustomData", False) def get_execute_customdata(conf=__conf__): return conf.get_switch("Provisioning.ExecuteCustomData", False) def get_password_cryptid(conf=__conf__): return conf.get("Provisioning.PasswordCryptId", "6") def get_password_crypt_salt_len(conf=__conf__): return conf.get_int("Provisioning.PasswordCryptSaltLength", 10) def get_monitor_hostname(conf=__conf__): return conf.get_switch("Provisioning.MonitorHostName", False) def get_httpproxy_host(conf=__conf__): return conf.get("HttpProxy.Host", None) def get_httpproxy_port(conf=__conf__): return conf.get_int("HttpProxy.Port", None) def get_detect_scvmm_env(conf=__conf__): return conf.get_switch("DetectScvmmEnv", False) def get_resourcedisk_format(conf=__conf__): return conf.get_switch("ResourceDisk.Format", False) def get_resourcedisk_enable_swap(conf=__conf__): return conf.get_switch("ResourceDisk.EnableSwap", False) def get_resourcedisk_mountpoint(conf=__conf__): return conf.get("ResourceDisk.MountPoint", "/mnt/resource") def get_resourcedisk_mountoptions(conf=__conf__): return conf.get("ResourceDisk.MountOptions", None) def get_resourcedisk_filesystem(conf=__conf__): return conf.get("ResourceDisk.Filesystem", "ext3") def get_resourcedisk_swap_size_mb(conf=__conf__): return conf.get_int("ResourceDisk.SwapSizeMB", 0) def get_autoupdate_gafamily(conf=__conf__): return conf.get("AutoUpdate.GAFamily", "Prod") def get_autoupdate_enabled(conf=__conf__): return conf.get_switch("AutoUpdate.Enabled", True) def get_autoupdate_frequency(conf=__conf__): return conf.get_int("Autoupdate.Frequency", 3600) def get_enable_overprovisioning(conf=__conf__): return conf.get_switch("EnableOverProvisioning", False) def get_allow_http(conf=__conf__): return conf.get_switch("OS.AllowHTTP", False) WALinuxAgent-2.2.20/azurelinuxagent/common/dhcp.py000066400000000000000000000344751322477356400221420ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ import os import socket import array import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.utils.textutil import hex_dump, hex_dump2, \ hex_dump3, \ compare_bytes, str_to_ord, \ unpack_big_endian, \ int_to_ip4_addr from azurelinuxagent.common.exception import DhcpError from azurelinuxagent.common.osutil import get_osutil # the kernel routing table representation of 168.63.129.16 KNOWN_WIRESERVER_IP_ENTRY = '10813FA8' KNOWN_WIRESERVER_IP = '168.63.129.16' def get_dhcp_handler(): return DhcpHandler() class DhcpHandler(object): """ Azure use DHCP option 245 to pass endpoint ip to VMs. """ def __init__(self): self.osutil = get_osutil() self.endpoint = None self.gateway = None self.routes = None self._request_broadcast = False self.skip_cache = False def run(self): """ Send dhcp request Configure default gateway and routes Save wire server endpoint if found """ if self.wireserver_route_exists or self.dhcp_cache_exists: return self.send_dhcp_req() self.conf_routes() def wait_for_network(self): """ Wait for network stack to be initialized. """ ipv4 = self.osutil.get_ip4_addr() while ipv4 == '' or ipv4 == '0.0.0.0': logger.info("Waiting for network.") time.sleep(10) logger.info("Try to start network interface.") self.osutil.start_network() ipv4 = self.osutil.get_ip4_addr() @property def wireserver_route_exists(self): """ Determine whether a route to the known wireserver ip already exists, and if so use that as the endpoint. This is true when running in a virtual network. :return: True if a route to KNOWN_WIRESERVER_IP exists. """ route_exists = False logger.info("Test for route to {0}".format(KNOWN_WIRESERVER_IP)) try: route_file = '/proc/net/route' if os.path.exists(route_file) and \ KNOWN_WIRESERVER_IP_ENTRY in open(route_file).read(): # reset self.gateway and self.routes # we do not need to alter the routing table self.endpoint = KNOWN_WIRESERVER_IP self.gateway = None self.routes = None route_exists = True logger.info("Route to {0} exists".format(KNOWN_WIRESERVER_IP)) else: logger.warn("No route exists to {0}".format(KNOWN_WIRESERVER_IP)) except Exception as e: logger.error( "Could not determine whether route exists to {0}: {1}".format( KNOWN_WIRESERVER_IP, e)) return route_exists @property def dhcp_cache_exists(self): """ Check whether the dhcp options cache exists and contains the wireserver endpoint, unless skip_cache is True. 
:return: True if the cached endpoint was found in the dhcp lease """ if self.skip_cache: return False exists = False logger.info("Checking for dhcp lease cache") cached_endpoint = self.osutil.get_dhcp_lease_endpoint() if cached_endpoint is not None: self.endpoint = cached_endpoint exists = True logger.info("Cache exists [{0}]".format(exists)) return exists def conf_routes(self): logger.info("Configure routes") logger.info("Gateway:{0}", self.gateway) logger.info("Routes:{0}", self.routes) # Add default gateway if self.gateway is not None and self.osutil.is_missing_default_route(): self.osutil.route_add(0, 0, self.gateway) if self.routes is not None: for route in self.routes: self.osutil.route_add(route[0], route[1], route[2]) def _send_dhcp_req(self, request): __waiting_duration__ = [0, 10, 30, 60, 60] for duration in __waiting_duration__: try: self.osutil.allow_dhcp_broadcast() response = socket_send(request) validate_dhcp_resp(request, response) return response except DhcpError as e: logger.warn("Failed to send DHCP request: {0}", e) time.sleep(duration) return None def send_dhcp_req(self): """ Build dhcp request with mac addr Configure route to allow dhcp traffic Stop dhcp service if necessary """ logger.info("Send dhcp request") mac_addr = self.osutil.get_mac_addr() # Do unicast first, then fallback to broadcast if fails. req = build_dhcp_request(mac_addr, self._request_broadcast) if not self._request_broadcast: self._request_broadcast = True # Temporary allow broadcast for dhcp. Remove the route when done. missing_default_route = self.osutil.is_missing_default_route() ifname = self.osutil.get_if_name() if missing_default_route: self.osutil.set_route_for_dhcp_broadcast(ifname) # In some distros, dhcp service needs to be shutdown before agent probe # endpoint through dhcp. 
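# (socket_send() below binds UDP port 68 directly, so a DHCP client that is still
# running could otherwise hold the port or consume the response.)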
if self.osutil.is_dhcp_enabled(): self.osutil.stop_dhcp_service() resp = self._send_dhcp_req(req) if self.osutil.is_dhcp_enabled(): self.osutil.start_dhcp_service() if missing_default_route: self.osutil.remove_route_for_dhcp_broadcast(ifname) if resp is None: raise DhcpError("Failed to receive dhcp response.") self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp) def validate_dhcp_resp(request, response): bytes_recv = len(response) if bytes_recv < 0xF6: logger.error("HandleDhcpResponse: Too few bytes received:{0}", bytes_recv) return False logger.verbose("BytesReceived:{0}", hex(bytes_recv)) logger.verbose("DHCP response:{0}", hex_dump(response, bytes_recv)) # check transactionId, cookie, MAC address cookie should never mismatch # transactionId and MAC address may mismatch if we see a response # meant from another machine if not compare_bytes(request, response, 0xEC, 4): logger.verbose("Cookie not match:\nsend={0},\nreceive={1}", hex_dump3(request, 0xEC, 4), hex_dump3(response, 0xEC, 4)) raise DhcpError("Cookie in dhcp respones doesn't match the request") if not compare_bytes(request, response, 4, 4): logger.verbose("TransactionID not match:\nsend={0},\nreceive={1}", hex_dump3(request, 4, 4), hex_dump3(response, 4, 4)) raise DhcpError("TransactionID in dhcp respones " "doesn't match the request") if not compare_bytes(request, response, 0x1C, 6): logger.verbose("Mac Address not match:\nsend={0},\nreceive={1}", hex_dump3(request, 0x1C, 6), hex_dump3(response, 0x1C, 6)) raise DhcpError("Mac Addr in dhcp respones " "doesn't match the request") def parse_route(response, option, i, length, bytes_recv): # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx logger.verbose("Routes at offset: {0} with length:{1}", hex(i), hex(length)) routes = [] if length < 5: logger.error("Data too small for option:{0}", option) j = i + 2 while j < (i + length + 2): mask_len_bits = str_to_ord(response[j]) mask_len_bytes = (((mask_len_bits + 7) & ~7) >> 3) mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - mask_len_bits)) j += 1 net = unpack_big_endian(response, j, mask_len_bytes) net <<= (32 - mask_len_bytes * 8) net &= mask j += mask_len_bytes gateway = unpack_big_endian(response, j, 4) j += 4 routes.append((net, mask, gateway)) if j != (i + length + 2): logger.error("Unable to parse routes") return routes def parse_ip_addr(response, option, i, length, bytes_recv): if i + 5 < bytes_recv: if length != 4: logger.error("Endpoint or Default Gateway not 4 bytes") return None addr = unpack_big_endian(response, i + 2, 4) ip_addr = int_to_ip4_addr(addr) return ip_addr else: logger.error("Data too small for option:{0}", option) return None def parse_dhcp_resp(response): """ Parse DHCP response: Returns endpoint server or None on error. """ logger.verbose("parse Dhcp Response") bytes_recv = len(response) endpoint = None gateway = None routes = None # Walk all the returned options, parsing out what we need, ignoring the # others. We need the custom option 245 to find the the endpoint we talk to # as well as to handle some Linux DHCP client incompatibilities; # options 3 for default gateway and 249 for routes; 255 is end. 
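# Each option is encoded as a TLV record: a one-byte option code, a one-byte length,
# then <length> bytes of data, which is why the loop below advances by length + 2.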
i = 0xF0 # offset to first option while i < bytes_recv: option = str_to_ord(response[i]) length = 0 if (i + 1) < bytes_recv: length = str_to_ord(response[i + 1]) logger.verbose("DHCP option {0} at offset:{1} with length:{2}", hex(option), hex(i), hex(length)) if option == 255: logger.verbose("DHCP packet ended at offset:{0}", hex(i)) break elif option == 249: routes = parse_route(response, option, i, length, bytes_recv) elif option == 3: gateway = parse_ip_addr(response, option, i, length, bytes_recv) logger.verbose("Default gateway:{0}, at {1}", gateway, hex(i)) elif option == 245: endpoint = parse_ip_addr(response, option, i, length, bytes_recv) logger.verbose("Azure wire protocol endpoint:{0}, at {1}", endpoint, hex(i)) else: logger.verbose("Skipping DHCP option:{0} at {1} with length {2}", hex(option), hex(i), hex(length)) i += length + 2 return endpoint, gateway, routes def socket_send(request): sock = None try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(("0.0.0.0", 68)) sock.sendto(request, ("<broadcast>", 67)) sock.settimeout(10) logger.verbose("Send DHCP request: Setting socket.timeout=10, " "entering recv") response = sock.recv(1024) return response except IOError as e: raise DhcpError("{0}".format(e)) finally: if sock is not None: sock.close() def build_dhcp_request(mac_addr, request_broadcast): """ Build DHCP request string. """ # # typedef struct _DHCP { # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ # UINT8 HardwareAddressType; /* htype: ethernet */ # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ # UINT8 Hops; /* hops: 0 */ # UINT8 TransactionID[4]; /* xid: random */ # UINT8 Seconds[2]; /* secs: 0 */ # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast*/ # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ # UINT8 ServerName[64]; /* sname: 0 */ # UINT8 BootFileName[128]; /* file: 0 */ # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # # UINT8 MessageTypeCode; /* 53 */ # UINT8 MessageTypeLength; /* 1 */ # UINT8 MessageType; /* 1 for DISCOVER */ # UINT8 End; /* 255 */ # } DHCP; # # list of 244 zeros # (struct.pack_into would be good here, but requires Python 2.5) request = [0] * 244 trans_id = gen_trans_id() # Opcode = 1 # HardwareAddressType = 1 (ethernet/MAC) # HardwareAddressLength = 6 (ethernet/MAC/48 bits) for a in range(0, 3): request[a] = [1, 1, 6][a] # fill in transaction id (random number to ensure response matches request) for a in range(0, 4): request[4 + a] = str_to_ord(trans_id[a]) logger.verbose("BuildDhcpRequest: transactionId:%s,%04X" % ( hex_dump2(trans_id), unpack_big_endian(request, 4, 4))) if request_broadcast: # set broadcast flag to true to request the dhcp server # to respond to a broadcast address, # this is useful when the user's dhclient fails.
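# The flags field occupies bytes 10-11 (see the struct layout above); writing 0x80
# into byte 0x0A sets its most significant bit, i.e. the 0x8000 broadcast flag.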
request[0x0A] = 0x80; # fill in ClientHardwareAddress for a in range(0, 6): request[0x1C + a] = str_to_ord(mac_addr[a]) # DHCP Magic Cookie: 99, 130, 83, 99 # MessageTypeCode = 53 DHCP Message Type # MessageTypeLength = 1 # MessageType = DHCPDISCOVER # End = 255 DHCP_END for a in range(0, 8): request[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] return array.array("B", request) def gen_trans_id(): return os.urandom(4) WALinuxAgent-2.2.20/azurelinuxagent/common/event.py000066400000000000000000000257241322477356400223420ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import sys import traceback import atexit import json import time import datetime import threading import platform from datetime import datetime, timedelta import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import EventError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEventList, \ TelemetryEvent, \ set_properties, get_properties from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_VERSION, \ CURRENT_AGENT, CURRENT_VERSION _EVENT_MSG = "Event: name={0}, op={1}, message={2}, duration={3}" class WALAEventOperation: ActivateResourceDisk = "ActivateResourceDisk" AgentBlacklisted = "AgentBlacklisted" AgentEnabled = "AgentEnabled" AutoUpdate = "AutoUpdate" CustomData = "CustomData" Deploy = "Deploy" Disable = "Disable" Download = "Download" Enable = "Enable" ExtensionProcessing = "ExtensionProcessing" Firewall = "Firewall" HealthCheck = "HealthCheck" HeartBeat = "HeartBeat" HostPlugin = "HostPlugin" HttpErrors = "HttpErrors" Install = "Install" InitializeHostPlugin = "InitializeHostPlugin" Partition = "Partition" ProcessGoalState = "ProcessGoalState" Provision = "Provision" ReportStatus = "ReportStatus" Restart = "Restart" UnhandledError = "UnhandledError" UnInstall = "UnInstall" Unknown = "Unknown" Upgrade = "Upgrade" Update = "Update" class EventStatus(object): EVENT_STATUS_FILE = "event_status.json" def __init__(self, status_dir=conf.get_lib_dir()): self._path = None self._status = {} def clear(self): self._status = {} self._save() def event_marked(self, name, version, op): return self._event_name(name, version, op) in self._status def event_succeeded(self, name, version, op): event = self._event_name(name, version, op) if event not in self._status: return True return self._status[event] == True def initialize(self, status_dir=conf.get_lib_dir()): self._path = os.path.join(status_dir, EventStatus.EVENT_STATUS_FILE) self._load() def mark_event_status(self, name, version, op, status): event = self._event_name(name, version, op) self._status[event] = (status == True) self._save() def _event_name(self, name, version, op): return "{0}-{1}-{2}".format(name, version, op) def _load(self): try: self._status = {} if 
os.path.isfile(self._path): with open(self._path, 'r') as f: self._status = json.load(f) except Exception as e: logger.warn("Exception occurred loading event status: {0}".format(e)) self._status = {} def _save(self): try: with open(self._path, 'w') as f: json.dump(self._status, f) except Exception as e: logger.warn("Exception occurred saving event status: {0}".format(e)) __event_status__ = EventStatus() __event_status_operations__ = [ WALAEventOperation.AutoUpdate, WALAEventOperation.ReportStatus ] def _log_event(name, op, message, duration, is_success=True): global _EVENT_MSG if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) else: logger.info(_EVENT_MSG, name, op, message, duration) class EventLogger(object): def __init__(self): self.event_dir = None self.periodic_events = {} def save_event(self, data): if self.event_dir is None: logger.warn("Cannot save event -- Event reporter is not initialized.") return if not os.path.exists(self.event_dir): os.mkdir(self.event_dir) os.chmod(self.event_dir, 0o700) existing_events = os.listdir(self.event_dir) if len(existing_events) >= 1000: existing_events.sort() oldest_files = existing_events[:-999] logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir)) try: for f in oldest_files: os.remove(os.path.join(self.event_dir, f)) except IOError as e: raise EventError(e) filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000))) try: with open(filename + ".tmp", 'wb+') as hfile: hfile.write(data.encode("utf-8")) os.rename(filename + ".tmp", filename + ".tld") except IOError as e: raise EventError("Failed to write events to file:{0}", e) def reset_periodic(self): self.periodic_events = {} def is_period_elapsed(self, delta, h): return h not in self.periodic_events or \ (self.periodic_events[h] + delta) <= datetime.now() def add_periodic(self, delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, force=False): h = hash(name+op+ustr(is_success)+message) if force or self.is_period_elapsed(delta, h): self.add_event(name, op=op, is_success=is_success, duration=duration, version=version, message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) self.periodic_events[h] = datetime.now() def add_event(self, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True): if not is_success or log_event: _log_event(name, op, message, duration, is_success=is_success) event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError as e: logger.error("{0}", e) __event_logger__ = EventLogger() def elapsed_milliseconds(utc_start): d = datetime.utcnow() - utc_start return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \ (d.microseconds / 1000.0)) def report_event(op, 
is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=is_success, message=message, op=op) def report_periodic(delta, op, is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_periodic(delta, AGENT_NAME, version=CURRENT_VERSION, is_success=is_success, message=message, op=op) def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return if should_emit_event(name, version, op, is_success): mark_event_status(name, version, op, is_success) reporter.add_event( name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) def add_periodic( delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, force=False, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add periodic event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return reporter.add_periodic( delta, name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event, force=force) def mark_event_status(name, version, op, status): if op in __event_status_operations__: __event_status__.mark_event_status(name, version, op, status) def should_emit_event(name, version, op, status): return \ op not in __event_status_operations__ or \ __event_status__ is None or \ not __event_status__.event_marked(name, version, op) or \ __event_status__.event_succeeded(name, version, op) != status def init_event_logger(event_dir): __event_logger__.event_dir = event_dir def init_event_status(status_dir): __event_status__.initialize(status_dir) def dump_unhandled_err(name): if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ hasattr(sys, 'last_traceback'): last_type = getattr(sys, 'last_type') last_value = getattr(sys, 'last_value') last_traceback = getattr(sys, 'last_traceback') error = traceback.format_exception(last_type, last_value, last_traceback) message = "".join(error) add_event(name, is_success=False, message=message, op=WALAEventOperation.UnhandledError) def enable_unhandled_err_dump(name): atexit.register(dump_unhandled_err, name) WALinuxAgent-2.2.20/azurelinuxagent/common/exception.py000066400000000000000000000072431322477356400232130ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # """ Defines all exceptions """ class AgentError(Exception): """ Base class of agent error. """ def __init__(self, msg, inner=None): msg = u"[{0}] {1}".format(type(self).__name__, msg) if inner is not None: msg = u"{0}\nInner error: {1}".format(msg, inner) super(AgentError, self).__init__(msg) class AgentConfigError(AgentError): """ When the configuration file is not found or malformed. """ def __init__(self, msg=None, inner=None): super(AgentConfigError, self).__init__(msg, inner) class AgentNetworkError(AgentError): """ When the network is not available. """ def __init__(self, msg=None, inner=None): super(AgentNetworkError, self).__init__(msg, inner) class ExtensionError(AgentError): """ When an extension fails to execute """ def __init__(self, msg=None, inner=None): super(ExtensionError, self).__init__(msg, inner) class ProvisionError(AgentError): """ When provisioning fails """ def __init__(self, msg=None, inner=None): super(ProvisionError, self).__init__(msg, inner) class ResourceDiskError(AgentError): """ Failed to mount the resource disk """ def __init__(self, msg=None, inner=None): super(ResourceDiskError, self).__init__(msg, inner) class DhcpError(AgentError): """ Failed to handle dhcp response """ def __init__(self, msg=None, inner=None): super(DhcpError, self).__init__(msg, inner) class OSUtilError(AgentError): """ Failed to perform an operation on the OS configuration """ def __init__(self, msg=None, inner=None): super(OSUtilError, self).__init__(msg, inner) class ProtocolError(AgentError): """ Azure protocol error """ def __init__(self, msg=None, inner=None): super(ProtocolError, self).__init__(msg, inner) class ProtocolNotFoundError(ProtocolError): """ Azure protocol endpoint not found """ def __init__(self, msg=None, inner=None): super(ProtocolNotFoundError, self).__init__(msg, inner) class HttpError(AgentError): """ Http request failure """ def __init__(self, msg=None, inner=None): super(HttpError, self).__init__(msg, inner) class EventError(AgentError): """ Event reporting error """ def __init__(self, msg=None, inner=None): super(EventError, self).__init__(msg, inner) class CryptError(AgentError): """ Encrypt/Decrypt error """ def __init__(self, msg=None, inner=None): super(CryptError, self).__init__(msg, inner) class UpdateError(AgentError): """ Update Guest Agent error """ def __init__(self, msg=None, inner=None): super(UpdateError, self).__init__(msg, inner) class ResourceGoneError(HttpError): """ The requested resource no longer exists (i.e., status code 410) """ def __init__(self, msg=None, inner=None): if msg is None: msg = "Resource is gone" super(ResourceGoneError, self).__init__(msg, inner) WALinuxAgent-2.2.20/azurelinuxagent/common/future.py000066400000000000000000000010231322477356400225160ustar00rootroot00000000000000import sys """ Add aliases for python2 and python3 libs and functions.
""" if sys.version_info[0] == 3: import http.client as httpclient from urllib.parse import urlparse """Rename Python3 str to ustr""" ustr = str bytebuffer = memoryview elif sys.version_info[0] == 2: import httplib as httpclient from urlparse import urlparse """Rename Python2 unicode to ustr""" ustr = unicode bytebuffer = buffer else: raise ImportError("Unknown python version: {0}".format(sys.version_info)) WALinuxAgent-2.2.20/azurelinuxagent/common/logger.py000066400000000000000000000127011322477356400224670ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and openssl_bin 1.0+ # """ Log utils """ import os import sys from azurelinuxagent.common.future import ustr from datetime import datetime, timedelta EVERY_DAY = timedelta(days=1) EVERY_HALF_DAY = timedelta(hours=12) EVERY_HOUR = timedelta(hours=1) EVERY_HALF_HOUR = timedelta(minutes=30) EVERY_FIFTEEN_MINUTES = timedelta(minutes=15) class Logger(object): """ Logger class """ def __init__(self, logger=None, prefix=None): self.appenders = [] self.logger = self if logger is None else logger self.periodic_messages = {} self.prefix = prefix def reset_periodic(self): self.logger.periodic_messages = {} def is_period_elapsed(self, delta, h): return h not in self.logger.periodic_messages or \ (self.logger.periodic_messages[h] + delta) <= datetime.now() def periodic(self, delta, msg_format, *args): h = hash(msg_format) if self.is_period_elapsed(delta, h): self.info(msg_format, *args) self.logger.periodic_messages[h] = datetime.now() def verbose(self, msg_format, *args): self.log(LogLevel.VERBOSE, msg_format, *args) def info(self, msg_format, *args): self.log(LogLevel.INFO, msg_format, *args) def warn(self, msg_format, *args): self.log(LogLevel.WARNING, msg_format, *args) def error(self, msg_format, *args): self.log(LogLevel.ERROR, msg_format, *args) def log(self, level, msg_format, *args): #if msg_format is not unicode convert it to unicode if type(msg_format) is not ustr: msg_format = ustr(msg_format, errors="backslashreplace") if len(args) > 0: msg = msg_format.format(*args) else: msg = msg_format time = datetime.now().strftime(u'%Y/%m/%d %H:%M:%S.%f') level_str = LogLevel.STRINGS[level] if self.prefix is not None: log_item = u"{0} {1} {2} {3}\n".format(time, level_str, self.prefix, msg) else: log_item = u"{0} {1} {2}\n".format(time, level_str, msg) log_item = ustr(log_item.encode('ascii', "backslashreplace"), encoding="ascii") for appender in self.appenders: appender.write(level, log_item) if self.logger != self: for appender in self.logger.appenders: appender.write(level, log_item) def add_appender(self, appender_type, level, path): appender = _create_logger_appender(appender_type, level, path) self.appenders.append(appender) class ConsoleAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "w") as console: console.write(msg) except IOError: pass class 
FileAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "a+") as log_file: log_file.write(msg) except IOError: pass class StdoutAppender(object): def __init__(self, level): self.level = level def write(self, level, msg): if self.level <= level: try: sys.stdout.write(msg) except IOError: pass #Initialize logger instance DEFAULT_LOGGER = Logger() class LogLevel(object): VERBOSE = 0 INFO = 1 WARNING = 2 ERROR = 3 STRINGS = [ "VERBOSE", "INFO", "WARNING", "ERROR" ] class AppenderType(object): FILE = 0 CONSOLE = 1 STDOUT = 2 def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) def reset_periodic(): DEFAULT_LOGGER.reset_periodic() def periodic(delta, msg_format, *args): DEFAULT_LOGGER.periodic(delta, msg_format, *args) def verbose(msg_format, *args): DEFAULT_LOGGER.verbose(msg_format, *args) def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) def warn(msg_format, *args): DEFAULT_LOGGER.warn(msg_format, *args) def error(msg_format, *args): DEFAULT_LOGGER.error(msg_format, *args) def log(level, msg_format, *args): DEFAULT_LOGGER.log(level, msg_format, args) def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): if appender_type == AppenderType.CONSOLE: return ConsoleAppender(level, path) elif appender_type == AppenderType.FILE: return FileAppender(level, path) elif appender_type == AppenderType.STDOUT: return StdoutAppender(level) else: raise ValueError("Unknown appender type") WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/000077500000000000000000000000001322477356400221545ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/__init__.py000066400000000000000000000012631322477356400242670ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import get_osutil WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/alpine.py000066400000000000000000000032701322477356400240000ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class AlpineOSUtil(DefaultOSUtil): def __init__(self): super(AlpineOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' def is_dhcp_enabled(self): return True def get_dhcp_pid(self): ret = shellutil.run_get_output('pidof dhcpcd', chk_err=False) if ret[0] == 0: logger.info('dhcpcd is pid {}'.format(ret[1])) return ret[1].strip() return None def restart_if(self, ifname): logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname)) pid = self.get_dhcp_pid() if pid != None: ret = shellutil.run_get_output('kill -HUP {}'.format(pid)) def set_ssh_client_alive_interval(self): # Alpine will handle this. pass def conf_sshd(self, disable_password): # Alpine will handle this. pass WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/arch.py000066400000000000000000000035011322477356400234420ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class ArchUtil(DefaultOSUtil): def is_dhcp_enabled(self): return True def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, iface): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated on CoreOS. No need to restart it. pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret= shellutil.run_get_output("pidof systemd-networkd") return ret[1] if ret[0] == 0 else None def conf_sshd(self, disable_password): # Don't whack the system default sshd conf passWALinuxAgent-2.2.20/azurelinuxagent/common/osutil/bigip.py000066400000000000000000000320321322477356400236200ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import array import fcntl import os import platform import re import socket import struct import time try: # WAAgent > 2.1.3 import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil except ImportError: # WAAgent <= 2.1.3 import azurelinuxagent.logger as logger import azurelinuxagent.utils.shellutil as shellutil from azurelinuxagent.exception import OSUtilError from azurelinuxagent.distro.default.osutil import DefaultOSUtil class BigIpOSUtil(DefaultOSUtil): def __init__(self): super(BigIpOSUtil, self).__init__() def _wait_until_mcpd_is_initialized(self): """Wait for mcpd to become available All configuration happens in mcpd so we need to wait that this is available before we go provisioning the system. I call this method at the first opportunity I have (during the DVD mounting call). This ensures that the rest of the provisioning does not need to wait for mcpd to be available unless it absolutely wants to. :return bool: Returns True upon success :raises OSUtilError: Raises exception if mcpd does not come up within roughly 50 minutes (100 * 30 seconds) """ for retries in range(1, 100): # Retry until mcpd completes startup: logger.info("Checking to see if mcpd is up") rc = shellutil.run("/usr/bin/tmsh -a show sys mcp-state field-fmt 2>/dev/null | grep phase | grep running", chk_err=False) if rc == 0: logger.info("mcpd is up!") break time.sleep(30) if rc is 0: return True raise OSUtilError( "mcpd hasn't completed initialization! Cannot proceed!" ) def _save_sys_config(self): cmd = "/usr/bin/tmsh save sys config" rc = shellutil.run(cmd) if rc != 0: logger.error("WARNING: Cannot save sys config on 1st boot.") return rc def restart_ssh_service(self): return shellutil.run("/usr/bin/bigstart restart sshd", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service waagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service waagent start", chk_err=False) def register_agent_service(self): return shellutil.run("/sbin/chkconfig --add waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("/sbin/chkconfig --del waagent", chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("/sbin/pidof dhclient") return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): """Set the static hostname of the device Normally, tmsh is used to set the hostname for the system. For our purposes at this time though, I would hesitate to trust this function. Azure(Stack) uses the name that you provide in the Web UI or ARM (for example) as the value of the hostname argument to this method. The problem is that there is nowhere in the UI that specifies the restrictions and checks that tmsh has for the hostname. For example, if you set the name "bigip1" in the Web UI, Azure(Stack) considers that a perfectly valid name. When WAAgent gets around to running though, tmsh will reject that value because it is not a fully qualified domain name. The proper value should have been bigip.xxx.yyy WAAgent will not fail if this command fails, but the hostname will not be what the user set either. Currently we do not set the hostname when WAAgent starts up, so I am passing on setting it here too. 
:param hostname: The hostname to set on the device """ return None def set_dhcp_hostname(self, hostname): """Sets the DHCP hostname See `set_hostname` for an explanation of why I pass here :param hostname: The hostname to set on the device """ return None def useradd(self, username, expiration=None): """Create user account using tmsh Our policy is to create two accounts when booting a BIG-IP instance. The first account is the one that the user specified when they did the instance creation. The second one is the admin account that is, or should be, built in to the system. :param username: The username that you want to add to the system :param expiration: The expiration date to use. We do not use this value. """ if self.get_userentry(username): logger.info("User {0} already exists, skip useradd", username) return None cmd = "/usr/bin/tmsh create auth user %s partition-access add { all-partitions { role admin } } shell bash" % (username) retcode, out = shellutil.run_get_output(cmd, log_cmd=True, chk_err=True) if retcode != 0: raise OSUtilError( "Failed to create user account:{0}, retcode:{1}, output:{2}".format(username, retcode, out) ) self._save_sys_config() return retcode def chpasswd(self, username, password, crypt_id=6, salt_len=10): """Change a user's password with tmsh Since we are creating the user specified account and additionally changing the password of the built-in 'admin' account, both must be modified in this method. Note that the default method also checks for a "system level" of the user; based on the value of UID_MIN in /etc/login.defs. In our env, all user accounts have the UID 0. So we can't rely on this value. :param username: The username whose password to change :param password: The unencrypted password to set for the user :param crypt_id: If encrypting the password, the crypt_id that was used :param salt_len: If encrypting the password, the length of the salt value used to do it. """ # Start by setting the password of the user provided account cmd = "/usr/bin/tmsh modify auth user {0} password '{1}'".format(username, password) ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True) if ret != 0: raise OSUtilError( "Failed to set password for {0}: {1}".format(username, output) ) # Next, set the password of the built-in 'admin' account to be have # the same password as the user provided account userentry = self.get_userentry('admin') if userentry is None: raise OSUtilError("The 'admin' user account was not found!") cmd = "/usr/bin/tmsh modify auth user 'admin' password '{0}'".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True) if ret != 0: raise OSUtilError( "Failed to set password for 'admin': {0}".format(output) ) self._save_sys_config() return ret def del_account(self, username): """Deletes a user account. Note that the default method also checks for a "system level" of the user; based on the value of UID_MIN in /etc/login.defs. In our env, all user accounts have the UID 0. So we can't rely on this value. We also don't use sudo, so we remove that method call as well. :param username: :return: """ shellutil.run("> /var/run/utmp") shellutil.run("/usr/bin/tmsh delete auth user " + username) def get_dvd_device(self, dev_dir='/dev'): """Find BIG-IP's CD/DVD device This device is almost certainly /dev/cdrom so I added the ? to this pattern. Note that this method will return upon the first device found, but in my tests with 12.1.1 it will also find /dev/sr0 on occasion. This is NOT the correct CD/DVD device though. 
:todo: Consider just always returning "/dev/cdrom" here if that device device exists on all platforms that are supported on Azure(Stack) :param dev_dir: The root directory from which to look for devices """ patten = r'(sr[0-9]|hd[c-z]|cdrom[0-9]?)' for dvd in [re.match(patten, dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get dvd device") def mount_dvd(self, **kwargs): """Mount the DVD containing the provisioningiso.iso file This is the _first_ hook that WAAgent provides for us, so this is the point where we should wait for mcpd to load. I am just overloading this method to add the mcpd wait. Then I proceed with the stock code. :param max_retry: Maximum number of retries waagent will make when mounting the provisioningiso.iso DVD :param chk_err: Whether to check for errors or not in the mounting commands """ self._wait_until_mcpd_is_initialized() return super(BigIpOSUtil, self).mount_dvd(**kwargs) def eject_dvd(self, chk_err=True): """Runs the eject command to eject the provisioning DVD BIG-IP does not include an eject command. It is sufficient to just umount the DVD disk. But I will log that we do not support this for future reference. :param chk_err: Whether or not to check for errors raised by the eject command """ logger.warn("Eject is not supported on this platform") def get_first_if(self): """Return the interface name, and ip addr of the management interface. We need to add a struct_size check here because, curiously, our 64bit platform is identified by python in Azure(Stack) as 32 bit and without adjusting the struct_size, we can't get the information we need. I believe this may be caused by only python i686 being shipped with BIG-IP instead of python x86_64?? """ iface = '' expected = 16 # how many devices should I expect... python_arc = platform.architecture()[0] if python_arc == '64bit': struct_size = 40 # for 64bit the size is 40 bytes else: struct_size = 32 # for 32bit the size is 32 bytes sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) buff = array.array('B', b'\0' * (expected * struct_size)) param = struct.pack('iL', expected*struct_size, buff.buffer_info()[0]) ret = fcntl.ioctl(sock.fileno(), 0x8912, param) retsize = (struct.unpack('iL', ret)[0]) if retsize == (expected * struct_size): logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) sock = buff.tostring() for i in range(0, struct_size * expected, struct_size): iface = self._format_single_interface_name(sock, i) # Azure public was returning "lo:1" when deploying WAF if b'lo' in iface: continue else: break return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) def _format_single_interface_name(self, sock, offset): return sock[offset:offset+16].split(b'\0', 1)[0] def route_add(self, net, mask, gateway): """Add specified route using tmsh. :param net: :param mask: :param gateway: :return: """ cmd = ("/usr/bin/tmsh create net route " "{0}/{1} gw {2}").format(net, mask, gateway) return shellutil.run(cmd, chk_err=False) def device_for_ide_port(self, port_id): """Return device name attached to ide port 'n'. Include a wait in here because BIG-IP may not have yet initialized this list of devices. 
:param port_id: :return: """ for retries in range(1, 100): # Retry until devices are ready if os.path.exists("/sys/bus/vmbus/devices/"): break else: time.sleep(10) return super(BigIpOSUtil, self).device_for_ide_port(port_id) WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/clearlinux.py000066400000000000000000000061271322477356400247020ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class ClearLinuxUtil(DefaultOSUtil): def __init__(self): super(ClearLinuxUtil, self).__init__() self.agent_conf_file_path = '/usr/share/defaults/waagent/waagent.conf' def is_dhcp_enabled(self): return True def start_network(self) : return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, iface): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated. No need to restart it. pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret= shellutil.run_get_output("pidof systemd-networkd") return ret[1] if ret[0] == 0 else None def conf_sshd(self, disable_password): # Don't whack the system default sshd conf pass def del_root_password(self): try: passwd_file_path = conf.get_passwd_file_path() try: passwd_content = fileutil.read_file(passwd_file_path) if not passwd_content: # Empty file is no better than no file raise FileNotFoundError except FileNotFoundError: new_passwd = ["root:*LOCK*:14600::::::"] else: passwd = passwd_content.split('\n') new_passwd = [x for x in passwd if not x.startswith("root:")] new_passwd.insert(0, "root:*LOCK*:14600::::::") fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) pass WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/coreos.py000066400000000000000000000055501322477356400240250ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class CoreOSUtil(DefaultOSUtil): def __init__(self): super(CoreOSUtil, self).__init__() self.agent_conf_file_path = '/usr/share/oem/waagent.conf' self.waagent_path = '/usr/share/oem/bin/waagent' self.python_path = '/usr/share/oem/python/bin' if 'PATH' in os.environ: path = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: path = self.python_path os.environ['PATH'] = path if 'PYTHONPATH' in os.environ: py_path = os.environ['PYTHONPATH'] py_path = "{0}:{1}".format(py_path, self.waagent_path) else: py_path = self.waagent_path os.environ['PYTHONPATH'] = py_path def is_sys_user(self, username): # User 'core' is not a sysuser. if username == 'core': return False return super(CoreOSUtil, self).is_sys_user(username) def is_dhcp_enabled(self): return True def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, *dummy, **_): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated on CoreOS. No need to restart it. pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("systemctl show -p MainPID " "systemd-networkd", chk_err=False) pid = ret[1].split('=', 1)[-1].strip() if ret[0] == 0 else None return pid if pid != '0' else None def conf_sshd(self, disable_password): # In CoreOS, /etc/sshd_config is mount readonly. Skip the setting. pass WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/debian.py000066400000000000000000000034311322477356400237510ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class DebianOSUtil(DefaultOSUtil): def __init__(self): super(DebianOSUtil, self).__init__() def restart_ssh_service(self): return shellutil.run("systemctl --job-mode=ignore-dependencies try-reload-or-restart ssh", chk_err=False) def stop_agent_service(self): return shellutil.run("service azurelinuxagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("service azurelinuxagent start", chk_err=False) def start_network(self): pass def remove_rules_files(self, rules_files=""): pass def restore_rules_files(self, rules_files=""): pass def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/default.py000066400000000000000000001166231322477356400241630ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import array import base64 import datetime import fcntl import glob import multiprocessing import os import platform import pwd import re import shutil import socket import struct import sys import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] """ Define distro specific behavior. OSUtil class defines default behavior for all distros. Each concrete distro classes could overwrite default behavior if needed. 
""" IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") IPTABLES_VERSION = "iptables --version" IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21') FIREWALL_ACCEPT = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m owner --uid-owner {3} -j ACCEPT" # Note: # -- Initially "flight" the change to ACCEPT packets and develop a metric baseline # A subsequent release will convert the ACCEPT to DROP FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" # FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" FIREWALL_LIST = "iptables {0} -t security -L -nxv" FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv" FIREWALL_FLUSH = "iptables {0} -t security --flush" PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" _enable_firewall = True DMIDECODE_CMD = 'dmidecode --string system-uuid' PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid' UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) class DefaultOSUtil(object): def __init__(self): self.agent_conf_file_path = '/etc/waagent.conf' self.selinux = None self.disable_route_warning = False def get_firewall_dropped_packets(self, dst_ip=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return 0 try: wait = self.get_firewall_will_wait() rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait)) if rc != 0: return -1 pattern = re.compile(PACKET_PATTERN.format(dst_ip)) for line in output.split('\n'): m = pattern.match(line) if m is not None: return int(m.group(1)) return 0 except Exception as e: _enable_firewall = False logger.warn("Unable to retrieve firewall packets dropped" "{0}".format(ustr(e))) return -1 def get_firewall_will_wait(self): # Determine if iptables will serialize access rc, output = shellutil.run_get_output(IPTABLES_VERSION) if rc != 0: msg = "Unable to determine version of iptables" logger.warn(msg) raise Exception(msg) m = IPTABLES_VERSION_PATTERN.match(output) if m is None: msg = "iptables did not return version information" logger.warn(msg) raise Exception(msg) wait = "-w" \ if FlexibleVersion(m.group(1)) >= IPTABLES_LOCKING_VERSION \ else "" return wait def remove_firewall(self): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: wait = self.get_firewall_will_wait() flush_rule = FIREWALL_FLUSH.format(wait) if shellutil.run(flush_rule, chk_err=True) != 0: raise Exception("non-zero return code") return True except Exception as e: _enable_firewall = False logger.info("Unable to flush firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False def enable_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: if dst_ip is None or uid is None: msg = "Missing arguments to enable_firewall" logger.warn(msg) raise Exception(msg) wait = self.get_firewall_will_wait() # If the DROP rule exists, make no changes drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip) if shellutil.run(drop_rule, chk_err=False) == 0: logger.verbose("Firewall appears established") return True # Otherwise, append both rules accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid) drop_rule = FIREWALL_DROP.format(wait, "A", dst_ip) if shellutil.run(accept_rule) != 0: msg = "Unable to add ACCEPT firewall rule '{0}'".format( 
accept_rule) logger.warn(msg) raise Exception(msg) if shellutil.run(drop_rule) != 0: msg = "Unable to add DROP firewall rule '{0}'".format( drop_rule) logger.warn(msg) raise Exception(msg) logger.info("Successfully added Azure fabric firewall rules") rc, output = shellutil.run_get_output(FIREWALL_LIST.format(wait)) if rc == 0: logger.info("Firewall rules:\n{0}".format(output)) else: logger.warn("Listing firewall rules failed: {0}".format(output)) return True except Exception as e: _enable_firewall = False logger.info("Unable to establish firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False def _correct_instance_id(self, id): ''' Azure stores the instance ID with an incorrect byte ordering for the first parts. For example, the ID returned by the metadata service: D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8 will be found as: 544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8 This code corrects the byte order such that it is consistent with that returned by the metadata service. ''' if not UUID_PATTERN.match(id): return id parts = id.split('-') return '-'.join([ textutil.swap_hexstring(parts[0], width=2), textutil.swap_hexstring(parts[1], width=2), textutil.swap_hexstring(parts[2], width=2), parts[3], parts[4] ]) def is_current_instance_id(self, id_that): ''' Compare two instance IDs for equality, but allow that some IDs may have been persisted using the incorrect byte ordering. ''' id_this = self.get_instance_id() return id_that == id_this or \ id_that == self._correct_instance_id(id_this) def get_agent_conf_file_path(self): return self.agent_conf_file_path def get_instance_id(self): ''' Azure records a UUID as the instance ID First check /sys/class/dmi/id/product_uuid. If that is missing, then extracts from dmidecode If nothing works (for old VMs), return the empty string ''' if os.path.isfile(PRODUCT_ID_FILE): s = fileutil.read_file(PRODUCT_ID_FILE).strip() else: rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" return self._correct_instance_id(s.strip()) def get_userentry(self, username): try: return pwd.getpwnam(username) except KeyError: return None def is_sys_user(self, username): """ Check whether use is a system user. 
If reset sys user is allowed in conf, return False Otherwise, check whether UID is less than UID_MIN """ if conf.get_allow_reset_sys_user(): return False userentry = self.get_userentry(username) uidmin = None try: uidmin_def = fileutil.get_line_startingwith("UID_MIN", "/etc/login.defs") if uidmin_def is not None: uidmin = int(uidmin_def.split()[1]) except IOError as e: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: return True else: return False def useradd(self, username, expiration=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.info("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "useradd -m {0} -e {1}".format(username, expiration) else: cmd = "useradd -m {0}".format(username) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def conf_sudoer(self, username, nopasswd=False, remove=False): sudoers_dir = conf.get_sudoers_dir() sudoers_wagent = os.path.join(sudoers_dir, 'waagent') if not remove: # for older distros create sudoers.d if not os.path.isdir(sudoers_dir): sudoers_file = os.path.join(sudoers_dir, '../sudoers') # create the sudoers.d directory os.mkdir(sudoers_dir) # add the include of sudoers.d to the /etc/sudoers sudoers = '\n#includedir ' + sudoers_dir + '\n' fileutil.append_file(sudoers_file, sudoers) sudoer = None if nopasswd: sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username) else: sudoer = "{0} ALL=(ALL) ALL".format(username) if not os.path.isfile(sudoers_wagent) or \ fileutil.findstr_in_file(sudoers_wagent, sudoer) is False: fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer)) fileutil.chmod(sudoers_wagent, 0o440) else: # remove user from sudoers if os.path.isfile(sudoers_wagent): try: content = fileutil.read_file(sudoers_wagent) sudoers = content.split("\n") sudoers = [x for x in sudoers if username not in x] fileutil.write_file(sudoers_wagent, "\n".join(sudoers)) except IOError as e: raise OSUtilError("Failed to remove sudoer: {0}".format(e)) def del_root_password(self): try: passwd_file_path = conf.get_passwd_file_path() passwd_content = fileutil.read_file(passwd_file_path) passwd = passwd_content.split('\n') new_passwd = [x for x in passwd if not x.startswith("root:")] new_passwd.insert(0, "root:*LOCK*:14600::::::") fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) def _norm_path(self, filepath): home = conf.get_home_dir() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) return path def deploy_ssh_keypair(self, username, keypair): """ Deploy id_rsa and id_rsa.pub """ path, thumbprint = keypair path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) lib_dir = conf.get_lib_dir() prv_path = 
os.path.join(lib_dir, thumbprint + '.prv') if not os.path.isfile(prv_path): raise OSUtilError("Can't find {0}.prv".format(thumbprint)) shutil.copyfile(prv_path, path) pub_path = path + '.pub' crytputil = CryptUtil(conf.get_openssl_cmd()) pub = crytputil.get_pubkey_from_prv(prv_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') os.chmod(path, 0o644) os.chmod(pub_path, 0o600) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.crt_to_ssh(input_file, output_file) def deploy_ssh_pubkey(self, username, pubkey): """ Deploy authorized_key """ path, thumbprint, value = pubkey if path is None: raise OSUtilError("Public key path is None") crytputil = CryptUtil(conf.get_openssl_cmd()) path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) if value is not None: if not value.startswith("ssh-"): raise OSUtilError("Bad public key: {0}".format(value)) fileutil.write_file(path, value) elif thumbprint is not None: lib_dir = conf.get_lib_dir() crt_path = os.path.join(lib_dir, thumbprint + '.crt') if not os.path.isfile(crt_path): raise OSUtilError("Can't find {0}.crt".format(thumbprint)) pub_path = os.path.join(lib_dir, thumbprint + '.pub') pub = crytputil.get_pubkey_from_crt(crt_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.openssl_to_openssh(pub_path, path) fileutil.chmod(pub_path, 0o600) else: raise OSUtilError("SSH public key Fingerprint and Value are None") self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') fileutil.chowner(path, username) fileutil.chmod(path, 0o644) def is_selinux_system(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if shellutil.run("which getenforce", chk_err=False) == 0: self.selinux = True else: self.selinux = False return self.selinux def is_selinux_enforcing(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.is_selinux_system(): output = shellutil.run_get_output("getenforce")[1] return output.startswith("Enforcing") else: return False def set_selinux_context(self, path, con): """ Calls shell 'chcon' with 'path' and 'con' context. Returns exit result. """ if self.is_selinux_system(): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) return 1 return shellutil.run('chcon ' + con + ' ' + path) def conf_sshd(self, disable_password): option = "no" if disable_password else "yes" conf_file_path = conf.get_sshd_conf_file_path() conf_file = fileutil.read_file(conf_file_path).split("\n") textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option) textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval())) fileutil.write_file(conf_file_path, "\n".join(conf_file)) logger.info("{0} SSH password-based authentication methods." 
.format("Disabled" if disable_password else "Enabled")) logger.info("Configured SSH client probing to keep connections alive.") def get_dvd_device(self, dev_dir='/dev'): pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])' device_list = os.listdir(dev_dir) for dvd in [re.match(pattern, dev) for dev in device_list]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) inner_detail = "The following devices were found, but none matched " \ "the pattern [{0}]: {1}\n".format(pattern, device_list) raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir), inner=inner_detail) def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() mount_list = shellutil.run_get_output("mount")[1] existing = self.get_mount_point(mount_list, dvd_device) if existing is not None: # already mounted logger.info("{0} is already mounted at {1}", dvd_device, existing) return if not os.path.isdir(mount_point): os.makedirs(mount_point) err = '' for retry in range(1, max_retry): return_code, err = self.mount(dvd_device, mount_point, option="-o ro -t udf,iso9660", chk_err=False) if return_code == 0: logger.info("Successfully mounted dvd") return else: logger.warn( "Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]", retry, max_retry - 1, sleep_time) if retry < max_retry: time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount dvd device", inner=err) def umount_dvd(self, chk_err=True, mount_point=None): if mount_point is None: mount_point = conf.get_dvd_mount_point() return_code = self.umount(mount_point, chk_err=chk_err) if chk_err and return_code != 0: raise OSUtilError("Failed to unmount dvd device at {0}", mount_point) def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) def try_load_atapiix_mod(self): try: self.load_atapiix_mod() except Exception as e: logger.warn("Could not load ATAPI driver: {0}".format(e)) def load_atapiix_mod(self): if self.is_atapiix_mod_loaded(): return ret, kern_version = shellutil.run_get_output("uname -r") if ret != 0: raise Exception("Failed to call uname -r") mod_path = os.path.join('/lib/modules', kern_version.strip('\n'), 'kernel/drivers/ata/ata_piix.ko') if not os.path.isfile(mod_path): raise Exception("Can't find module file:{0}".format(mod_path)) ret, output = shellutil.run_get_output("insmod " + mod_path) if ret != 0: raise Exception("Error calling insmod for ATAPI CD-ROM driver") if not self.is_atapiix_mod_loaded(max_retry=3): raise Exception("Failed to load ATAPI CD-ROM driver") def is_atapiix_mod_loaded(self, max_retry=1): for retry in range(0, max_retry): ret = shellutil.run("lsmod | grep ata_piix", chk_err=False) if ret == 0: logger.info("Module driver for ATAPI CD-ROM is already present.") return True if retry < max_retry - 1: time.sleep(1) return False def mount(self, dvd, mount_point, option="", chk_err=True): cmd = "mount {0} {1} {2}".format(option, dvd, mount_point) retcode, err = shellutil.run_get_output(cmd, chk_err) if retcode != 0: detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err) err = detail return retcode, err def umount(self, mount_point, chk_err=True): return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) def allow_dhcp_broadcast(self): #Open DHCP port if iptables is enabled. 
# We supress error logging on error. shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) def remove_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for src in rules_files: file_name = fileutil.base_name(src) dest = os.path.join(lib_dir, file_name) if os.path.isfile(dest): os.remove(dest) if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", file_name, dest) shutil.move(src, dest) def restore_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for dest in rules_files: filename = fileutil.base_name(dest) src = os.path.join(lib_dir, filename) if os.path.isfile(dest): continue if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", filename, dest) shutil.move(src, dest) def get_mac_addr(self): """ Convienience function, returns mac addr bound to first non-loopback interface. """ ifname='' while len(ifname) < 2 : ifname=self.get_first_if()[0] addr = self.get_if_mac(ifname) return textutil.hexstr_to_bytearray(addr) def get_if_mac(self, ifname): """ Return the mac-address bound to the socket. """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) info = fcntl.ioctl(sock.fileno(), 0x8927, param) return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) def get_first_if(self): """ Return the interface name, and ip addr of the first active non-loopback interface. """ iface='' expected=16 # how many devices should I expect... # for 64bit the size is 40 bytes # for 32bit the size is 32 bytes python_arc = platform.architecture()[0] struct_size = 32 if python_arc == '32bit' else 40 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) buff=array.array('B', b'\0' * (expected * struct_size)) param = struct.pack('iL', expected*struct_size, buff.buffer_info()[0]) ret = fcntl.ioctl(sock.fileno(), 0x8912, param) retsize=(struct.unpack('iL', ret)[0]) if retsize == (expected * struct_size): logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) sock = buff.tostring() primary = bytearray(self.get_primary_interface(), encoding='utf-8') for i in range(0, struct_size * expected, struct_size): iface=sock[i:i+16].split(b'\0', 1)[0] if len(iface) == 0 or self.is_loopback(iface) or iface != primary: # test the next one if len(iface) != 0 and not self.disable_route_warning: logger.info('Interface [{0}] skipped'.format(iface)) continue else: # use this one logger.info('Interface [{0}] selected'.format(iface)) break return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) def get_primary_interface(self): """ Get the name of the primary interface, which is the one with the default route attached to it; if there are multiple default routes, the primary has the lowest Metric. 
:return: the interface which has the default route """ # from linux/route.h RTF_GATEWAY = 0x02 DEFAULT_DEST = "00000000" hdr_iface = "Iface" hdr_dest = "Destination" hdr_flags = "Flags" hdr_metric = "Metric" idx_iface = -1 idx_dest = -1 idx_flags = -1 idx_metric = -1 primary = None primary_metric = None if not self.disable_route_warning: logger.info("Examine /proc/net/route for primary interface") with open('/proc/net/route') as routing_table: idx = 0 for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): if header == hdr_iface: idx_iface = idx elif header == hdr_dest: idx_dest = idx elif header == hdr_flags: idx_flags = idx elif header == hdr_metric: idx_metric = idx idx = idx + 1 for entry in routing_table.readlines(): route = entry.strip(" \n").split("\t") if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY: metric = int(route[idx_metric]) iface = route[idx_iface] if primary is None or metric < primary_metric: primary = iface primary_metric = metric if primary is None: primary = '' if not self.disable_route_warning: with open('/proc/net/route') as routing_table_fh: routing_table_text = routing_table_fh.read() logger.warn('Could not determine primary interface, ' 'please ensure /proc/net/route is correct') logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text)) logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: logger.info('Primary interface is [{0}]'.format(primary)) self.disable_route_warning = False return primary def is_primary_interface(self, ifname): """ Indicate whether the specified interface is the primary. :param ifname: the name of the interface - eth0, lo, etc. :return: True if this interface binds the default route """ return self.get_primary_interface() == ifname def is_loopback(self, ifname): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15])) flags, = struct.unpack('H', result[16:18]) isloopback = flags & 8 == 8 if not self.disable_route_warning: logger.info('interface [{0}] has flags [{1}], ' 'is loopback [{2}]'.format(ifname, flags, isloopback)) return isloopback def get_dhcp_lease_endpoint(self): """ OS specific, this should return the decoded endpoint of the wireserver from option 245 in the dhcp leases file if it exists on disk. :return: The endpoint if available, or None """ return None @staticmethod def get_endpoint_from_leases_path(pathglob): """ Try to discover and decode the wireserver endpoint in the specified dhcp leases path. 
:param pathglob: The path containing dhcp lease files :return: The endpoint if available, otherwise None """ endpoint = None HEADER_LEASE = "lease" HEADER_OPTION = "option unknown-245" HEADER_DNS = "option domain-name-servers" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION in leases: cached_endpoint = None has_option_245 = False expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None has_option_245 = False expired = True elif HEADER_DNS in line: cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;") elif HEADER_OPTION in line: has_option_245 = True elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split(" ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except: logger.error("could not parse expiry token '{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( cached_endpoint, has_option_245, expired)) if not expired and cached_endpoint is not None and has_option_245: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def is_missing_default_route(self): routes = shellutil.run_get_output("route -n")[1] for route in routes.split("\n"): if route.startswith("0.0.0.0 ") or route.startswith("default "): return False return True def get_if_name(self): return self.get_first_if()[0] def get_ip4_addr(self): return self.get_first_if()[1] def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route del 255.255.255.255 dev {0}".format(ifname), chk_err=False) def is_dhcp_enabled(self): return False def stop_dhcp_service(self): pass def start_dhcp_service(self): pass def start_network(self): pass def start_agent_service(self): pass def stop_agent_service(self): pass def register_agent_service(self): pass def unregister_agent_service(self): pass def restart_ssh_service(self): pass def route_add(self, net, mask, gateway): """ Add specified route using /sbin/route add -net. 
""" cmd = ("/sbin/route add -net " "{0} netmask {1} gw {2}").format(net, mask, gateway) return shellutil.run(cmd, chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("pidof dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): fileutil.write_file('/etc/hostname', hostname) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): autosend = r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])' dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf'] for conf_file in dhclient_files: if not os.path.isfile(conf_file): continue if fileutil.findre_in_file(conf_file, autosend): #Return if auto send host-name is configured return fileutil.update_conf_file(conf_file, 'send host-name', 'send host-name "{0}";'.format(hostname)) def restart_if(self, ifname, retries=3, wait=5): retry_limit=retries+1 for attempt in range(1, retry_limit): return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname)) if return_code == 0: return logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) if attempt < retry_limit: logger.info("retrying in {0} seconds".format(wait)) time.sleep(wait) else: logger.warn("exceeded restart retries") def publish_hostname(self, hostname): self.set_dhcp_hostname(hostname) self.set_hostname_record(hostname) ifname = self.get_if_name() self.restart_if(ifname) def set_scsi_disks_timeout(self, timeout): for dev in os.listdir("/sys/block"): if dev.startswith('sd'): self.set_block_device_timeout(dev, timeout) def set_block_device_timeout(self, dev, timeout): if dev is not None and timeout is not None: file_path = "/sys/block/{0}/device/timeout".format(dev) content = fileutil.read_file(file_path) original = content.splitlines()[0].rstrip() if original != timeout: fileutil.write_file(file_path, timeout) logger.info("Set block dev timeout: {0} with timeout: {1}", dev, timeout) def get_mount_point(self, mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. 
""" if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 device = None path = "/sys/bus/vmbus/devices/" if os.path.exists(path): for vmbus in os.listdir(path): deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) guid = deviceid.lstrip('{').split('-') if guid[0] == g0 and guid[1] == "000" + ustr(port_id): for root, dirs, files in os.walk(path + vmbus): if root.endswith("/block"): device = dirs[0] break else : #older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] break break return device def set_hostname_record(self, hostname): fileutil.write_file(conf.get_published_hostname(), contents=hostname) def get_hostname_record(self): hostname_record = conf.get_published_hostname() if not os.path.exists(hostname_record): # this file is created at provisioning time with agents >= 2.2.3 hostname = socket.gethostname() logger.warn('Hostname record does not exist, ' 'creating [{0}] with hostname [{1}]', hostname_record, hostname) self.set_hostname_record(hostname) record = fileutil.read_file(hostname_record) return record def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -f -r " + username) self.conf_sudoer(username, remove=True) def decode_customdata(self, data): return base64.b64decode(data).decode('utf-8') def get_total_mem(self): # Get total memory in bytes and divide by 1024**2 to get the value in MB. return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024**2) def get_processor_cores(self): return multiprocessing.cpu_count() def check_pid_alive(self, pid): return pid is not None and os.path.isdir(os.path.join('/proc', pid)) @property def is_64bit(self): return sys.maxsize > 2**32 WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/factory.py000066400000000000000000000067551322477356400242120ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import * from .default import DefaultOSUtil from .arch import ArchUtil from .clearlinux import ClearLinuxUtil from .coreos import CoreOSUtil from .debian import DebianOSUtil from .freebsd import FreeBSDOSUtil from .openbsd import OpenBSDOSUtil from .redhat import RedhatOSUtil, Redhat6xOSUtil from .suse import SUSEOSUtil, SUSE11OSUtil from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, UbuntuSnappyOSUtil from .alpine import AlpineOSUtil from .bigip import BigIpOSUtil from .gaia import GaiaOSUtil def get_osutil(distro_name=DISTRO_NAME, distro_code_name=DISTRO_CODE_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "arch": return ArchUtil() if distro_name == "clear linux os for intel architecture" \ or distro_name == "clear linux software for intel architecture": return ClearLinuxUtil() if distro_name == "ubuntu": if Version(distro_version) == Version("12.04") or Version(distro_version) == Version("12.10"): return Ubuntu12OSUtil() elif Version(distro_version) == Version("14.04") or Version(distro_version) == Version("14.10"): return Ubuntu14OSUtil() elif distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() else: return UbuntuOSUtil() if distro_name == "alpine": return AlpineOSUtil() if distro_name == "kali": return DebianOSUtil() if distro_name == "coreos" or distro_code_name == "coreos": return CoreOSUtil() if distro_name == "suse": if distro_full_name == 'SUSE Linux Enterprise Server' \ and Version(distro_version) < Version('12') \ or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'): return SUSE11OSUtil() else: return SUSEOSUtil() elif distro_name == "debian": return DebianOSUtil() elif distro_name == "redhat" \ or distro_name == "centos" \ or distro_name == "oracle": if Version(distro_version) < Version("7"): return Redhat6xOSUtil() else: return RedhatOSUtil() elif distro_name == "euleros": return RedhatOSUtil() elif distro_name == "freebsd": return FreeBSDOSUtil() elif distro_name == "openbsd": return OpenBSDOSUtil() elif distro_name == "bigip": return BigIpOSUtil() elif distro_name == "gaia": return GaiaOSUtil() else: logger.warn("Unable to load distro implementation for {0}. Using " "default distro implementation instead.", distro_name) return DefaultOSUtil() WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/freebsd.py000066400000000000000000000235471322477356400241530ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
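# Usage sketch for the get_osutil() factory defined above. Callers normally
# pass no arguments so the DISTRO_* constants detected in
# azurelinuxagent.common.version drive the dispatch; the explicit arguments in
# the second call only illustrate how a specific implementation (here
# Ubuntu14OSUtil) would be selected. The import path is assumed from the file
# layout of this archive.
from azurelinuxagent.common.osutil.factory import get_osutil

osutil = get_osutil()  # implementation chosen from the detected distro
ubuntu14_util = get_osutil(distro_name="ubuntu", distro_version="14.04")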
# # Requires Python 2.4+ and Openssl 1.0+ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.future import ustr class FreeBSDOSUtil(DefaultOSUtil): def __init__(self): super(FreeBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False def set_hostname(self, hostname): rc_file_path = '/etc/rc.conf' conf_file = fileutil.read_file(rc_file_path).split("\n") textutil.set_ini_config(conf_file, "hostname", hostname) fileutil.write_file(rc_file_path, "\n".join(conf_file)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('service sshd restart', chk_err=False) def useradd(self, username, expiration=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.warn("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "pw useradd {0} -e {1} -m".format(username, expiration) else: cmd = "pw useradd {0} -m".format(username) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run('> /var/run/utx.active') shellutil.run('rmuser -y ' + username) self.conf_sudoer(username, remove=True) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "echo '{0}'|pw usermod {1} -H 0 ".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): err = shellutil.run('pw usermod root -h -') if err: raise OSUtilError("Failed to delete root password: Failed to update password database.") def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): """ For FreeBSD, the default broadcast goes to current default gw, not a all-ones broadcast address, need to specify the route manually to get it work in a VNET environment. 
SEE ALSO: man ip(4) IP_ONESBCAST, """ return True def is_dhcp_enabled(self): return True def start_dhcp_service(self): shellutil.run("/etc/rc.d/dhclient start {0}".format(self.get_if_name()), chk_err=False) def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("pgrep -n dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdcontrol -f {0} eject".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) def restart_if(self, ifname): # Restart dhclient only to publish hostname shellutil.run("/etc/rc.d/dhclient restart {0}".format(ifname), chk_err=False) def get_total_mem(self): cmd = "sysctl hw.physmem |awk '{print $2}'" ret, output = shellutil.run_get_output(cmd) if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl hw.ncpu |awk '{print $2}'") if ret: raise OSUtilError("Failed to get processor cores.") try: return int(output) except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): if self._scsi_disks_timeout_set: return ret, output = shellutil.run_get_output('sysctl kern.cam.da.default_timeout={0}'.format(timeout)) if ret: raise OSUtilError("Failed set SCSI disks timeout: {0}".format(output)) self._scsi_disks_timeout_set = True def check_pid_alive(self, pid): return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' err, output = shellutil.run_get_output('ifconfig -l ether', chk_err=False) if err: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] err, output = shellutil.run_get_output('ifconfig ' + iface, chk_err=False) if err: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('ether ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. 
""" if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 err, output = shellutil.run_get_output('sysctl dev.storvsc | grep pnpinfo | grep deviceid=') if err: return None g1 = "000" + ustr(port_id) g0g1 = "{0}-{1}".format(g0, g1) """ search 'X' from 'dev.storvsc.X.%pnpinfo: classid=32412632-86cb-44a2-9b5c-50d1417354f5 deviceid=00000000-0001-8899-0000-000000000000' """ cmd_search_ide = "sysctl dev.storvsc | grep pnpinfo | grep deviceid={0}".format(g0g1) err, output = shellutil.run_get_output(cmd_search_ide) if err: return None cmd_extract_id = cmd_search_ide + "|awk -F . '{print $3}'" err, output = shellutil.run_get_output(cmd_extract_id) """ try to search 'blkvscX' and 'storvscX' to find device name """ output = output.rstrip() cmd_search_blkvsc = "camcontrol devlist -b | grep blkvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_blkvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible cmd_search_storvsc = "camcontrol devlist -b | grep storvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_storvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible return None WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/gaia.py000066400000000000000000000160271322477356400234350ustar00rootroot00000000000000# # Copyright 2017 Check Point Software Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import base64 import socket import struct import time import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr, bytebuffer import azurelinuxagent.common.logger as logger from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.utils.cryptutil import CryptUtil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil class GaiaOSUtil(DefaultOSUtil): def __init__(self): super(GaiaOSUtil, self).__init__() def _run_clish(self, cmd, log_cmd=True): for i in xrange(10): ret, out = shellutil.run_get_output( "/bin/clish -s -c '" + cmd + "'", log_cmd=log_cmd) if not ret: break if 'NMSHST0025' in out: # Entry for [hostname] already present ret = 0 break time.sleep(2) return ret, out def useradd(self, username, expiration=None): logger.warn('useradd is not supported on GAiA') def chpasswd(self, username, password, crypt_id=6, salt_len=10): logger.info('chpasswd') passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) ret, out = self._run_clish( 'set user admin password-hash ' + passwd_hash, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format('admin', out)) def conf_sudoer(self, username, nopasswd=False, remove=False): logger.info('conf_sudoer is not supported on GAiA') def del_root_password(self): logger.info('del_root_password') ret, out = self._run_clish('set user admin password-hash *LOCK*') if ret != 0: raise OSUtilError("Failed to delete root password") def _replace_user(self, path, username): if path.startswith('$HOME'): path = '/home' + path[5:] parts = path.split('/') parts[2] = username return '/'.join(parts) def deploy_ssh_keypair(self, username, keypair): logger.info('deploy_ssh_keypair') username = 'admin' path, thumbprint = keypair path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_keypair( username, (path, thumbprint)) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) ret, out = shellutil.run_get_output( conf.get_openssl_cmd() + " rsa -pubin -noout -text -in '" + input_file + "'") if ret != 0: raise OSUtilError('openssl failed with {0}'.format(ret)) modulus = [] exponent = [] buf = None for line in out.split('\n'): if line.startswith('Modulus:'): buf = modulus buf.append(line) continue if line.startswith('Exponent:'): buf = exponent buf.append(line) continue if buf and line: buf.append(line.strip().replace(':', '')) def text_to_num(buf): if len(buf) == 1: return int(buf[0].split()[1]) return long(''.join(buf[1:]), 16) n = text_to_num(modulus) e = text_to_num(exponent) keydata = bytearray() keydata.extend(struct.pack('>I', len('ssh-rsa'))) keydata.extend(b'ssh-rsa') keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(e)))) keydata.extend(cryptutil.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(n)) + 1)) keydata.extend(b'\0') keydata.extend(cryptutil.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) fileutil.write_file(output_file, ustr(b'ssh-rsa ' + keydata_base64 + b'\n', encoding='utf-8')) def deploy_ssh_pubkey(self, username, pubkey): logger.info('deploy_ssh_pubkey') username = 'admin' path, thumbprint, value = pubkey path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_pubkey( username, 
(path, thumbprint, value)) def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on GAiA') def mount(self, dvd, mount_point, option="", chk_err=True): logger.info('mount {0} {1} {2}', dvd, mount_point, option) if 'udf,iso9660' in option: ret, out = super(GaiaOSUtil, self).mount( dvd, mount_point, option=option.replace('udf,iso9660', 'udf'), chk_err=chk_err) if not ret: return ret, out return super(GaiaOSUtil, self).mount( dvd, mount_point, option=option, chk_err=chk_err) def allow_dhcp_broadcast(self): logger.info('allow_dhcp_broadcast is ignored on GAiA') def remove_rules_files(self, rules_files=''): pass def restore_rules_files(self, rules_files=''): logger.info('restore_rules_files is ignored on GAiA') def restart_ssh_service(self): return shellutil.run('/sbin/service sshd condrestart', chk_err=False) def _address_to_string(self, addr): return socket.inet_ntoa(struct.pack("!I", addr)) def _get_prefix(self, mask): return str(sum([bin(int(x)).count('1') for x in mask.split('.')])) def route_add(self, net, mask, gateway): logger.info('route_add {0} {1} {2}', net, mask, gateway) if net == 0 and mask == 0: cidr = 'default' else: cidr = self._address_to_string(net) + '/' + self._get_prefix( self._address_to_string(mask)) ret, out = self._run_clish( 'set static-route ' + cidr + ' nexthop gateway address ' + self._address_to_string(gateway) + ' on') return ret def set_hostname(self, hostname): logger.warn('set_hostname is ignored on GAiA') def set_dhcp_hostname(self, hostname): logger.warn('set_dhcp_hostname is ignored on GAiA') def publish_hostname(self, hostname): logger.warn('publish_hostname is ignored on GAiA') def del_account(self, username): logger.warn('del_account is ignored on GAiA') WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/openbsd.py000066400000000000000000000323251322477356400241650ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
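# Small sketch of the dotted-quad-netmask to prefix-length conversion used by
# GaiaOSUtil._get_prefix() above when building the CIDR for 'set static-route':
# count the set bits of every octet. The standalone name below is illustrative.
def mask_to_prefix(mask):
    return sum(bin(int(octet)).count('1') for octet in mask.split('.'))

# e.g. mask_to_prefix('255.255.255.0') -> 24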
# # Requires Python 2.4+ and OpenSSL 1.0+ import os import re import time import glob import datetime import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) class OpenBSDOSUtil(DefaultOSUtil): def __init__(self): super(OpenBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False def get_instance_id(self): ret, output = shellutil.run_get_output("sysctl -n hw.uuid") if ret != 0 or UUID_PATTERN.match(output) is None: return "" return output.strip() def set_hostname(self, hostname): fileutil.write_file("/etc/myname", "{}\n".format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('rcctl restart sshd', chk_err=False) def start_agent_service(self): return shellutil.run('rcctl start waagent', chk_err=False) def stop_agent_service(self): return shellutil.run('rcctl stop waagent', chk_err=False) def register_agent_service(self): shellutil.run('chmod 0555 /etc/rc.d/waagent', chk_err=False) return shellutil.run('rcctl enable waagent', chk_err=False) def unregister_agent_service(self): return shellutil.run('rcctl disable waagent', chk_err=False) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -r " + username) self.conf_sudoer(username, remove=True) def conf_sudoer(self, username, nopasswd=False, remove=False): doas_conf = "/etc/doas.conf" doas = None if not remove: if not os.path.isfile(doas_conf): # always allow root to become root doas = "permit keepenv nopass root\n" fileutil.append_file(doas_conf, doas) if nopasswd: doas = "permit keepenv nopass {0}\n".format(username) else: doas = "permit keepenv persist {0}\n".format(username) fileutil.append_file(doas_conf, doas) fileutil.chmod(doas_conf, 0o644) else: # Remove user from doas.conf if os.path.isfile(doas_conf): try: content = fileutil.read_file(doas_conf) doas = content.split("\n") doas = [x for x in doas if username not in x] fileutil.write_file(doas_conf, "\n".join(doas)) except IOError as err: raise OSUtilError("Failed to remove sudoer: " "{0}".format(err)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user. 
" "Will not set passwd.").format(username)) cmd = "echo -n {0}|encrypt".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to encrypt password for {0}: {1}" "").format(username, output)) passwd_hash = output.strip() cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): ret, output = shellutil.run_get_output('usermod -p "*" root') if ret: raise OSUtilError("Failed to delete root password: " "{0}".format(output)) def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): ret = shellutil.run("route -n get default", chk_err=False) if ret == 0: return False return True def is_dhcp_enabled(self): pass def start_dhcp_service(self): pass def stop_dhcp_service(self): pass def get_dhcp_lease_endpoint(self): """ OpenBSD has a sligthly different lease file format. """ endpoint = None pathglob = '/var/db/dhclient.leases.{}'.format(self.get_first_if()[0]) HEADER_LEASE = "lease" HEADER_OPTION = "option option-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S %Z" logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION in leases: cached_endpoint = None has_option_245 = False expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None has_option_245 = False expired = True elif HEADER_OPTION in line: try: ipaddr = line.split(" ")[-1].strip(";").split(":") cached_endpoint = \ ".".join(str(int(d, 16)) for d in ipaddr) has_option_245 = True except ValueError: logger.error("could not parse '{0}'".format(line)) elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split( " ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime( expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except ValueError: logger.error("could not parse expiry token " "'{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired: {2}" .format(cached_endpoint, has_option_245, expired)) if not expired and cached_endpoint is not None and has_option_245: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) def get_dhcp_pid(self): ret, output = shellutil.run_get_output("pgrep -n dhclient", chk_err=False) return output if ret == 0 else None def get_dvd_device(self, dev_dir='/dev'): pattern = r'cd[0-9]c' for dvd in [re.match(pattern, 
dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get DVD device") def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() if not os.path.isdir(mount_point): os.makedirs(mount_point) for retry in range(0, max_retry): retcode = self.mount(dvd_device, mount_point, option="-o ro -t udf", chk_err=False) if retcode == 0: logger.info("Successfully mounted DVD") return if retry < max_retry - 1: mountlist = shellutil.run_get_output("/sbin/mount")[1] existing = self.get_mount_point(mountlist, dvd_device) if existing is not None: logger.info("{0} is mounted at {1}", dvd_device, existing) return logger.warn("Mount DVD failed: retry={0}, ret={1}", retry, retcode) time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount DVD.") def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdio eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject DVD: ret={0}".format(retcode)) def restart_if(self, ifname, retries=3, wait=5): # Restart dhclient only to publish hostname shellutil.run("/sbin/dhclient {0}".format(ifname), chk_err=False) def get_total_mem(self): ret, output = shellutil.run_get_output("sysctl -n hw.physmem") if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl -n hw.ncpu") if ret: raise OSUtilError("Failed to get processor cores.") try: return int(output) except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): pass def check_pid_alive(self, pid): if not pid: return return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on OpenBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' ret, output = shellutil.run_get_output( 'ifconfig hvn | grep -E "^hvn.:" | sed "s/:.*//g"', chk_err=False) if ret: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] ret, output = shellutil.run_get_output( 'ifconfig ' + iface, chk_err=False) if ret: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('lladdr ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ return "wd{0}".format(port_id) WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/redhat.py000066400000000000000000000115111322477356400237740ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import OSUtilError, CryptError import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.osutil.default import DefaultOSUtil class Redhat6xOSUtil(DefaultOSUtil): def __init__(self): super(Redhat6xOSUtil, self).__init__() def start_network(self): return shellutil.run("/sbin/service networking start", chk_err=False) def restart_ssh_service(self): return shellutil.run("/sbin/service sshd condrestart", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service waagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service waagent start", chk_err=False) def register_agent_service(self): return shellutil.run("chkconfig --add waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("chkconfig --del waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): pubkey = fileutil.read_file(input_file) try: cryptutil = CryptUtil(conf.get_openssl_cmd()) ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) except CryptError as e: raise OSUtilError(ustr(e)) fileutil.write_file(output_file, ssh_rsa_pubkey) # Override def get_dhcp_pid(self): ret = shellutil.run_get_output("pidof dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): """ Set /etc/sysconfig/network """ fileutil.update_conf_file('/etc/sysconfig/network', 'HOSTNAME', 'HOSTNAME={0}'.format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): ifname = self.get_if_name() filepath = "/etc/sysconfig/network-scripts/ifcfg-{0}".format(ifname) fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME={0}'.format(hostname)) def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.leases') class RedhatOSUtil(Redhat6xOSUtil): def __init__(self): super(RedhatOSUtil, self).__init__() def set_hostname(self, hostname): """ Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl Due to a bug in systemd in Centos-7.0, if this call fails, fallback to hostname. 
""" hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) DefaultOSUtil.set_hostname(self, hostname) def publish_hostname(self, hostname): """ Restart NetworkManager first before publishing hostname """ shellutil.run("service NetworkManager restart") super(RedhatOSUtil, self).publish_hostname(hostname) def register_agent_service(self): return shellutil.run("systemctl enable waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) def get_dhcp_lease_endpoint(self): # dhclient endpoint = self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.lease') if endpoint is None: # NetworkManager endpoint = self.get_endpoint_from_leases_path('/var/lib/NetworkManager/dhclient-*.lease') return endpoint WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/suse.py000066400000000000000000000072131322477356400235100ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil.default import DefaultOSUtil class SUSE11OSUtil(DefaultOSUtil): def __init__(self): super(SUSE11OSUtil, self).__init__() self.dhclient_name='dhcpcd' def set_hostname(self, hostname): fileutil.write_file('/etc/HOSTNAME', hostname) shellutil.run("hostname {0}".format(hostname), chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("pidof {0}".format(self.dhclient_name), chk_err=False) return ret[1] if ret[0] == 0 else None def is_dhcp_enabled(self): return True def stop_dhcp_service(self): cmd = "/sbin/service {0} stop".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_dhcp_service(self): cmd = "/sbin/service {0} start".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_network(self) : return shellutil.run("/sbin/service start network", chk_err=False) def restart_ssh_service(self): return shellutil.run("/sbin/service sshd restart", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service waagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service waagent start", chk_err=False) def register_agent_service(self): return shellutil.run("/sbin/insserv waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("/sbin/insserv 
-r waagent", chk_err=False) class SUSEOSUtil(SUSE11OSUtil): def __init__(self): super(SUSEOSUtil, self).__init__() self.dhclient_name = 'wickedd-dhcp4' def stop_dhcp_service(self): cmd = "systemctl stop {0}".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_dhcp_service(self): cmd = "systemctl start {0}".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_network(self) : return shellutil.run("systemctl start network", chk_err=False) def restart_ssh_service(self): return shellutil.run("systemctl restart sshd", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def register_agent_service(self): return shellutil.run("systemctl enable waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable waagent", chk_err=False) WALinuxAgent-2.2.20/azurelinuxagent/common/osutil/ubuntu.py000066400000000000000000000043001322477356400240450ustar00rootroot00000000000000# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class Ubuntu14OSUtil(DefaultOSUtil): def __init__(self): super(Ubuntu14OSUtil, self).__init__() def start_network(self): return shellutil.run("service networking start", chk_err=False) def stop_agent_service(self): return shellutil.run("service walinuxagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("service walinuxagent start", chk_err=False) def remove_rules_files(self, rules_files=""): pass def restore_rules_files(self, rules_files=""): pass def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') class Ubuntu12OSUtil(Ubuntu14OSUtil): def __init__(self): super(Ubuntu12OSUtil, self).__init__() # Override def get_dhcp_pid(self): ret = shellutil.run_get_output("pidof dhclient3", chk_err=False) return ret[1] if ret[0] == 0 else None class UbuntuOSUtil(Ubuntu14OSUtil): def __init__(self): super(UbuntuOSUtil, self).__init__() def register_agent_service(self): return shellutil.run("systemctl unmask walinuxagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl mask walinuxagent", chk_err=False) class UbuntuSnappyOSUtil(Ubuntu14OSUtil): def __init__(self): super(UbuntuSnappyOSUtil, self).__init__() self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/000077500000000000000000000000001322477356400224765ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/__init__.py000066400000000000000000000014761322477356400246170ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.common.protocol.util import get_protocol_util, \ OVF_FILE_NAME, \ TAG_FILE_NAME WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/hostplugin.py000066400000000000000000000240071322477356400252470ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import base64 import json import traceback from azurelinuxagent.common import logger from azurelinuxagent.common.exception import HttpError, ProtocolError, \ ResourceGoneError from azurelinuxagent.common.future import ustr, httpclient from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.utils import textutil from azurelinuxagent.common.utils.textutil import remove_bom from azurelinuxagent.common.version import PY_VERSION_MAJOR HOST_PLUGIN_PORT = 32526 URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions" URI_FORMAT_GET_EXTENSION_ARTIFACT = "http://{0}:{1}/extensionArtifact" URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" API_VERSION = "2015-09-01" HEADER_CONTAINER_ID = "x-ms-containerid" HEADER_VERSION = "x-ms-version" HEADER_HOST_CONFIG_NAME = "x-ms-host-config-name" HEADER_ARTIFACT_LOCATION = "x-ms-artifact-location" HEADER_ARTIFACT_MANIFEST_LOCATION = "x-ms-artifact-manifest-location" MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024 # Max page size: 4MB class HostPluginProtocol(object): _is_default_channel = False def __init__(self, endpoint, container_id, role_config_name): if endpoint is None: raise ProtocolError("HostGAPlugin: Endpoint not provided") self.is_initialized = False self.is_available = False self.api_versions = None self.endpoint = endpoint self.container_id = container_id self.deployment_id = None self.role_config_name = role_config_name self.manifest_uri = None @staticmethod def is_default_channel(): return HostPluginProtocol._is_default_channel @staticmethod def set_default_channel(is_default): HostPluginProtocol._is_default_channel = is_default def ensure_initialized(self): if not self.is_initialized: self.api_versions = self.get_api_versions() self.is_available = API_VERSION in self.api_versions self.is_initialized = self.is_available from azurelinuxagent.common.event import WALAEventOperation, report_event report_event(WALAEventOperation.InitializeHostPlugin, is_success=self.is_available) return self.is_available def get_api_versions(self): url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, HOST_PLUGIN_PORT) 
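        # The URL assembled above points at the Host GA Plugin's /versions
        # endpoint on port 32526 (HOST_PLUGIN_PORT) of the configured endpoint
        # address. The GET below carries the x-ms-containerid header; on
        # success the (BOM-stripped) response body is returned so that
        # ensure_initialized() can test whether API_VERSION is supported.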
logger.verbose("HostGAPlugin: Getting API versions at [{0}]".format( url)) return_val = [] try: headers = {HEADER_CONTAINER_ID: self.container_id} response = restutil.http_get(url, headers) if restutil.request_failed(response): logger.error( "HostGAPlugin: Failed Get API versions: {0}".format( restutil.read_response_error(response))) else: return_val = ustr(remove_bom(response.read()), encoding='utf-8') except HttpError as e: logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e)) return return_val def get_artifact_request(self, artifact_url, artifact_manifest_url=None): if not self.ensure_initialized(): raise ProtocolError("HostGAPlugin: Host plugin channel is not available") if textutil.is_str_none_or_whitespace(artifact_url): raise ProtocolError("HostGAPlugin: No extension artifact url was provided") url = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT) headers = {HEADER_VERSION: API_VERSION, HEADER_CONTAINER_ID: self.container_id, HEADER_HOST_CONFIG_NAME: self.role_config_name, HEADER_ARTIFACT_LOCATION: artifact_url} if artifact_manifest_url is not None: headers[HEADER_ARTIFACT_MANIFEST_LOCATION] = artifact_manifest_url return url, headers def put_vm_log(self, content): raise NotImplementedError("Unimplemented") def put_vm_status(self, status_blob, sas_url, config_blob_type=None): """ Try to upload the VM status via the host plugin /status channel :param sas_url: the blob SAS url to pass to the host plugin :param config_blob_type: the blob type from the extension config :type status_blob: StatusBlob """ if not self.ensure_initialized(): raise ProtocolError("HostGAPlugin: HostGAPlugin is not available") if status_blob is None or status_blob.vm_status is None: raise ProtocolError("HostGAPlugin: Status blob was not provided") logger.verbose("HostGAPlugin: Posting VM status") try: blob_type = status_blob.type if status_blob.type else config_blob_type if blob_type == "BlockBlob": self._put_block_blob_status(sas_url, status_blob) else: self._put_page_blob_status(sas_url, status_blob) except Exception as e: # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin if isinstance(e, ResourceGoneError): logger.verbose("HostGAPlugin: Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) raise def _put_block_blob_status(self, sas_url, status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_block_blob_headers(len(status_blob.data)), bytearray(status_blob.data, encoding='utf-8')), headers=self._build_status_headers()) if restutil.request_failed(response): raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}".format( restutil.read_response_error(response))) else: logger.verbose("HostGAPlugin: Put BlockBlob status succeeded") def _put_page_blob_status(self, sas_url, status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) # Convert the status into a blank-padded string whose length is modulo 512 status = bytearray(status_blob.data, encoding='utf-8') status_size = int((len(status) + 511) / 512) * 512 status = bytearray(status_blob.data.ljust(status_size), encoding='utf-8') # First, initialize an empty blob response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_page_blob_create_headers(status_size)), headers=self._build_status_headers()) if restutil.request_failed(response): raise HttpError( "HostGAPlugin: 
Failed PageBlob clean-up: {0}".format( restutil.read_response_error(response))) else: logger.verbose("HostGAPlugin: PageBlob clean-up succeeded") # Then, upload the blob in pages if sas_url.count("?") <= 0: sas_url = "{0}?comp=page".format(sas_url) else: sas_url = "{0}&comp=page".format(sas_url) start = 0 end = 0 while start < len(status): # Create the next page end = start + min(len(status) - start, MAXIMUM_PAGEBLOB_PAGE_SIZE) page_size = int((end - start + 511) / 512) * 512 buf = bytearray(page_size) buf[0: end - start] = status[start: end] # Send the page response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_page_blob_page_headers(start, end), buf), headers=self._build_status_headers()) if restutil.request_failed(response): raise HttpError( "HostGAPlugin Error: Put PageBlob bytes [{0},{1}]: " \ "{2}".format( start, end, restutil.read_response_error(response))) # Advance to the next page (if any) start = end def _build_status_data(self, sas_url, blob_headers, content=None): headers = [] for name in iter(blob_headers.keys()): headers.append({ 'headerName': name, 'headerValue': blob_headers[name] }) data = { 'requestUri': sas_url, 'headers': headers } if not content is None: data['content'] = self._base64_encode(content) return json.dumps(data, sort_keys=True) def _build_status_headers(self): return { HEADER_VERSION: API_VERSION, "Content-type": "application/json", HEADER_CONTAINER_ID: self.container_id, HEADER_HOST_CONFIG_NAME: self.role_config_name } def _base64_encode(self, data): s = base64.b64encode(bytes(data)) if PY_VERSION_MAJOR > 2: return s.decode('utf-8') return s WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/metadata.py000066400000000000000000000374221322477356400246400ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
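# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): the 512-byte alignment and
# 4 MiB paging arithmetic used by _put_page_blob_status() above. Page blob
# writes must be a multiple of 512 bytes long, and the host plugin forwards
# at most MAXIMUM_PAGEBLOB_PAGE_SIZE bytes per request.
# ---------------------------------------------------------------------------
MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024  # max page size: 4MB

def split_status_into_pages(status_text):
    # Blank-pad the payload so its total length is a multiple of 512
    status = bytearray(status_text, encoding="utf-8")
    blob_size = int((len(status) + 511) / 512) * 512
    status = bytearray(status_text.ljust(blob_size), encoding="utf-8")

    pages = []
    start = 0
    while start < len(status):
        end = start + min(len(status) - start, MAXIMUM_PAGEBLOB_PAGE_SIZE)
        page_size = int((end - start + 511) / 512) * 512
        buf = bytearray(page_size)
        buf[0:end - start] = status[start:end]
        pages.append((start, end, bytes(buf)))
        start = end
    return blob_size, pages

# A 1000-byte status becomes a single 1024-byte page; a 9 MiB status becomes
# two 4 MiB pages plus a final remainder page.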
# # Requires Python 2.4+ and Openssl 1.0+ import base64 import json import os import shutil import re import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.utils.cryptutil import CryptUtil METADATA_ENDPOINT = '169.254.169.254' APIVERSION = '2015-05-01-preview' BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}" TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem" TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem" P7M_FILE_NAME = "Certificates.p7m" P7B_FILE_NAME = "Certificates.p7b" PEM_FILE_NAME = "Certificates.pem" KEY_AGENT_VERSION_URIS = "versionsManifestUris" KEY_URI = "uri" # TODO remote workaround for azure stack MAX_PING = 30 RETRY_PING_INTERVAL = 10 def _add_content_type(headers): if headers is None: headers = {} headers["content-type"] = "application/json" return headers class MetadataProtocol(Protocol): def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT): self.apiversion = apiversion self.endpoint = endpoint self.identity_uri = BASE_URI.format(self.endpoint, "identity", self.apiversion) self.cert_uri = BASE_URI.format(self.endpoint, "certificates", self.apiversion) self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers", self.apiversion) self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions", self.apiversion) self.provision_status_uri = BASE_URI.format(self.endpoint, "provisioningStatus", self.apiversion, "") self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent", self.apiversion, "") self.ext_status_uri = BASE_URI.format(self.endpoint, "status/extensions/{0}", self.apiversion, "") self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry", self.apiversion, "") self.certs = None self.agent_manifests = None self.agent_etag = None def _get_data(self, url, headers=None): try: resp = restutil.http_get(url, headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) if restutil.request_failed(resp): raise ProtocolError("{0} - GET: {1}".format(resp.status, url)) data = resp.read() etag = resp.getheader('ETag') if data is not None: data = json.loads(ustr(data, encoding="utf-8")) return data, etag def _put_data(self, url, data, headers=None): headers = _add_content_type(headers) try: resp = restutil.http_put(url, json.dumps(data), headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) if restutil.request_failed(resp): raise ProtocolError("{0} - PUT: {1}".format(resp.status, url)) def _post_data(self, url, data, headers=None): headers = _add_content_type(headers) try: resp = restutil.http_post(url, json.dumps(data), headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) if resp.status != httpclient.CREATED: logger.warn("{0} for POST {1}".format(resp.status, url)) def _get_trans_cert(self): trans_crt_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) if not os.path.isfile(trans_crt_file): raise ProtocolError("{0} is missing.".format(trans_crt_file)) content = fileutil.read_file(trans_crt_file) return textutil.get_bytes_from_pem(content) def detect(self): self.get_vminfo() trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) cryptutil = CryptUtil(conf.get_openssl_cmd()) 
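# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): how MetadataProtocol composes
# its request URIs from BASE_URI. With the default endpoint and API version,
# the identity URI, for example, expands to
#   http://169.254.169.254/Microsoft.Compute/identity?api-version=2015-05-01-preview
# ---------------------------------------------------------------------------
METADATA_ENDPOINT = "169.254.169.254"
APIVERSION = "2015-05-01-preview"
BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}"

def metadata_uris(endpoint=METADATA_ENDPOINT, apiversion=APIVERSION):
    resources = ["identity", "certificates", "extensionHandlers",
                 "vmAgentVersions", "provisioningStatus"]
    return {r: BASE_URI.format(endpoint, r, apiversion) for r in resources}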
cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) # "Install" the cert and private key to /var/lib/waagent thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file) prv_file = os.path.join(conf.get_lib_dir(), "{0}.prv".format(thumbprint)) crt_file = os.path.join(conf.get_lib_dir(), "{0}.crt".format(thumbprint)) shutil.copyfile(trans_prv_file, prv_file) shutil.copyfile(trans_cert_file, crt_file) self.update_goal_state(forced=True) def get_vminfo(self): vminfo = VMInfo() data, etag = self._get_data(self.identity_uri) set_properties("vminfo", vminfo, data) return vminfo def get_certs(self): certlist = CertList() certificatedata = CertificateData() data, etag = self._get_data(self.cert_uri) set_properties("certlist", certlist, data) cert_list = get_properties(certlist) headers = { "x-ms-vmagent-public-x509-cert": self._get_trans_cert() } for cert_i in cert_list["certificates"]: certificate_data_uri = cert_i['certificateDataUri'] data, etag = self._get_data(certificate_data_uri, headers=headers) set_properties("certificatedata", certificatedata, data) json_certificate_data = get_properties(certificatedata) self.certs = Certificates(self, json_certificate_data) if self.certs is None: return None return self.certs def get_incarnation(self): # Always return 0 since Azure Stack does not maintain goal state # incarnation identifiers return 0 def get_vmagent_manifests(self): self.update_goal_state() data, etag = self._get_data(self.vmagent_uri) if self.agent_etag is None or self.agent_etag < etag: self.agent_etag = etag # Create a list with a single manifest # -- The protocol lacks "family," use the configured family self.agent_manifests = VMAgentManifestList() manifest = VMAgentManifest() manifest.family = family=conf.get_autoupdate_gafamily() if not KEY_AGENT_VERSION_URIS in data: raise ProtocolError( "Agent versions missing '{0}': {1}".format( KEY_AGENT_VERSION_URIS, data)) for version in data[KEY_AGENT_VERSION_URIS]: if not KEY_URI in version: raise ProtocolError( "Agent versions missing '{0': {1}".format( KEY_URI, data)) manifest_uri = VMAgentManifestUri(uri=version[KEY_URI]) manifest.versionsManifestUris.append(manifest_uri) self.agent_manifests.vmAgentManifests.append(manifest) return self.agent_manifests, self.agent_etag def get_vmagent_pkgs(self, vmagent_manifest): data = None etag = None for manifest_uri in vmagent_manifest.versionsManifestUris: try: data, etag = self._get_data(manifest_uri.uri) break except ProtocolError as e: logger.verbose( "Error retrieving agent package from {0}: {1}".format( manifest_uri, e)) if data is None: raise ProtocolError( "Failed retrieving agent package from all URIs") vmagent_pkgs = ExtHandlerPackageList() set_properties("vmAgentVersions", vmagent_pkgs, data) return vmagent_pkgs def get_ext_handlers(self, last_etag=None): self.update_goal_state() headers = { "x-ms-vmagent-public-x509-cert": self._get_trans_cert() } ext_list = ExtHandlerList() data, etag = self._get_data(self.ext_uri, headers=headers) if last_etag is None or last_etag < etag: set_properties("extensionHandlers", ext_list.extHandlers, data) return ext_list, etag def get_ext_handler_pkgs(self, ext_handler): logger.verbose("Get extension handler packages") pkg_list = ExtHandlerPackageList() manifest = None for version_uri in ext_handler.versionUris: try: manifest, etag = self._get_data(version_uri.uri) logger.verbose("Successfully downloaded manifest") break except ProtocolError as e: logger.warn("Failed to fetch manifest: {0}", e) if manifest is None: raise 
ValueError("Extension manifest is empty") set_properties("extensionPackages", pkg_list, manifest) return pkg_list def report_provision_status(self, provision_status): validate_param('provisionStatus', provision_status, ProvisionStatus) data = get_properties(provision_status) self._put_data(self.provision_status_uri, data) def report_vm_status(self, vm_status): validate_param('vmStatus', vm_status, VMStatus) data = get_properties(vm_status) # TODO code field is not implemented for metadata protocol yet. # Remove it handler_statuses = data['vmAgent']['extensionHandlers'] for handler_status in handler_statuses: try: handler_status.pop('code', None) except KeyError: pass self._put_data(self.vm_status_uri, data) def report_ext_status(self, ext_handler_name, ext_name, ext_status): validate_param('extensionStatus', ext_status, ExtensionStatus) data = get_properties(ext_status) uri = self.ext_status_uri.format(ext_name) self._put_data(uri, data) def report_event(self, events): validate_param('events', events, TelemetryEventList) data = get_properties(events) self._post_data(self.event_uri, data) def update_certs(self): certificates = self.get_certs() return certificates.cert_list def update_goal_state(self, forced=False, max_retry=3): # Start updating goalstate, retry on 410 for retry in range(0, max_retry): try: self.update_certs() return except: logger.verbose("Incarnation is out of date. Update goalstate.") raise ProtocolError("Exceeded max retry updating goal state") class Certificates(object): """ Object containing certificates of host and provisioned user. """ def __init__(self, client, json_text): self.cert_list = CertList() self.parse(json_text) def parse(self, json_text): """ Parse multiple certificates into seperate files. """ data = json_text["certificateData"] if data is None: logger.verbose("No data in json_text received!") return cryptutil = CryptUtil(conf.get_openssl_cmd()) p7b_file = os.path.join(conf.get_lib_dir(), P7B_FILE_NAME) # Wrapping the certificate lines. # decode and save the result into p7b_file fileutil.write_file(p7b_file, base64.b64decode(data), asbin=True) ssl_cmd = "openssl pkcs7 -text -in {0} -inform der | grep -v '^-----' " ret, data = shellutil.run_get_output(ssl_cmd.format(p7b_file)) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" "Content-Disposition: attachment; filename=\"{0}\"\n" "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" "Content-Transfer-Encoding: base64\n" "\n" "{2}").format(p7m_file, p7m_file, data) self.save_cache(p7m_file, p7m) trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) # decrypt certificates cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, pem_file) # The parsing process use public key to match prv and crt. 
buf = [] begin_crt = False begin_prv = False prvs = {} thumbprints = {} index = 0 v1_cert_list = [] with open(pem_file) as pem: for line in pem.readlines(): buf.append(line) if re.match(r'[-]+BEGIN.*KEY[-]+', line): begin_prv = True elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): begin_crt = True elif re.match(r'[-]+END.*KEY[-]+', line): tmp_file = self.write_to_tmp_file(index, 'prv', buf) pub = cryptutil.get_pubkey_from_prv(tmp_file) prvs[pub] = tmp_file buf = [] index += 1 begin_prv = False elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): tmp_file = self.write_to_tmp_file(index, 'crt', buf) pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ "name": None, "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = thumbprints[pubkey] if thumbprint: tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) for v1_cert in v1_cert_list: cert = Cert() set_properties("certs", cert, v1_cert) self.cert_list.certificates.append(cert) def save_cache(self, local_file, data): try: fileutil.write_file(local_file, data) except IOError as e: raise ProtocolError("Failed to write cache: {0}".format(e)) def write_to_tmp_file(self, index, suffix, buf): file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.save_cache(file_name, "".join(buf)) return file_name WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/ovfenv.py000066400000000000000000000103241322477356400243530ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
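# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): the pairing step described by
# the comment "the parsing process uses the public key to match prv and crt"
# above. Every private key and certificate is reduced to its public key, and
# matching entries are renamed <thumbprint>.prv / <thumbprint>.crt. The
# get_pubkey and get_thumbprint callables stand in for the CryptUtil helpers.
# ---------------------------------------------------------------------------
def pair_keys_with_certs(prv_files, crt_files, get_pubkey, get_thumbprint):
    prvs = {get_pubkey(f): f for f in prv_files}   # public key -> key file
    pairs = []
    for crt in crt_files:
        pub = get_pubkey(crt)
        if pub in prvs:
            # caller renames both files to {thumbprint}.prv / {thumbprint}.crt
            pairs.append((get_thumbprint(crt), prvs[pub], crt))
    return pairs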
# # Requires Python 2.4+ and Openssl 1.0+ # """ Copy and parse ovf-env.xml from provisioning ISO and local cache """ import os import re import shutil import xml.dom.minidom as minidom import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.future import ustr import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext OVF_VERSION = "1.0" OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1" WA_NAME_SPACE = "http://schemas.microsoft.com/windowsazure" def _validate_ovf(val, msg): if val is None: raise ProtocolError("Failed to validate OVF: {0}".format(msg)) class OvfEnv(object): """ Read, and process provisioning info from provisioning file OvfEnv.xml """ def __init__(self, xml_text): if xml_text is None: raise ValueError("ovf-env is None") logger.verbose("Load ovf-env.xml") self.hostname = None self.username = None self.user_password = None self.customdata = None self.disable_ssh_password_auth = True self.ssh_pubkeys = [] self.ssh_keypairs = [] self.parse(xml_text) def parse(self, xml_text): """ Parse xml tree, retreiving user and ssh key information. Return self. """ wans = WA_NAME_SPACE ovfns = OVF_NAME_SPACE xml_doc = parse_doc(xml_text) environment = find(xml_doc, "Environment", namespace=ovfns) _validate_ovf(environment, "Environment not found") section = find(environment, "ProvisioningSection", namespace=wans) _validate_ovf(section, "ProvisioningSection not found") version = findtext(environment, "Version", namespace=wans) _validate_ovf(version, "Version not found") if version > OVF_VERSION: logger.warn("Newer provisioning configuration detected. " "Please consider updating waagent") conf_set = find(section, "LinuxProvisioningConfigurationSet", namespace=wans) _validate_ovf(conf_set, "LinuxProvisioningConfigurationSet not found") self.hostname = findtext(conf_set, "HostName", namespace=wans) _validate_ovf(self.hostname, "HostName not found") self.username = findtext(conf_set, "UserName", namespace=wans) _validate_ovf(self.username, "UserName not found") self.user_password = findtext(conf_set, "UserPassword", namespace=wans) self.customdata = findtext(conf_set, "CustomData", namespace=wans) auth_option = findtext(conf_set, "DisableSshPasswordAuthentication", namespace=wans) if auth_option is not None and auth_option.lower() == "true": self.disable_ssh_password_auth = True else: self.disable_ssh_password_auth = False public_keys = findall(conf_set, "PublicKey", namespace=wans) for public_key in public_keys: path = findtext(public_key, "Path", namespace=wans) fingerprint = findtext(public_key, "Fingerprint", namespace=wans) value = findtext(public_key, "Value", namespace=wans) self.ssh_pubkeys.append((path, fingerprint, value)) keypairs = findall(conf_set, "KeyPair", namespace=wans) for keypair in keypairs: path = findtext(keypair, "Path", namespace=wans) fingerprint = findtext(keypair, "Fingerprint", namespace=wans) self.ssh_keypairs.append((path, fingerprint)) WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/restapi.py000066400000000000000000000236311322477356400245240ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
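# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): a schematic, heavily abbreviated
# ovf-env.xml fragment and the equivalent of the lookups OvfEnv.parse() above
# performs, using xml.etree instead of the agent's parse_doc/find helpers.
# The exact document layout shown here is an assumption for illustration only.
# ---------------------------------------------------------------------------
import xml.etree.ElementTree as ET

WA = "http://schemas.microsoft.com/windowsazure"
SAMPLE = """<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
                         xmlns:wa="{0}">
  <wa:ProvisioningSection>
    <wa:Version>1.0</wa:Version>
    <wa:LinuxProvisioningConfigurationSet>
      <wa:HostName>example-vm</wa:HostName>
      <wa:UserName>azureuser</wa:UserName>
      <wa:DisableSshPasswordAuthentication>true</wa:DisableSshPasswordAuthentication>
    </wa:LinuxProvisioningConfigurationSet>
  </wa:ProvisioningSection>
</Environment>""".format(WA)

root = ET.fromstring(SAMPLE)
conf_set = root.find(".//{{{0}}}LinuxProvisioningConfigurationSet".format(WA))
hostname = conf_set.findtext("{{{0}}}HostName".format(WA))          # "example-vm"
username = conf_set.findtext("{{{0}}}UserName".format(WA))          # "azureuser"
disable_pw = conf_set.findtext(
    "{{{0}}}DisableSshPasswordAuthentication".format(WA)).lower() == "true"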
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import socket import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.exception import ProtocolError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import DISTRO_VERSION, DISTRO_NAME, CURRENT_VERSION def validate_param(name, val, expected_type): if val is None: raise ProtocolError("{0} is None".format(name)) if not isinstance(val, expected_type): raise ProtocolError(("{0} type should be {1} not {2}" "").format(name, expected_type, type(val))) def set_properties(name, obj, data): if isinstance(obj, DataContract): validate_param("Property '{0}'".format(name), data, dict) for prob_name, prob_val in data.items(): prob_full_name = "{0}.{1}".format(name, prob_name) try: prob = getattr(obj, prob_name) except AttributeError: logger.warn("Unknown property: {0}", prob_full_name) continue prob = set_properties(prob_full_name, prob, prob_val) setattr(obj, prob_name, prob) return obj elif isinstance(obj, DataContractList): validate_param("List '{0}'".format(name), data, list) for item_data in data: item = obj.item_cls() item = set_properties(name, item, item_data) obj.append(item) return obj else: return data def get_properties(obj): if isinstance(obj, DataContract): data = {} props = vars(obj) for prob_name, prob in list(props.items()): data[prob_name] = get_properties(prob) return data elif isinstance(obj, DataContractList): data = [] for item in obj: item_data = get_properties(item) data.append(item_data) return data else: return obj class DataContract(object): pass class DataContractList(list): def __init__(self, item_cls): self.item_cls = item_cls """ Data contract between guest and host """ class VMInfo(DataContract): def __init__(self, subscriptionId=None, vmName=None, containerId=None, roleName=None, roleInstanceName=None, tenantName=None): self.subscriptionId = subscriptionId self.vmName = vmName self.containerId = containerId self.roleName = roleName self.roleInstanceName = roleInstanceName self.tenantName = tenantName class CertificateData(DataContract): def __init__(self, certificateData=None): self.certificateData = certificateData class Cert(DataContract): def __init__(self, name=None, thumbprint=None, certificateDataUri=None, storeName=None, storeLocation=None): self.name = name self.thumbprint = thumbprint self.certificateDataUri = certificateDataUri self.storeLocation = storeLocation self.storeName = storeName class CertList(DataContract): def __init__(self): self.certificates = DataContractList(Cert) # TODO: confirm vmagent manifest schema class VMAgentManifestUri(DataContract): def __init__(self, uri=None): self.uri = uri class VMAgentManifest(DataContract): def __init__(self, family=None): self.family = family self.versionsManifestUris = DataContractList(VMAgentManifestUri) class VMAgentManifestList(DataContract): def __init__(self): self.vmAgentManifests = DataContractList(VMAgentManifest) class Extension(DataContract): def __init__(self, name=None, sequenceNumber=None, publicSettings=None, protectedSettings=None, 
certificateThumbprint=None): self.name = name self.sequenceNumber = sequenceNumber self.publicSettings = publicSettings self.protectedSettings = protectedSettings self.certificateThumbprint = certificateThumbprint class ExtHandlerProperties(DataContract): def __init__(self): self.version = None self.upgradePolicy = None self.upgradeGuid = None self.state = None self.extensions = DataContractList(Extension) class ExtHandlerVersionUri(DataContract): def __init__(self): self.uri = None class ExtHandler(DataContract): def __init__(self, name=None): self.name = name self.properties = ExtHandlerProperties() self.versionUris = DataContractList(ExtHandlerVersionUri) class ExtHandlerList(DataContract): def __init__(self): self.extHandlers = DataContractList(ExtHandler) class ExtHandlerPackageUri(DataContract): def __init__(self, uri=None): self.uri = uri class ExtHandlerPackage(DataContract): def __init__(self, version=None): self.version = version self.uris = DataContractList(ExtHandlerPackageUri) # TODO update the naming to align with metadata protocol self.isinternal = False self.disallow_major_upgrade = False class ExtHandlerPackageList(DataContract): def __init__(self): self.versions = DataContractList(ExtHandlerPackage) class VMProperties(DataContract): def __init__(self, certificateThumbprint=None): # TODO need to confirm the property name self.certificateThumbprint = certificateThumbprint class ProvisionStatus(DataContract): def __init__(self, status=None, subStatus=None, description=None): self.status = status self.subStatus = subStatus self.description = description self.properties = VMProperties() class ExtensionSubStatus(DataContract): def __init__(self, name=None, status=None, code=None, message=None): self.name = name self.status = status self.code = code self.message = message class ExtensionStatus(DataContract): def __init__(self, configurationAppliedTime=None, operation=None, status=None, seq_no=None, code=None, message=None): self.configurationAppliedTime = configurationAppliedTime self.operation = operation self.status = status self.sequenceNumber = seq_no self.code = code self.message = message self.substatusList = DataContractList(ExtensionSubStatus) class ExtHandlerStatus(DataContract): def __init__(self, name=None, version=None, upgradeGuid=None, status=None, code=0, message=None): self.name = name self.version = version self.upgradeGuid = upgradeGuid self.status = status self.code = code self.message = message self.extensions = DataContractList(ustr) class VMAgentStatus(DataContract): def __init__(self, status=None, message=None): self.status = status self.message = message self.hostname = socket.gethostname() self.version = str(CURRENT_VERSION) self.osname = DISTRO_NAME self.osversion = DISTRO_VERSION self.extensionHandlers = DataContractList(ExtHandlerStatus) class VMStatus(DataContract): def __init__(self, status, message): self.vmAgent = VMAgentStatus(status=status, message=message) class TelemetryEventParam(DataContract): def __init__(self, name=None, value=None): self.name = name self.value = value class TelemetryEvent(DataContract): def __init__(self, eventId=None, providerId=None): self.eventId = eventId self.providerId = providerId self.parameters = DataContractList(TelemetryEventParam) class TelemetryEventList(DataContract): def __init__(self): self.events = DataContractList(TelemetryEvent) class Protocol(DataContract): def detect(self): raise NotImplementedError() def get_vminfo(self): raise NotImplementedError() def get_certs(self): raise NotImplementedError() 
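# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): how the set_properties()/
# get_properties() helpers above map plain JSON dictionaries onto the
# DataContract classes and back. Assumes the azurelinuxagent package is
# importable; the input dictionary is made-up sample data.
# ---------------------------------------------------------------------------
from azurelinuxagent.common.protocol.restapi import (VMInfo,
                                                     get_properties,
                                                     set_properties)

sample = {"vmName": "example-vm", "roleName": "example-role"}

vminfo = set_properties("vminfo", VMInfo(), sample)          # dict -> contract
assert vminfo.vmName == "example-vm"
assert get_properties(vminfo)["roleName"] == "example-role"  # contract -> dict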
def get_incarnation(self): raise NotImplementedError() def get_vmagent_manifests(self): raise NotImplementedError() def get_vmagent_pkgs(self, manifest): raise NotImplementedError() def get_ext_handlers(self): raise NotImplementedError() def get_ext_handler_pkgs(self, extension): raise NotImplementedError() def get_artifacts_profile(self): raise NotImplementedError() def download_ext_handler_pkg(self, uri, headers=None): try: resp = restutil.http_get(uri, use_proxy=True, headers=headers) if restutil.request_succeeded(resp): return resp.read() except Exception as e: logger.warn("Failed to download from: {0}".format(uri), e) def report_provision_status(self, provision_status): raise NotImplementedError() def report_vm_status(self, vm_status): raise NotImplementedError() def report_ext_status(self, ext_handler_name, ext_name, ext_status): raise NotImplementedError() def report_event(self, event): raise NotImplementedError() WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/util.py000066400000000000000000000250261322477356400240320ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import errno import os import re import shutil import time import threading import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import ProtocolError, OSUtilError, \ ProtocolNotFoundError, DhcpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.protocol.metadata import MetadataProtocol, \ METADATA_ENDPOINT from azurelinuxagent.common.utils.restutil import IOErrorCounter OVF_FILE_NAME = "ovf-env.xml" TAG_FILE_NAME = "useMetadataEndpoint.tag" PROTOCOL_FILE_NAME = "Protocol" MAX_RETRY = 360 PROBE_INTERVAL = 10 ENDPOINT_FILE_NAME = "WireServerEndpoint" PASSWORD_PATTERN = ".*?<" PASSWORD_REPLACEMENT = "*<" def get_protocol_util(): return ProtocolUtil() class ProtocolUtil(object): """ ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ def __init__(self): self.lock = threading.Lock() self.protocol = None self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() def copy_ovf_env(self): """ Copy ovf env file from dvd to hard disk. 
Remove password before save it to the disk """ dvd_mount_point = conf.get_dvd_mount_point() ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) try: self.osutil.mount_dvd() except OSUtilError as e: raise ProtocolError("[CopyOvfEnv] Error mounting dvd: " "{0}".format(ustr(e))) try: ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) ovfenv = OvfEnv(ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error reading file " "{0}: {1}".format(ovf_file_path_on_dvd, ustr(e))) try: ovfxml = re.sub(PASSWORD_PATTERN, PASSWORD_REPLACEMENT, ovfxml) fileutil.write_file(ovf_file_path, ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error writing file " "{0}: {1}".format(ovf_file_path, ustr(e))) try: if os.path.isfile(tag_file_path_on_dvd): logger.info("Found {0} in provisioning ISO", TAG_FILE_NAME) shutil.copyfile(tag_file_path_on_dvd, tag_file_path) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error copying file " "{0} to {1}: {2}".format(tag_file_path, tag_file_path, ustr(e))) try: self.osutil.umount_dvd() self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) return ovfenv def get_ovf_env(self): """ Load saved ovf-env.xml """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) if os.path.isfile(ovf_file_path): xml_text = fileutil.read_file(ovf_file_path) return OvfEnv(xml_text) else: raise ProtocolError("ovf-env.xml is missing from {0}".format(ovf_file_path)) def _get_wireserver_endpoint(self): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) return fileutil.read_file(file_path) except IOError as e: raise OSUtilError(ustr(e)) def _set_wireserver_endpoint(self, endpoint): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) fileutil.write_file(file_path, endpoint) except IOError as e: raise OSUtilError(ustr(e)) def _detect_wire_protocol(self): endpoint = self.dhcp_handler.endpoint if endpoint is None: logger.info("WireServer endpoint is not found. Rerun dhcp handler") try: self.dhcp_handler.run() except DhcpError as e: raise ProtocolError(ustr(e)) endpoint = self.dhcp_handler.endpoint try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) self.save_protocol("WireProtocol") return protocol except ProtocolError as e: logger.info("WireServer is not responding. Reset endpoint") self.dhcp_handler.endpoint = None self.dhcp_handler.skip_cache = True raise e def _detect_metadata_protocol(self): protocol = MetadataProtocol() protocol.detect() self.save_protocol("MetadataProtocol") return protocol def _detect_protocol(self, protocols): """ Probe protocol endpoints in turn. """ self.clear_protocol() for retry in range(0, MAX_RETRY): for protocol_name in protocols: try: protocol = self._detect_wire_protocol() \ if protocol_name == "WireProtocol" \ else self._detect_metadata_protocol() IOErrorCounter.set_protocol_endpoint( endpoint=protocol.endpoint) return protocol except ProtocolError as e: logger.info("Protocol endpoint not found: {0}, {1}", protocol_name, e) if retry < MAX_RETRY -1: logger.info("Retry detect protocols: retry={0}", retry) time.sleep(PROBE_INTERVAL) raise ProtocolNotFoundError("No protocol found.") def _get_protocol(self): """ Get protocol instance based on previous detecting result. 
""" protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) if not os.path.isfile(protocol_file_path): raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) if protocol_name == "WireProtocol": endpoint = self._get_wireserver_endpoint() return WireProtocol(endpoint) elif protocol_name == "MetadataProtocol": return MetadataProtocol() else: raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) def save_protocol(self, protocol_name): """ Save protocol endpoint """ protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) try: fileutil.write_file(protocol_file_path, protocol_name) except IOError as e: logger.error("Failed to save protocol endpoint: {0}", e) def clear_protocol(self): """ Cleanup previous saved endpoint. """ logger.info("Clean protocol") self.protocol = None protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) if not os.path.isfile(protocol_file_path): return try: os.remove(protocol_file_path) except IOError as e: # Ignore file-not-found errors (since the file is being removed) if e.errno == errno.ENOENT: return logger.error("Failed to clear protocol endpoint: {0}", e) def get_protocol(self): """ Detect protocol by endpoints :returns: protocol instance """ self.lock.acquire() try: if self.protocol is not None: return self.protocol try: self.protocol = self._get_protocol() return self.protocol except ProtocolNotFoundError: pass logger.info("Detect protocol endpoints") protocols = ["WireProtocol", "MetadataProtocol"] self.protocol = self._detect_protocol(protocols) return self.protocol finally: self.lock.release() def get_protocol_by_file(self): """ Detect protocol by tag file. If a file "useMetadataEndpoint.tag" is found on provision iso, metedata protocol will be used. No need to probe for wire protocol :returns: protocol instance """ self.lock.acquire() try: if self.protocol is not None: return self.protocol try: self.protocol = self._get_protocol() return self.protocol except ProtocolNotFoundError: pass logger.info("Detect protocol by file") tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) protocols = [] if os.path.isfile(tag_file_path): protocols.append("MetadataProtocol") else: protocols.append("WireProtocol") self.protocol = self._detect_protocol(protocols) return self.protocol finally: self.lock.release() WALinuxAgent-2.2.20/azurelinuxagent/common/protocol/wire.py000066400000000000000000001640311322477356400240230ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ import json import os import re import time import xml.sax.saxutils as saxutils import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import ProtocolNotFoundError, \ ResourceGoneError from azurelinuxagent.common.future import httpclient, bytebuffer from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \ findtext, getattrib, gettext, remove_bom, get_bytes_from_pem, parse_json VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" HEALTH_REPORT_URI = "http://{0}/machine?comp=health" ROLE_PROP_URI = "http://{0}/machine?comp=roleProperties" TELEMETRY_URI = "http://{0}/machine?comp=telemetrydata" WIRE_SERVER_ADDR_FILE_NAME = "WireServer" INCARNATION_FILE_NAME = "Incarnation" GOAL_STATE_FILE_NAME = "GoalState.{0}.xml" HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" SHARED_CONF_FILE_NAME = "SharedConfig.xml" CERTS_FILE_NAME = "Certificates.xml" P7M_FILE_NAME = "Certificates.p7m" PEM_FILE_NAME = "Certificates.pem" EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" MANIFEST_FILE_NAME = "{0}.{1}.manifest.xml" AGENTS_MANIFEST_FILE_NAME = "{0}.{1}.agentsManifest" TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" PROTOCOL_VERSION = "2012-11-30" ENDPOINT_FINE_NAME = "WireServer" SHORT_WAITING_INTERVAL = 1 # 1 second class UploadError(HttpError): pass class WireProtocol(Protocol): """Slim layer to adapt wire protocol data to metadata protocol interface""" # TODO: Clean-up goal state processing # At present, some methods magically update GoalState (e.g., # get_vmagent_manifests), others (e.g., get_vmagent_pkgs) # assume its presence. A better approach would make an explicit update # call that returns the incarnation number and # establishes that number the "context" for all other calls (either by # updating the internal state of the protocol or # by having callers pass the incarnation number to the method). 
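# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the agent): how the wire-protocol URIs and
# per-incarnation cache file names above expand for a given wireserver
# endpoint and incarnation (both placeholder values here).
# ---------------------------------------------------------------------------
GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate"
GOAL_STATE_FILE_NAME = "GoalState.{0}.xml"
EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml"

endpoint = "168.63.129.16"          # placeholder wireserver address
incarnation = "12"                  # placeholder incarnation from the goal state

goal_state_uri = GOAL_STATE_URI.format(endpoint)
# -> http://168.63.129.16/machine/?comp=goalstate
cached_goal_state = GOAL_STATE_FILE_NAME.format(incarnation)  # GoalState.12.xml
cached_ext_conf = EXT_CONF_FILE_NAME.format(incarnation)      # ExtensionsConfig.12.xml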
def __init__(self, endpoint): if endpoint is None: raise ProtocolError("WireProtocol endpoint is None") self.endpoint = endpoint self.client = WireClient(self.endpoint) def detect(self): self.client.check_wire_protocol_version() trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) self.update_goal_state(forced=True) def update_goal_state(self, forced=False, max_retry=3): self.client.update_goal_state(forced=forced, max_retry=max_retry) def get_vminfo(self): goal_state = self.client.get_goal_state() hosting_env = self.client.get_hosting_env() vminfo = VMInfo() vminfo.subscriptionId = None vminfo.vmName = hosting_env.vm_name vminfo.tenantName = hosting_env.deployment_name vminfo.roleName = hosting_env.role_name vminfo.roleInstanceName = goal_state.role_instance_id vminfo.containerId = goal_state.container_id return vminfo def get_certs(self): certificates = self.client.get_certs() return certificates.cert_list def get_incarnation(self): path = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) if os.path.exists(path): return fileutil.read_file(path) else: return 0 def get_vmagent_manifests(self): # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() return ext_conf.vmagent_manifests, goal_state.incarnation def get_vmagent_pkgs(self, vmagent_manifest): goal_state = self.client.get_goal_state() ga_manifest = self.client.get_gafamily_manifest(vmagent_manifest, goal_state) valid_pkg_list = self.client.filter_package_list(vmagent_manifest.family, ga_manifest, goal_state) return valid_pkg_list def get_ext_handlers(self): logger.verbose("Get extension handler config") # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() # In wire protocol, incarnation is equivalent to ETag return ext_conf.ext_handlers, goal_state.incarnation def get_ext_handler_pkgs(self, ext_handler): logger.verbose("Get extension handler package") goal_state = self.client.get_goal_state() man = self.client.get_ext_manifest(ext_handler, goal_state) return man.pkg_list def get_artifacts_profile(self): logger.verbose("Get In-VM Artifacts Profile") return self.client.get_artifacts_profile() def download_ext_handler_pkg(self, uri, headers=None): package = super(WireProtocol, self).download_ext_handler_pkg(uri) if package is not None: return package else: logger.warn("Download did not succeed, falling back to host plugin") host = self.client.get_host_plugin() uri, headers = host.get_artifact_request(uri, host.manifest_uri) package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers) return package def report_provision_status(self, provision_status): validate_param("provision_status", provision_status, ProvisionStatus) if provision_status.status is not None: self.client.report_health(provision_status.status, provision_status.subStatus, provision_status.description) if provision_status.properties.certificateThumbprint is not None: thumbprint = provision_status.properties.certificateThumbprint self.client.report_role_prop(thumbprint) def report_vm_status(self, vm_status): validate_param("vm_status", vm_status, VMStatus) self.client.status_blob.set_vm_status(vm_status) self.client.upload_status_blob() def 
report_ext_status(self, ext_handler_name, ext_name, ext_status): validate_param("ext_status", ext_status, ExtensionStatus) self.client.status_blob.set_ext_status(ext_handler_name, ext_status) def report_event(self, events): validate_param("events", events, TelemetryEventList) self.client.report_event(events) def _build_role_properties(container_id, role_instance_id, thumbprint): xml = (u"" u"" u"" u"{0}" u"" u"" u"{1}" u"" u"" u"" u"" u"" u"" u"" u"").format(container_id, role_instance_id, thumbprint) return xml def _build_health_report(incarnation, container_id, role_instance_id, status, substatus, description): # Escape '&', '<' and '>' description = saxutils.escape(ustr(description)) detail = u'' if substatus is not None: substatus = saxutils.escape(ustr(substatus)) detail = (u"
" u"{0}" u"{1}" u"
").format(substatus, description) xml = (u"" u"" u"{0}" u"" u"{1}" u"" u"" u"{2}" u"" u"{3}" u"{4}" u"" u"" u"" u"" u"" u"").format(incarnation, container_id, role_instance_id, status, detail) return xml """ Convert VMStatus object to status blob format """ def ga_status_to_guest_info(ga_status): v1_ga_guest_info = { "computerName" : ga_status.hostname, "osName" : ga_status.osname, "osVersion" : ga_status.osversion, "version" : ga_status.version, } return v1_ga_guest_info def ga_status_to_v1(ga_status): formatted_msg = { 'lang': 'en-US', 'message': ga_status.message } v1_ga_status = { "version" : ga_status.version, "status" : ga_status.status, "formattedMessage" : formatted_msg } return v1_ga_status def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: status = { "name": substatus.name, "status": substatus.status, "code": substatus.code, "formattedMessage": { "lang": "en-US", "message": substatus.message } } status_list.append(status) return status_list def ext_status_to_v1(ext_name, ext_status): if ext_status is None: return None timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) v1_ext_status = { "status": { "name": ext_name, "configurationAppliedTime": ext_status.configurationAppliedTime, "operation": ext_status.operation, "status": ext_status.status, "code": ext_status.code, "formattedMessage": { "lang": "en-US", "message": ext_status.message } }, "version": 1.0, "timestampUTC": timestamp } if len(v1_sub_status) != 0: v1_ext_status['status']['substatus'] = v1_sub_status return v1_ext_status def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): v1_handler_status = { 'handlerVersion': handler_status.version, 'handlerName': handler_status.name, 'status': handler_status.status, 'code': handler_status.code } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { "lang": "en-US", "message": handler_status.message } if handler_status.upgradeGuid is not None: v1_handler_status["upgradeGuid"] = handler_status.upgradeGuid if len(handler_status.extensions) > 0: # Currently, no more than one extension per handler ext_name = handler_status.extensions[0] ext_status = ext_statuses.get(ext_name) v1_ext_status = ext_status_to_v1(ext_name, ext_status) if ext_status is not None and v1_ext_status is not None: v1_handler_status["runtimeSettingsStatus"] = { 'settingsStatus': v1_ext_status, 'sequenceNumber': ext_status.sequenceNumber } return v1_handler_status def vm_status_to_v1(vm_status, ext_statuses): timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_ga_guest_info = ga_status_to_guest_info(vm_status.vmAgent) v1_ga_status = ga_status_to_v1(vm_status.vmAgent) v1_handler_status_list = [] for handler_status in vm_status.vmAgent.extensionHandlers: v1_handler_status = ext_handler_status_to_v1(handler_status, ext_statuses, timestamp) if v1_handler_status is not None: v1_handler_status_list.append(v1_handler_status) v1_agg_status = { 'guestAgentStatus': v1_ga_status, 'handlerAggregateStatus': v1_handler_status_list } v1_vm_status = { 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, 'guestOSInfo' : v1_ga_guest_info } return v1_vm_status class StatusBlob(object): def __init__(self, client): self.vm_status = None self.ext_statuses = {} self.client = client self.type = None self.data = None def set_vm_status(self, vm_status): validate_param("vmAgent", vm_status, VMStatus) self.vm_status = vm_status def 
set_ext_status(self, ext_handler_name, ext_status): validate_param("extensionStatus", ext_status, ExtensionStatus) self.ext_statuses[ext_handler_name] = ext_status def to_json(self): report = vm_status_to_v1(self.vm_status, self.ext_statuses) return json.dumps(report) __storage_version__ = "2014-02-14" def prepare(self, blob_type): logger.verbose("Prepare status blob") self.data = self.to_json() self.type = blob_type def upload(self, url): try: if not self.type in ["BlockBlob", "PageBlob"]: raise ProtocolError("Illegal blob type: {0}".format(self.type)) if self.type == "BlockBlob": self.put_block_blob(url, self.data) else: self.put_page_blob(url, self.data) return True except Exception as e: logger.verbose("Initial status upload failed: {0}", e) return False def get_block_blob_headers(self, blob_size): return { "Content-Length": ustr(blob_size), "x-ms-blob-type": "BlockBlob", "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-version": self.__class__.__storage_version__ } def put_block_blob(self, url, data): logger.verbose("Put block blob") headers = self.get_block_blob_headers(len(data)) resp = self.client.call_storage_service(restutil.http_put, url, data, headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to upload block blob: {0}".format(resp.status)) def get_page_blob_create_headers(self, blob_size): return { "Content-Length": "0", "x-ms-blob-content-length": ustr(blob_size), "x-ms-blob-type": "PageBlob", "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-version": self.__class__.__storage_version__ } def get_page_blob_page_headers(self, start, end): return { "Content-Length": ustr(end - start), "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-range": "bytes={0}-{1}".format(start, end - 1), "x-ms-page-write": "update", "x-ms-version": self.__class__.__storage_version__ } def put_page_blob(self, url, data): logger.verbose("Put page blob") # Convert string into bytes and align to 512 bytes data = bytearray(data, encoding='utf-8') page_blob_size = int((len(data) + 511) / 512) * 512 headers = self.get_page_blob_create_headers(page_blob_size) resp = self.client.call_storage_service(restutil.http_put, url, "", headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to clean up page blob: {0}".format(resp.status)) if url.count("?") <= 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) logger.verbose("Upload page blob") page_max = 4 * 1024 * 1024 # Max page size: 4MB start = 0 end = 0 while end < len(data): end = min(len(data), start + page_max) content_size = end - start # Align to 512 bytes page_end = int((end + 511) / 512) * 512 buf_size = page_end - start buf = bytearray(buf_size) buf[0: content_size] = data[start: end] headers = self.get_page_blob_page_headers(start, page_end) resp = self.client.call_storage_service( restutil.http_put, url, bytebuffer(buf), headers) if resp is None or resp.status != httpclient.CREATED: raise UploadError( "Failed to upload page blob: {0}".format(resp.status)) start = end def event_param_to_v1(param): param_format = '' param_type = type(param.value) attr_type = "" if param_type is int: attr_type = 'mt:uint64' elif param_type is str: attr_type = 'mt:wstr' elif ustr(param_type).count("'unicode'") > 0: attr_type = 'mt:wstr' elif param_type is bool: attr_type = 'mt:bool' elif param_type is float: attr_type = 'mt:float64' return param_format.format(param.name, saxutils.quoteattr(ustr(param.value)), attr_type) def event_to_v1(event): 
params = "" for param in event.parameters: params += event_param_to_v1(param) event_str = ('' '' '').format(event.eventId, params) return event_str class WireClient(object): def __init__(self, endpoint): logger.info("Wire server endpoint:{0}", endpoint) self.endpoint = endpoint self.goal_state = None self.updated = None self.hosting_env = None self.shared_conf = None self.certs = None self.ext_conf = None self.host_plugin = None self.status_blob = StatusBlob(self) def call_wireserver(self, http_req, *args, **kwargs): try: # Never use the HTTP proxy for wireserver kwargs['use_proxy'] = False resp = http_req(*args, **kwargs) if restutil.request_failed(resp): msg = "[Wireserver Failed] URI {0} ".format(args[0]) if resp is not None: msg += " [HTTP Failed] Status Code {0}".format(resp.status) raise ProtocolError(msg) # If the GoalState is stale, pass along the exception to the caller except ResourceGoneError: raise except Exception as e: raise ProtocolError("[Wireserver Exception] {0}".format( ustr(e))) return resp def decode_config(self, data): if data is None: return None data = remove_bom(data) xml_text = ustr(data, encoding='utf-8') return xml_text def fetch_config(self, uri, headers): resp = self.call_wireserver(restutil.http_get, uri, headers=headers) return self.decode_config(resp.read()) def fetch_cache(self, local_file): if not os.path.isfile(local_file): raise ProtocolError("{0} is missing.".format(local_file)) try: return fileutil.read_file(local_file) except IOError as e: raise ProtocolError("Failed to read cache: {0}".format(e)) def save_cache(self, local_file, data): try: fileutil.write_file(local_file, data) except IOError as e: fileutil.clean_ioerror(e, paths=[local_file]) raise ProtocolError("Failed to write cache: {0}".format(e)) @staticmethod def call_storage_service(http_req, *args, **kwargs): # Default to use the configured HTTP proxy if not 'use_proxy' in kwargs or kwargs['use_proxy'] is None: kwargs['use_proxy'] = True return http_req(*args, **kwargs) def fetch_manifest(self, version_uris): logger.verbose("Fetch manifest") for version in version_uris: response = None if not HostPluginProtocol.is_default_channel(): response = self.fetch(version.uri) if not response: if HostPluginProtocol.is_default_channel(): logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download manifest, " "switching to host plugin") try: host = self.get_host_plugin() uri, headers = host.get_artifact_request(version.uri) response = self.fetch(uri, headers, use_proxy=False) # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin except ResourceGoneError: HostPluginProtocol.set_default_channel(True) raise host.manifest_uri = version.uri logger.verbose("Manifest downloaded successfully from host plugin") if not HostPluginProtocol.is_default_channel(): logger.info("Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) if response: return response raise ProtocolError("Failed to fetch manifest from all sources") def fetch(self, uri, headers=None, use_proxy=None): logger.verbose("Fetch [{0}] with headers [{1}]", uri, headers) try: resp = self.call_storage_service( restutil.http_get, uri, headers=headers, use_proxy=use_proxy) if restutil.request_failed(resp): msg = "[Storage Failed] URI {0} ".format(uri) if resp is not None: msg += restutil.read_response_error(resp) logger.warn(msg) raise ProtocolError(msg) return self.decode_config(resp.read()) except (HttpError, ProtocolError) as e: 
logger.verbose("Fetch failed from [{0}]: {1}", uri, e) if isinstance(e, ResourceGoneError): raise return None def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: raise ProtocolError("HostingEnvironmentConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_config(goal_state.hosting_env_uri, self.get_header()) self.save_cache(local_file, xml_text) self.hosting_env = HostingEnv(xml_text) def update_shared_conf(self, goal_state): if goal_state.shared_conf_uri is None: raise ProtocolError("SharedConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_config(goal_state.shared_conf_uri, self.get_header()) self.save_cache(local_file, xml_text) self.shared_conf = SharedConfig(xml_text) def update_certs(self, goal_state): if goal_state.certs_uri is None: return local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_config(goal_state.certs_uri, self.get_header_for_cert()) self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) def update_ext_conf(self, goal_state): if goal_state.ext_uri is None: logger.info("ExtensionsConfig.xml uri is empty") self.ext_conf = ExtensionsConfig(None) return incarnation = goal_state.incarnation local_file = os.path.join(conf.get_lib_dir(), EXT_CONF_FILE_NAME.format(incarnation)) xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) self.save_cache(local_file, xml_text) self.ext_conf = ExtensionsConfig(xml_text) def update_goal_state(self, forced=False, max_retry=3): incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) uri = GOAL_STATE_URI.format(self.endpoint) # Start updating goalstate, retry on 410 fetch_goal_state = True for retry in range(0, max_retry): try: if fetch_goal_state: fetch_goal_state = False xml_text = self.fetch_config(uri, self.get_header()) goal_state = GoalState(xml_text) if not forced: last_incarnation = None if os.path.isfile(incarnation_file): last_incarnation = fileutil.read_file( incarnation_file) new_incarnation = goal_state.incarnation if last_incarnation is not None and \ last_incarnation == new_incarnation: # Goalstate is not updated. 
return self.goal_state = goal_state file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) goal_state_file = os.path.join(conf.get_lib_dir(), file_name) self.save_cache(goal_state_file, xml_text) self.update_hosting_env(goal_state) self.update_shared_conf(goal_state) self.update_certs(goal_state) self.update_ext_conf(goal_state) self.save_cache(incarnation_file, goal_state.incarnation) if self.host_plugin is not None: self.host_plugin.container_id = goal_state.container_id self.host_plugin.role_config_name = goal_state.role_config_name return except ResourceGoneError: logger.info("GoalState is stale -- re-fetching") fetch_goal_state = True except Exception as e: log_method = logger.info \ if type(e) is ProtocolError \ else logger.warn log_method( "Exception processing GoalState-related files: {0}".format( ustr(e))) if retry < max_retry-1: continue raise raise ProtocolError("Exceeded max retry updating goal state") def get_goal_state(self): if self.goal_state is None: incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) incarnation = self.fetch_cache(incarnation_file) file_name = GOAL_STATE_FILE_NAME.format(incarnation) goal_state_file = os.path.join(conf.get_lib_dir(), file_name) xml_text = self.fetch_cache(goal_state_file) self.goal_state = GoalState(xml_text) return self.goal_state def get_hosting_env(self): if self.hosting_env is None: local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_cache(local_file) self.hosting_env = HostingEnv(xml_text) return self.hosting_env def get_shared_conf(self): if self.shared_conf is None: local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_cache(local_file) self.shared_conf = SharedConfig(xml_text) return self.shared_conf def get_certs(self): if self.certs is None: local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_cache(local_file) self.certs = Certificates(self, xml_text) if self.certs is None: return None return self.certs def get_ext_conf(self): if self.ext_conf is None: goal_state = self.get_goal_state() if goal_state.ext_uri is None: self.ext_conf = ExtensionsConfig(None) else: local_file = EXT_CONF_FILE_NAME.format(goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_cache(local_file) self.ext_conf = ExtensionsConfig(xml_text) return self.ext_conf def get_ext_manifest(self, ext_handler, goal_state): for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) goal_state = self.get_goal_state() local_file = MANIFEST_FILE_NAME.format( ext_handler.name, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_manifest(ext_handler.versionUris) self.save_cache(local_file, xml_text) return ExtensionManifest(xml_text) except ResourceGoneError: continue raise ProtocolError("Failed to retrieve extension manifest") def filter_package_list(self, family, ga_manifest, goal_state): complete_list = ga_manifest.pkg_list agent_manifest = os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( family, goal_state.incarnation)) if not os.path.exists(agent_manifest): # clear memory cache ga_manifest.allowed_versions = None # create disk cache with open(agent_manifest, mode='w') as manifest_fh: for version in complete_list.versions: manifest_fh.write('{0}\n'.format(version.version)) fileutil.chmod(agent_manifest, 0o644) return complete_list else: # use allowed versions from 
cache, otherwise from disk if ga_manifest.allowed_versions is None: with open(agent_manifest, mode='r') as manifest_fh: ga_manifest.allowed_versions = [v.strip('\n') for v in manifest_fh.readlines()] # use the updated manifest urls for allowed versions allowed_list = ExtHandlerPackageList() allowed_list.versions = [version for version in complete_list.versions if version.version in ga_manifest.allowed_versions] return allowed_list def get_gafamily_manifest(self, vmagent_manifest, goal_state): for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) goal_state = self.get_goal_state() local_file = MANIFEST_FILE_NAME.format( vmagent_manifest.family, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_manifest( vmagent_manifest.versionsManifestUris) fileutil.write_file(local_file, xml_text) return ExtensionManifest(xml_text) except ResourceGoneError: continue raise ProtocolError("Failed to retrieve GAFamily manifest") def check_wire_protocol_version(self): uri = VERSION_INFO_URI.format(self.endpoint) version_info_xml = self.fetch_config(uri, None) version_info = VersionInfo(version_info_xml) preferred = version_info.get_preferred() if PROTOCOL_VERSION == preferred: logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) elif PROTOCOL_VERSION in version_info.get_supported(): logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) logger.warn("Server preferred version:{0}", preferred) else: error = ("Agent supported wire protocol version: {0} was not " "advised by Fabric.").format(PROTOCOL_VERSION) raise ProtocolNotFoundError(error) def upload_status_blob(self): for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) ext_conf = self.get_ext_conf() blob_uri = ext_conf.status_upload_blob blob_type = ext_conf.status_upload_blob_type if blob_uri is not None: if not blob_type in ["BlockBlob", "PageBlob"]: blob_type = "BlockBlob" logger.verbose("Status Blob type is unspecified " "-- assuming it is a BlockBlob") try: self.status_blob.prepare(blob_type) except Exception as e: self.report_status_event( "Exception creating status blob: {0}", ustr(e)) return if not HostPluginProtocol.is_default_channel(): try: if self.status_blob.upload(blob_uri): return except HttpError as e: pass host = self.get_host_plugin() host.put_vm_status(self.status_blob, ext_conf.status_upload_blob, ext_conf.status_upload_blob_type) HostPluginProtocol.set_default_channel(True) return except Exception as e: # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin if isinstance(e, ResourceGoneError): HostPluginProtocol.set_default_channel(True) continue self.report_status_event( "Exception uploading status blob: {0}", ustr(e)) return def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() role_prop = _build_role_properties(goal_state.container_id, goal_state.role_instance_id, thumbprint) role_prop = role_prop.encode("utf-8") role_prop_uri = ROLE_PROP_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: resp = self.call_wireserver(restutil.http_post, role_prop_uri, role_prop, headers=headers) except HttpError as e: raise ProtocolError((u"Failed to send role properties: " u"{0}").format(e)) if resp.status != httpclient.ACCEPTED: raise ProtocolError((u"Failed to send role properties: " u",{0}: {1}").format(resp.status, resp.read())) def report_health(self, status, substatus, description): goal_state = 
self.get_goal_state() health_report = _build_health_report(goal_state.incarnation, goal_state.container_id, goal_state.role_instance_id, status, substatus, description) health_report = health_report.encode("utf-8") health_report_uri = HEALTH_REPORT_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: # 30 retries with 10s sleep gives ~5min for wireserver updates; # this is retried 3 times with 15s sleep before throwing a # ProtocolError, for a total of ~15min. resp = self.call_wireserver(restutil.http_post, health_report_uri, health_report, headers=headers, max_retry=30, retry_delay=15) except HttpError as e: raise ProtocolError((u"Failed to send provision status: " u"{0}").format(e)) if restutil.request_failed(resp): raise ProtocolError((u"Failed to send provision status: " u",{0}: {1}").format(resp.status, resp.read())) def send_event(self, provider_id, event_str): uri = TELEMETRY_URI.format(self.endpoint) data_format = ('' '' '{1}' '' '') data = data_format.format(provider_id, event_str) try: header = self.get_header_for_xml_content() resp = self.call_wireserver(restutil.http_post, uri, data, header) except HttpError as e: raise ProtocolError("Failed to send events:{0}".format(e)) if restutil.request_failed(resp): logger.verbose(resp.read()) raise ProtocolError( "Failed to send events:{0}".format(resp.status)) def report_event(self, event_list): buf = {} # Group events by providerId for event in event_list.events: if event.providerId not in buf: buf[event.providerId] = "" event_str = event_to_v1(event) if len(event_str) >= 63 * 1024: logger.warn("Single event too large: {0}", event_str[300:]) continue if len(buf[event.providerId] + event_str) >= 63 * 1024: self.send_event(event.providerId, buf[event.providerId]) buf[event.providerId] = "" buf[event.providerId] = buf[event.providerId] + event_str # Send out all events left in buffer. 
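# Anything still buffered is below the ~63 KB-per-request threshold enforced
# above, so each remaining provider batch can be sent as a single request.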
for provider_id in list(buf.keys()): if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) def report_status_event(self, message, *args): from azurelinuxagent.common.event import report_event, \ WALAEventOperation message = message.format(*args) logger.warn(message) report_event(op=WALAEventOperation.ReportStatus, is_success=False, message=message) def get_header(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION } def get_header_for_xml_content(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "Content-Type": "text/xml;charset=utf-8" } def get_header_for_cert(self): trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) content = self.fetch_cache(trans_cert_file) cert = get_bytes_from_pem(content) return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "x-ms-cipher-name": "DES_EDE3_CBC", "x-ms-guest-agent-public-x509-cert": cert } def get_host_plugin(self): if self.host_plugin is None: goal_state = self.get_goal_state() self.host_plugin = HostPluginProtocol(self.endpoint, goal_state.container_id, goal_state.role_config_name) return self.host_plugin def has_artifacts_profile_blob(self): return self.ext_conf and not \ textutil.is_str_none_or_whitespace(self.ext_conf.artifacts_profile_blob) def get_artifacts_profile(self): artifacts_profile = None for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) if self.has_artifacts_profile_blob(): blob = self.ext_conf.artifacts_profile_blob profile = None if not HostPluginProtocol.is_default_channel(): logger.verbose("Retrieving the artifacts profile") profile = self.fetch(blob) if profile is None: if HostPluginProtocol.is_default_channel(): logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download artifacts profile, " "switching to host plugin") host = self.get_host_plugin() uri, headers = host.get_artifact_request(blob) config = self.fetch(uri, headers, use_proxy=False) profile = self.decode_config(config) if not textutil.is_str_none_or_whitespace(profile): logger.verbose("Artifacts profile downloaded") artifacts_profile = InVMArtifactsProfile(profile) return artifacts_profile except ResourceGoneError: HostPluginProtocol.set_default_channel(True) continue except Exception as e: logger.warn( "Exception retrieving artifacts profile: {0}".format( ustr(e))) return None class VersionInfo(object): def __init__(self, xml_text): """ Query endpoint server for wire protocol version. Fail if our desired protocol version is not seen. 
""" logger.verbose("Load Version.xml") self.parse(xml_text) def parse(self, xml_text): xml_doc = parse_doc(xml_text) preferred = find(xml_doc, "Preferred") self.preferred = findtext(preferred, "Version") logger.info("Fabric preferred wire protocol version:{0}", self.preferred) self.supported = [] supported = find(xml_doc, "Supported") supported_version = findall(supported, "Version") for node in supported_version: version = gettext(node) logger.verbose("Fabric supported wire protocol version:{0}", version) self.supported.append(version) def get_preferred(self): return self.preferred def get_supported(self): return self.supported class GoalState(object): def __init__(self, xml_text): if xml_text is None: raise ValueError("GoalState.xml is None") logger.verbose("Load GoalState.xml") self.incarnation = None self.expected_state = None self.hosting_env_uri = None self.shared_conf_uri = None self.certs_uri = None self.ext_uri = None self.role_instance_id = None self.role_config_name = None self.container_id = None self.load_balancer_probe_port = None self.xml_text = None self.parse(xml_text) def parse(self, xml_text): """ Request configuration data from endpoint server. """ self.xml_text = xml_text xml_doc = parse_doc(xml_text) self.incarnation = findtext(xml_doc, "Incarnation") self.expected_state = findtext(xml_doc, "ExpectedState") self.hosting_env_uri = findtext(xml_doc, "HostingEnvironmentConfig") self.shared_conf_uri = findtext(xml_doc, "SharedConfig") self.certs_uri = findtext(xml_doc, "Certificates") self.ext_uri = findtext(xml_doc, "ExtensionsConfig") role_instance = find(xml_doc, "RoleInstance") self.role_instance_id = findtext(role_instance, "InstanceId") role_config = find(role_instance, "Configuration") self.role_config_name = findtext(role_config, "ConfigName") container = find(xml_doc, "Container") self.container_id = findtext(container, "ContainerId") lbprobe_ports = find(xml_doc, "LBProbePorts") self.load_balancer_probe_port = findtext(lbprobe_ports, "Port") return self class HostingEnv(object): """ parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml """ def __init__(self, xml_text): if xml_text is None: raise ValueError("HostingEnvironmentConfig.xml is None") logger.verbose("Load HostingEnvironmentConfig.xml") self.vm_name = None self.role_name = None self.deployment_name = None self.xml_text = None self.parse(xml_text) def parse(self, xml_text): """ parse and create HostingEnvironmentConfig.xml. """ self.xml_text = xml_text xml_doc = parse_doc(xml_text) incarnation = find(xml_doc, "Incarnation") self.vm_name = getattrib(incarnation, "instance") role = find(xml_doc, "Role") self.role_name = getattrib(role, "name") deployment = find(xml_doc, "Deployment") self.deployment_name = getattrib(deployment, "name") return self class SharedConfig(object): """ parse role endpoint server and goal state config. """ def __init__(self, xml_text): logger.verbose("Load SharedConfig.xml") self.parse(xml_text) def parse(self, xml_text): """ parse and write configuration to file SharedConfig.xml. """ # Not used currently return self class Certificates(object): """ Object containing certificates of host and provisioned user. """ def __init__(self, client, xml_text): logger.verbose("Load Certificates.xml") self.client = client self.cert_list = CertList() self.parse(xml_text) def parse(self, xml_text): """ Parse multiple certificates into seperate files. 
""" xml_doc = parse_doc(xml_text) data = findtext(xml_doc, "Data") if data is None: return cryptutil = CryptUtil(conf.get_openssl_cmd()) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" "Content-Disposition: attachment; filename=\"{0}\"\n" "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" "Content-Transfer-Encoding: base64\n" "\n" "{2}").format(p7m_file, p7m_file, data) self.client.save_cache(p7m_file, p7m) trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) # decrypt certificates cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, pem_file) # The parsing process use public key to match prv and crt. buf = [] begin_crt = False begin_prv = False prvs = {} thumbprints = {} index = 0 v1_cert_list = [] with open(pem_file) as pem: for line in pem.readlines(): buf.append(line) if re.match(r'[-]+BEGIN.*KEY[-]+', line): begin_prv = True elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): begin_crt = True elif re.match(r'[-]+END.*KEY[-]+', line): tmp_file = self.write_to_tmp_file(index, 'prv', buf) pub = cryptutil.get_pubkey_from_prv(tmp_file) prvs[pub] = tmp_file buf = [] index += 1 begin_prv = False elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): tmp_file = self.write_to_tmp_file(index, 'crt', buf) pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ "name": None, "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = thumbprints[pubkey] if thumbprint: tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) for v1_cert in v1_cert_list: cert = Cert() set_properties("certs", cert, v1_cert) self.cert_list.certificates.append(cert) def write_to_tmp_file(self, index, suffix, buf): file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.client.save_cache(file_name, "".join(buf)) return file_name class ExtensionsConfig(object): """ parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if true, remove if it is set to false. """ def __init__(self, xml_text): logger.verbose("Load ExtensionsConfig.xml") self.ext_handlers = ExtHandlerList() self.vmagent_manifests = VMAgentManifestList() self.status_upload_blob = None self.status_upload_blob_type = None self.artifacts_profile_blob = None if xml_text is not None: self.parse(xml_text) def parse(self, xml_text): """ Write configuration to file ExtensionsConfig.xml. 
""" xml_doc = parse_doc(xml_text) ga_families_list = find(xml_doc, "GAFamilies") ga_families = findall(ga_families_list, "GAFamily") for ga_family in ga_families: family = findtext(ga_family, "Name") uris_list = find(ga_family, "Uris") uris = findall(uris_list, "Uri") manifest = VMAgentManifest() manifest.family = family for uri in uris: manifestUri = VMAgentManifestUri(uri=gettext(uri)) manifest.versionsManifestUris.append(manifestUri) self.vmagent_manifests.vmAgentManifests.append(manifest) plugins_list = find(xml_doc, "Plugins") plugins = findall(plugins_list, "Plugin") plugin_settings_list = find(xml_doc, "PluginSettings") plugin_settings = findall(plugin_settings_list, "Plugin") for plugin in plugins: ext_handler = self.parse_plugin(plugin) self.ext_handlers.extHandlers.append(ext_handler) self.parse_plugin_settings(ext_handler, plugin_settings) self.status_upload_blob = findtext(xml_doc, "StatusUploadBlob") self.artifacts_profile_blob = findtext(xml_doc, "InVMArtifactsProfileBlob") status_upload_node = find(xml_doc, "StatusUploadBlob") self.status_upload_blob_type = getattrib(status_upload_node, "statusBlobType") logger.verbose("Extension config shows status blob type as [{0}]", self.status_upload_blob_type) def parse_plugin(self, plugin): ext_handler = ExtHandler() ext_handler.name = getattrib(plugin, "name") ext_handler.properties.version = getattrib(plugin, "version") ext_handler.properties.state = getattrib(plugin, "state") ext_handler.properties.upgradeGuid = getattrib(plugin, "upgradeGuid") if not ext_handler.properties.upgradeGuid: ext_handler.properties.upgradeGuid = None auto_upgrade = getattrib(plugin, "autoUpgrade") if auto_upgrade is not None and auto_upgrade.lower() == "true": ext_handler.properties.upgradePolicy = "auto" else: ext_handler.properties.upgradePolicy = "manual" location = getattrib(plugin, "location") failover_location = getattrib(plugin, "failoverlocation") for uri in [location, failover_location]: version_uri = ExtHandlerVersionUri() version_uri.uri = uri ext_handler.versionUris.append(version_uri) return ext_handler def parse_plugin_settings(self, ext_handler, plugin_settings): if plugin_settings is None: return name = ext_handler.name version = ext_handler.properties.version settings = [x for x in plugin_settings \ if getattrib(x, "name") == name and \ getattrib(x, "version") == version] if settings is None or len(settings) == 0: return runtime_settings = None runtime_settings_node = find(settings[0], "RuntimeSettings") seqNo = getattrib(runtime_settings_node, "seqNo") runtime_settings_str = gettext(runtime_settings_node) try: runtime_settings = json.loads(runtime_settings_str) except ValueError as e: logger.error("Invalid extension settings") return for plugin_settings_list in runtime_settings["runtimeSettings"]: handler_settings = plugin_settings_list["handlerSettings"] ext = Extension() # There is no "extension name" in wire protocol. 
# Put ext.name = ext_handler.name ext.sequenceNumber = seqNo ext.publicSettings = handler_settings.get("publicSettings") ext.protectedSettings = handler_settings.get("protectedSettings") thumbprint = handler_settings.get( "protectedSettingsCertThumbprint") ext.certificateThumbprint = thumbprint ext_handler.properties.extensions.append(ext) class ExtensionManifest(object): def __init__(self, xml_text): if xml_text is None: raise ValueError("ExtensionManifest is None") logger.verbose("Load ExtensionManifest.xml") self.pkg_list = ExtHandlerPackageList() self.allowed_versions = None self.parse(xml_text) def parse(self, xml_text): xml_doc = parse_doc(xml_text) self._handle_packages(findall(find(xml_doc, "Plugins"), "Plugin"), False) self._handle_packages(findall(find(xml_doc, "InternalPlugins"), "Plugin"), True) def _handle_packages(self, packages, isinternal): for package in packages: version = findtext(package, "Version") disallow_major_upgrade = findtext(package, "DisallowMajorVersionUpgrade") if disallow_major_upgrade is None: disallow_major_upgrade = '' disallow_major_upgrade = disallow_major_upgrade.lower() == "true" uris = find(package, "Uris") uri_list = findall(uris, "Uri") uri_list = [gettext(x) for x in uri_list] pkg = ExtHandlerPackage() pkg.version = version pkg.disallow_major_upgrade = disallow_major_upgrade for uri in uri_list: pkg_uri = ExtHandlerVersionUri() pkg_uri.uri = uri pkg.uris.append(pkg_uri) pkg.isinternal = isinternal self.pkg_list.versions.append(pkg) # Do not extend this class class InVMArtifactsProfile(object): """ deserialized json string of InVMArtifactsProfile. It is expected to contain the following fields: * inVMArtifactsProfileBlobSeqNo * profileId (optional) * onHold (optional) * certificateThumbprint (optional) * encryptedHealthChecks (optional) * encryptedApplicationProfile (optional) """ def __init__(self, artifacts_profile): if not textutil.is_str_none_or_whitespace(artifacts_profile): self.__dict__.update(parse_json(artifacts_profile)) def is_on_hold(self): # hasattr() is not available in Python 2.6 if 'onHold' in self.__dict__: return self.onHold.lower() == 'true' return False WALinuxAgent-2.2.20/azurelinuxagent/common/rdma.py000066400000000000000000000316271322477356400221430ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ Handle packages and modules to enable RDMA for IB networking """ import os import re import time import threading import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf' ] def setup_rdma_device(): logger.verbose("Parsing SharedConfig XML contents for RDMA details") xml_doc = parse_doc( fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) if xml_doc is None: logger.error("Could not parse SharedConfig XML document") return instance_elem = find(xml_doc, "Instance") if not instance_elem: logger.error("Could not find in SharedConfig document") return rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address") if not rdma_ipv4_addr: logger.error( "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document") return rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress") if not rdma_mac_addr: logger.error( "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document") return # add colons to the MAC address (e.g. 00155D33FF1D -> # 00:15:5D:33:FF:1D) rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2] for i in range(0, len(rdma_mac_addr), 2)]) logger.info("Found RDMA details. IPv4={0} MAC={1}".format( rdma_ipv4_addr, rdma_mac_addr)) # Set up the RDMA device with collected informatino RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start() logger.info("RDMA: device is set up") return class RDMAHandler(object): driver_module_name = 'hv_network_direct' @staticmethod def get_rdma_version(): """Retrieve the firmware version information from the system. This depends on information provided by the Linux kernel.""" driver_info_source = '/var/lib/hyperv/.kvp_pool_0' base_kernel_err_msg = 'Kernel does not provide the necessary ' base_kernel_err_msg += 'information or the kvp daemon is not running.' if not os.path.isfile(driver_info_source): error_msg = 'RDMA: Source file "%s" does not exist. ' error_msg += base_kernel_err_msg logger.error(error_msg % driver_info_source) return lines = open(driver_info_source).read() if not lines: error_msg = 'RDMA: Source file "%s" is empty. ' error_msg += base_kernel_err_msg logger.error(error_msg % driver_info_source) return r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines) if r: NdDriverVersion = r.groups()[0] return NdDriverVersion else: error_msg = 'RDMA: NdDriverVersion not found in "%s"' logger.error(error_msg % driver_info_source) return @staticmethod def is_kvp_daemon_running(): """Look for kvp daemon names in ps -ef output and return True/False """ # for centos, the hypervkvpd and the hv_kvp_daemon both are ok. 
# for suse, it uses hv_kvp_daemon kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon'] exitcode, ps_out = shellutil.run_get_output("ps -ef") if exitcode != 0: raise Exception('RDMA: ps -ef failed: %s' % ps_out) for n in kvp_daemon_names: if n in ps_out: logger.info('RDMA: kvp daemon (%s) is running' % n) return True else: logger.verbose('RDMA: kvp daemon (%s) is not running' % n) return False def load_driver_module(self): """Load the kernel driver, this depends on the proper driver to be installed with the install_driver() method""" logger.info("RDMA: probing module '%s'" % self.driver_module_name) result = shellutil.run('modprobe --first-time %s' % self.driver_module_name) if result != 0: error_msg = 'Could not load "%s" kernel module. ' error_msg += 'Run "modprobe --first-time %s" as root for more details' logger.error( error_msg % (self.driver_module_name, self.driver_module_name) ) return False logger.info('RDMA: Loaded the kernel driver successfully.') return True def install_driver(self): """Install the driver. This is distribution specific and must be overwritten in the child implementation.""" logger.error('RDMAHandler.install_driver not implemented') def is_driver_loaded(self): """Check if the network module is loaded in kernel space""" cmd = 'lsmod | grep ^%s' % self.driver_module_name status, loaded_modules = shellutil.run_get_output(cmd) logger.info('RDMA: Checking if the module loaded.') if loaded_modules: logger.info('RDMA: module loaded.') return True logger.info('RDMA: module not loaded.') return False def reboot_system(self): """Reboot the system. This is required as the kernel module for the rdma driver cannot be unloaded with rmmod""" logger.info('RDMA: Rebooting system.') ret = shellutil.run('shutdown -r now') if ret != 0: logger.error('RDMA: Failed to reboot the system') dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf'] class RDMADeviceHandler(object): """ Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma interface. """ rdma_dev = '/dev/hvnd_rdma' device_check_timeout_sec = 120 device_check_interval_sec = 1 ipv4_addr = None mac_adr = None def __init__(self, ipv4_addr, mac_addr): self.ipv4_addr = ipv4_addr self.mac_addr = mac_addr def start(self): """ Start a thread in the background to process the RDMA tasks and returns. """ logger.info("RDMA: starting device processing in the background.") threading.Thread(target=self.process).start() def process(self): try: RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) skip_rdma_device = False module_name = "hv_network_direct" retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False) if retcode == 0: module_name = out.strip() else: logger.info("RDMA: failed to resolve module name. 
Use original name") retcode,out = shellutil.run_get_output("modprobe %s" % module_name) if retcode != 0: logger.error("RDMA: failed to load module %s" % module_name) return retcode,out = shellutil.run_get_output("modinfo %s" % module_name) if retcode == 0: version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) if version: v1 = int(version.groups(0)[0]) v2 = int(version.groups(0)[1]) if v1>4 or v1==4 and v2>0: logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later") skip_rdma_device = True else: logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.") else: logger.warn("RDMA: failed to get module info on hv_network_direct.") if not skip_rdma_device: RDMADeviceHandler.wait_rdma_device( self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) RDMADeviceHandler.write_rdma_config_to_device( self.rdma_dev, self.ipv4_addr, self.mac_addr) RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) except Exception as e: logger.error("RDMA: device processing failed: {0}".format(e)) @staticmethod def update_dat_conf(paths, ipv4_addr): """ Looks at paths for dat.conf file and updates the ip address for the infiniband interface. """ logger.info("Updating DAPL configuration file") for f in paths: logger.info("RDMA: trying {0}".format(f)) if not os.path.isfile(f): logger.info( "RDMA: DAPL config not found at {0}".format(f)) continue logger.info("RDMA: DAPL config is at: {0}".format(f)) cfg = fileutil.read_file(f) new_cfg = RDMADeviceHandler.replace_dat_conf_contents( cfg, ipv4_addr) fileutil.write_file(f, new_cfg) logger.info("RDMA: DAPL configuration is updated") return raise Exception("RDMA: DAPL configuration file not found at predefined paths") @staticmethod def replace_dat_conf_contents(cfg, ipv4_addr): old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( ipv4_addr) return re.sub(old, new, cfg) @staticmethod def write_rdma_config_to_device(path, ipv4_addr, mac_addr): data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr) logger.info( "RDMA: Updating device with configuration: {0}".format(data)) with open(path, "w") as f: logger.info("RDMA: Device opened for writing") f.write(data) logger.info("RDMA: Updated device with IPv4/MAC addr successfully") @staticmethod def generate_rdma_config(ipv4_addr, mac_addr): return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr) @staticmethod def wait_rdma_device(path, timeout_sec, check_interval_sec): logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec)) total_retries = timeout_sec/check_interval_sec n = 0 while n < total_retries: if os.path.exists(path): logger.info("RDMA: device ready") return logger.verbose( "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) time.sleep(check_interval_sec) n += 1 logger.error("RDMA device wait timed out") raise Exception("The device did not show up in {0} seconds ({1} retries)".format( timeout_sec, total_retries)) @staticmethod def update_network_interface(mac_addr, ipv4_addr): netmask=16 logger.info("RDMA: will update the network interface with IPv4/MAC") if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr) logger.info("RDMA: network interface found: {0}", if_name) logger.info("RDMA: bringing network interface up") if shellutil.run("ifconfig {0} up".format(if_name)) != 0: raise Exception("Could not bring up RMDA interface: 
{0}".format(if_name)) logger.info("RDMA: configuring IPv4 addr and netmask on interface") addr = '{0}/{1}'.format(ipv4_addr, netmask) if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0: raise Exception("Could set addr to {1} on {0}".format(if_name, addr)) logger.info("RDMA: network address and netmask configured on interface") @staticmethod def get_interface_by_mac(mac): ret, output = shellutil.run_get_output("ifconfig -a") if ret != 0: raise Exception("Failed to list network interfaces") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("ifname with mac: {0} not found".format(mac)) return eths[-1] WALinuxAgent-2.2.20/azurelinuxagent/common/utils/000077500000000000000000000000001322477356400217755ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/common/utils/__init__.py000066400000000000000000000011661322477356400241120ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/common/utils/cryptutil.py000066400000000000000000000116501322477356400244110ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import base64 import struct from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import CryptError import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil class CryptUtil(object): def __init__(self, openssl_cmd): self.openssl_cmd = openssl_cmd def gen_transport_cert(self, prv_file, crt_file): """ Create ssl certificate for https communication with endpoint server. 
""" cmd = ("{0} req -x509 -nodes -subj /CN=LinuxTransport -days 730 " "-newkey rsa:2048 -keyout {1} " "-out {2}").format(self.openssl_cmd, prv_file, crt_file) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to create {0} and {1} certificates".format( prv_file, crt_file)) def get_pubkey_from_prv(self, file_name): cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, file_name) pub = shellutil.run_get_output(cmd)[1] return pub def get_pubkey_from_crt(self, file_name): cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, file_name) pub = shellutil.run_get_output(cmd)[1] return pub def get_thumbprint_from_crt(self, file_name): cmd="{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, file_name) thumbprint = shellutil.run_get_output(cmd)[1] thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() return thumbprint def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " "| {4} pkcs12 -nodes -password pass: -out {5}" "").format(self.openssl_cmd, p7m_file, trans_prv_file, trans_cert_file, self.openssl_cmd, pem_file) shellutil.run(cmd) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to decrypt {0}".format(p7m_file)) def crt_to_ssh(self, input_file, output_file): shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, output_file)) def asn1_to_ssh(self, pubkey): lines = pubkey.split("\n") lines = [x for x in lines if not x.startswith("----")] base64_encoded = "".join(lines) try: #TODO remove pyasn1 dependency from pyasn1.codec.der import decoder as der_decoder der_encoded = base64.b64decode(base64_encoded) der_encoded = der_decoder.decode(der_encoded)[0][1] key = der_decoder.decode(self.bits_to_bytes(der_encoded))[0] n=key[0] e=key[1] keydata = bytearray() keydata.extend(struct.pack('>I', len("ssh-rsa"))) keydata.extend(b"ssh-rsa") keydata.extend(struct.pack('>I', len(self.num_to_bytes(e)))) keydata.extend(self.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(self.num_to_bytes(n)) + 1)) keydata.extend(b"\0") keydata.extend(self.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) return ustr(b"ssh-rsa " + keydata_base64 + b"\n", encoding='utf-8') except ImportError as e: raise CryptError("Failed to load pyasn1.codec.der") def num_to_bytes(self, num): """ Pack number into bytes. Retun as string. """ result = bytearray() while num: result.append(num & 0xFF) num >>= 8 result.reverse() return result def bits_to_bytes(self, bits): """ Convert an array contains bits, [0,1] to a byte array """ index = 7 byte_array = bytearray() curr = 0 for bit in bits: curr = curr | (bit << index) index = index - 1 if index == -1: byte_array.append(curr) curr = 0 index = 7 return bytes(byte_array) WALinuxAgent-2.2.20/azurelinuxagent/common/utils/fileutil.py000066400000000000000000000150641322477356400241720ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # """ File operation util functions """ import errno as errno import glob import os import pwd import re import shutil import string import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.future import ustr KNOWN_IOERRORS = [ errno.EIO, # I/O error errno.ENOMEM, # Out of memory errno.ENFILE, # File table overflow errno.EMFILE, # Too many open files errno.ENOSPC, # Out of space errno.ENAMETOOLONG, # Name too long errno.ELOOP, # Too many symbolic links encountered 121 # Remote I/O error (errno.EREMOTEIO -- not present in all Python 2.7+) ] def copy_file(from_path, to_path=None, to_dir=None): if to_path is None: to_path = os.path.join(to_dir, os.path.basename(from_path)) shutil.copyfile(from_path, to_path) return to_path def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): """ Read and return contents of 'filepath'. """ mode = 'rb' with open(filepath, mode) as in_file: data = in_file.read() if data is None: return None if asbin: return data if remove_bom: # remove bom on bytes data before it is converted into string. data = textutil.remove_bom(data) data = ustr(data, encoding=encoding) return data def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False): """ Write 'contents' to 'filepath'. """ mode = "ab" if append else "wb" data = contents if not asbin: data = contents.encode(encoding) with open(filepath, mode) as out_file: out_file.write(data) def append_file(filepath, contents, asbin=False, encoding='utf-8'): """ Append 'contents' to 'filepath'. """ write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) def base_name(path): head, tail = os.path.split(path) return tail def get_line_startingwith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in read_file(filepath).split('\n'): if line.startswith(prefix): return line return None def mkdir(dirpath, mode=None, owner=None): if not os.path.isdir(dirpath): os.makedirs(dirpath) if mode is not None: chmod(dirpath, mode) if owner is not None: chowner(dirpath, owner) def chowner(path, owner): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: owner_info = pwd.getpwnam(owner) os.chown(path, owner_info[2], owner_info[3]) def chmod(path, mode): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: os.chmod(path, mode) def rm_files(*args): for paths in args: # find all possible file paths for path in glob.glob(paths): if os.path.isfile(path): os.remove(path) def rm_dirs(*args): """ Remove the contents of each directry """ for p in args: if not os.path.isdir(p): continue for pp in os.listdir(p): path = os.path.join(p, pp) if os.path.isfile(path): os.remove(path) elif os.path.islink(path): os.unlink(path) elif os.path.isdir(path): shutil.rmtree(path) def trim_ext(path, ext): if not ext.startswith("."): ext = "." 
+ ext return path.split(ext)[0] if path.endswith(ext) else path def update_conf_file(path, line_start, val, chk_err=False): conf = [] if not os.path.isfile(path) and chk_err: raise IOError("Can't find config file:{0}".format(path)) conf = read_file(path).split('\n') conf = [x for x in conf if x is not None and len(x) > 0 and not x.startswith(line_start)] conf.append(val) write_file(path, '\n'.join(conf) + '\n') def search_file(target_dir_name, target_file_name): for root, dirs, files in os.walk(target_dir_name): for file_name in files: if file_name == target_file_name: return os.path.join(root, file_name) return None def chmod_tree(path, mode): for root, dirs, files in os.walk(path): for file_name in files: os.chmod(os.path.join(root, file_name), mode) def findstr_in_file(file_path, line_str): """ Return True if the line is in the file; False otherwise. (Trailing whitespace is ignored.) """ try: for line in (open(file_path, 'r')).readlines(): if line_str == line.rstrip(): return True except Exception: # swallow exception pass return False def findre_in_file(file_path, line_re): """ Return match object if found in file. """ try: pattern = re.compile(line_re) for line in (open(file_path, 'r')).readlines(): match = re.search(pattern, line) if match: return match except: pass return None def get_all_files(root_path): """ Find all files under the given root path """ result = [] for root, dirs, files in os.walk(root_path): result.extend([os.path.join(root, file) for file in files]) return result def clean_ioerror(e, paths=[]): """ Clean-up possibly bad files and directories after an IO error. The code ignores *all* errors since disk state may be unhealthy. """ if isinstance(e, IOError) and e.errno in KNOWN_IOERRORS: for path in paths: if path is None: continue try: if os.path.isdir(path): shutil.rmtree(path, ignore_errors=True) else: os.remove(path) except Exception: # swallow exception pass WALinuxAgent-2.2.20/azurelinuxagent/common/utils/flexible_version.py000066400000000000000000000153231322477356400257120ustar00rootroot00000000000000from distutils import version import re class FlexibleVersion(version.Version): """ A more flexible implementation of distutils.version.StrictVersion The implementation allows to specify: - an arbitrary number of version numbers: not only '1.2.3' , but also '1.2.3.4.5' - the separator between version numbers: '1-2-3' is allowed when '-' is specified as separator - a flexible pre-release separator: '1.2.3.alpha1', '1.2.3-alpha1', and '1.2.3alpha1' are considered equivalent - an arbitrary ordering of pre-release tags: 1.1alpha3 < 1.1beta2 < 1.1rc1 < 1.1 when ["alpha", "beta", "rc"] is specified as pre-release tag list Inspiration from this discussion at StackOverflow: http://stackoverflow.com/questions/12255554/sort-versions-in-python """ def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')): version.Version.__init__(self) if sep is None: sep = '.' 
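# Fall back to the default separator and pre-release tag list so parsing never
# sees None; with the defaults, for example,
# FlexibleVersion('1.1rc1') < FlexibleVersion('1.1') evaluates to True.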
if prerel_tags is None: prerel_tags = () self.sep = sep self.prerel_sep = '' self.prerel_tags = tuple(prerel_tags) if prerel_tags is not None else () self._compile_pattern() self.prerelease = None self.version = () if vstring: self._parse(str(vstring)) return _nn_version = 'version' _nn_prerel_sep = 'prerel_sep' _nn_prerel_tag = 'tag' _nn_prerel_num = 'tag_num' _re_prerel_sep = r'(?P<{pn}>{sep})?'.format( pn=_nn_prerel_sep, sep='|'.join(map(re.escape, ('.', '-')))) @property def major(self): return self.version[0] if len(self.version) > 0 else 0 @property def minor(self): return self.version[1] if len(self.version) > 1 else 0 @property def patch(self): return self.version[2] if len(self.version) > 2 else 0 def _parse(self, vstring): m = self.version_re.match(vstring) if not m: raise ValueError("Invalid version number '{0}'".format(vstring)) self.prerelease = None self.version = () self.prerel_sep = m.group(self._nn_prerel_sep) tag = m.group(self._nn_prerel_tag) tag_num = m.group(self._nn_prerel_num) if tag is not None and tag_num is not None: self.prerelease = (tag, int(tag_num) if len(tag_num) else None) self.version = tuple(map(int, self.sep_re.split(m.group(self._nn_version)))) return def __add__(self, increment): version = list(self.version) version[-1] += increment vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) def __sub__(self, decrement): version = list(self.version) if version[-1] <= 0: raise ArithmeticError("Cannot decrement final numeric component of {0} below zero" \ .format(self)) version[-1] -= decrement vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) def __repr__(self): return "{cls} ('{vstring}', '{sep}', {prerel_tags})"\ .format( cls=self.__class__.__name__, vstring=str(self), sep=self.sep, prerel_tags=self.prerel_tags) def __str__(self): return self._assemble(self.version, self.sep, self.prerel_sep, self.prerelease) def __ge__(self, that): return not self.__lt__(that) def __gt__(self, that): return (not self.__lt__(that)) and (not self.__eq__(that)) def __le__(self, that): return (self.__lt__(that)) or (self.__eq__(that)) def __lt__(self, that): this_version, that_version = self._ensure_compatible(that) if this_version != that_version \ or self.prerelease is None and that.prerelease is None: return this_version < that_version if self.prerelease is not None and that.prerelease is None: return True if self.prerelease is None and that.prerelease is not None: return False this_index = self.prerel_tags_set[self.prerelease[0]] that_index = self.prerel_tags_set[that.prerelease[0]] if this_index == that_index: return self.prerelease[1] < that.prerelease[1] return this_index < that_index def __ne__(self, that): return not self.__eq__(that) def __eq__(self, that): this_version, that_version = self._ensure_compatible(that) if this_version != that_version: return False if self.prerelease != that.prerelease: return False return True def _assemble(self, version, sep, prerel_sep, prerelease): s = sep.join(map(str, version)) if prerelease is not None: if prerel_sep is not None: s += prerel_sep s += prerelease[0] if prerelease[1] is not None: s += str(prerelease[1]) return s def _compile_pattern(self): sep, self.sep_re = self._compile_separator(self.sep) if self.prerel_tags: tags = '|'.join(re.escape(tag) for tag in self.prerel_tags) self.prerel_tags_set = 
dict(zip(self.prerel_tags, range(len(self.prerel_tags)))) release_re = '(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( prerel_sep=self._re_prerel_sep, tags=tags, tn=self._nn_prerel_tag, nn=self._nn_prerel_num) else: release_re = '' version_re = r'^(?P<{vn}>\d+(?:(?:{sep}\d+)*)?){rel}$'.format( vn=self._nn_version, sep=sep, rel=release_re) self.version_re = re.compile(version_re) return def _compile_separator(self, sep): if sep is None: return '', re.compile('') return re.escape(sep), re.compile(re.escape(sep)) def _ensure_compatible(self, that): """ Ensures the instances have the same structure and, if so, returns length compatible version lists (so that x.y.0.0 is equivalent to x.y). """ if self.prerel_tags != that.prerel_tags or self.sep != that.sep: raise ValueError("Unable to compare: versions have different structures") this_version = list(self.version[:]) that_version = list(that.version[:]) while len(this_version) < len(that_version): this_version.append(0) while len(that_version) < len(this_version): that_version.append(0) return this_version, that_version WALinuxAgent-2.2.20/azurelinuxagent/common/utils/restutil.py000066400000000000000000000321641322477356400242300ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import os import threading import time import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient, urlparse, ustr from azurelinuxagent.common.version import PY_VERSION_MAJOR SECURE_WARNING_EMITTED = False DEFAULT_RETRIES = 6 DELAY_IN_SECONDS = 1 THROTTLE_RETRIES = 25 THROTTLE_DELAY_IN_SECONDS = 1 RETRY_CODES = [ httpclient.RESET_CONTENT, httpclient.PARTIAL_CONTENT, httpclient.FORBIDDEN, httpclient.INTERNAL_SERVER_ERROR, httpclient.NOT_IMPLEMENTED, httpclient.BAD_GATEWAY, httpclient.SERVICE_UNAVAILABLE, httpclient.GATEWAY_TIMEOUT, httpclient.INSUFFICIENT_STORAGE, 429, # Request Rate Limit Exceeded ] RESOURCE_GONE_CODES = [ httpclient.BAD_REQUEST, httpclient.GONE ] OK_CODES = [ httpclient.OK, httpclient.CREATED, httpclient.ACCEPTED ] THROTTLE_CODES = [ httpclient.FORBIDDEN, httpclient.SERVICE_UNAVAILABLE, 429, # Request Rate Limit Exceeded ] RETRY_EXCEPTIONS = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] HTTP_PROXY_ENV = "http_proxy" HTTPS_PROXY_ENV = "https_proxy" DEFAULT_PROTOCOL_ENDPOINT='168.63.129.16' HOST_PLUGIN_PORT = 32526 class IOErrorCounter(object): _lock = threading.RLock() _protocol_endpoint = DEFAULT_PROTOCOL_ENDPOINT _counts = {"hostplugin":0, "protocol":0, "other":0} @staticmethod def increment(host=None, port=None): with IOErrorCounter._lock: if host == IOErrorCounter._protocol_endpoint: if port == HOST_PLUGIN_PORT: IOErrorCounter._counts["hostplugin"] += 1 else: IOErrorCounter._counts["protocol"] += 1 else: IOErrorCounter._counts["other"] += 1 @staticmethod def get_and_reset(): with IOErrorCounter._lock: counts = IOErrorCounter._counts.copy() IOErrorCounter.reset() return counts @staticmethod def reset(): with IOErrorCounter._lock: IOErrorCounter._counts = {"hostplugin":0, "protocol":0, "other":0} @staticmethod def set_protocol_endpoint(endpoint=DEFAULT_PROTOCOL_ENDPOINT): IOErrorCounter._protocol_endpoint = endpoint def _compute_delay(retry_attempt=1, delay=DELAY_IN_SECONDS): fib = (1, 1) for n in range(retry_attempt): fib = (fib[1], fib[0]+fib[1]) return delay*fib[1] def _is_retry_status(status, retry_codes=RETRY_CODES): return status in retry_codes def _is_retry_exception(e): return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0 def _is_throttle_status(status): return status in THROTTLE_CODES def _parse_url(url): o = urlparse(url) rel_uri = o.path if o.fragment: rel_uri = "{0}#{1}".format(rel_uri, o.fragment) if o.query: rel_uri = "{0}?{1}".format(rel_uri, o.query) secure = False if o.scheme.lower() == "https": secure = True return o.hostname, o.port, secure, rel_uri def _get_http_proxy(secure=False): # Prefer the configuration settings over environment variables host = conf.get_httpproxy_host() port = None if not host is None: port = conf.get_httpproxy_port() else: http_proxy_env = HTTPS_PROXY_ENV if secure else HTTP_PROXY_ENV http_proxy_url = None for v in [http_proxy_env, http_proxy_env.upper()]: if v in os.environ: http_proxy_url = os.environ[v] break if not http_proxy_url is None: host, port, _, _ = _parse_url(http_proxy_url) return host, port def _http_request(method, host, rel_uri, port=None, data=None, secure=False, headers=None, proxy_host=None, proxy_port=None): headers = {} if headers is None else headers 
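# The request is routed through a proxy only when the caller resolved both a
# proxy host and a proxy port; otherwise the connection goes directly to host:port.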
use_proxy = proxy_host is not None and proxy_port is not None if port is None: port = 443 if secure else 80 if use_proxy: conn_host, conn_port = proxy_host, proxy_port scheme = "https" if secure else "http" url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri) else: conn_host, conn_port = host, port url = rel_uri if secure: conn = httpclient.HTTPSConnection(conn_host, conn_port, timeout=10) if use_proxy: conn.set_tunnel(host, port) else: conn = httpclient.HTTPConnection(conn_host, conn_port, timeout=10) logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]", method, url, data, headers) conn.request(method=method, url=url, body=data, headers=headers) return conn.getresponse() def http_request(method, url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): global SECURE_WARNING_EMITTED host, port, secure, rel_uri = _parse_url(url) # Use the HTTP(S) proxy proxy_host, proxy_port = (None, None) if use_proxy: proxy_host, proxy_port = _get_http_proxy(secure=secure) if proxy_host or proxy_port: logger.verbose("HTTP proxy: [{0}:{1}]", proxy_host, proxy_port) # If httplib module is not built with ssl support, # fallback to HTTP if allowed if secure and not hasattr(httpclient, "HTTPSConnection"): if not conf.get_allow_http(): raise HttpError("HTTPS is unavailable and required") secure = False if not SECURE_WARNING_EMITTED: logger.warn("Python does not include SSL support") SECURE_WARNING_EMITTED = True # If httplib module doesn't support HTTPS tunnelling, # fallback to HTTP if allowed if secure and \ proxy_host is not None and \ proxy_port is not None \ and not hasattr(httpclient.HTTPSConnection, "set_tunnel"): if not conf.get_allow_http(): raise HttpError("HTTPS tunnelling is unavailable and required") secure = False if not SECURE_WARNING_EMITTED: logger.warn("Python does not support HTTPS tunnelling") SECURE_WARNING_EMITTED = True msg = '' attempt = 0 delay = 0 was_throttled = False while attempt < max_retry: if attempt > 0: # Compute the request delay # -- Use a fixed delay if the server ever rate-throttles the request # (with a safe, minimum number of retry attempts) # -- Otherwise, compute a delay that is the product of the next # item in the Fibonacci series and the initial delay value delay = THROTTLE_DELAY_IN_SECONDS \ if was_throttled \ else _compute_delay(retry_attempt=attempt, delay=retry_delay) logger.verbose("[HTTP Retry] " "Attempt {0} of {1} will delay {2} seconds: {3}", attempt+1, max_retry, delay, msg) time.sleep(delay) attempt += 1 try: resp = _http_request(method, host, rel_uri, port=port, data=data, secure=secure, headers=headers, proxy_host=proxy_host, proxy_port=proxy_port) logger.verbose("[HTTP Response] Status Code {0}", resp.status) if request_failed(resp): if _is_retry_status(resp.status, retry_codes=retry_codes): msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format( method, url, resp.status) # Note if throttled and ensure a safe, minimum number of # retry attempts if _is_throttle_status(resp.status): was_throttled = True max_retry = max(max_retry, THROTTLE_RETRIES) continue if resp.status in RESOURCE_GONE_CODES: raise ResourceGoneError() return resp except httpclient.HTTPException as e: msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format( method, url, e) if _is_retry_exception(e): continue break except IOError as e: IOErrorCounter.increment(host=host, port=port) msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format( method, url, e) continue raise HttpError("{0} -- {1} attempts 
made".format(msg,attempt)) def http_get(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("GET", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_head(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("HEAD", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_post(url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("POST", url, data, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_put(url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("PUT", url, data, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_delete(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("DELETE", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def request_failed(resp, ok_codes=OK_CODES): return not request_succeeded(resp, ok_codes=ok_codes) def request_succeeded(resp, ok_codes=OK_CODES): return resp is not None and resp.status in ok_codes def read_response_error(resp): result = '' if resp is not None: try: result = "[HTTP Failed] [{0}: {1}] {2}".format( resp.status, resp.reason, resp.read()) # this result string is passed upstream to several methods # which do a raise HttpError() or a format() of some kind; # as a result it cannot have any unicode characters if PY_VERSION_MAJOR < 3: result = ustr(result, encoding='ascii', errors='ignore') else: result = result\ .encode(encoding='ascii', errors='ignore')\ .decode(encoding='ascii', errors='ignore') result = textutil.replace_non_ascii(result) except Exception: logger.warn(traceback.format_exc()) return result WALinuxAgent-2.2.20/azurelinuxagent/common/utils/shellutil.py000066400000000000000000000102171322477356400243550ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import subprocess import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr if not hasattr(subprocess, 'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, ' 'it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return ("Command '{0}' returned non-zero exit status {1}" "").format(self.cmd, self.returncode) subprocess.check_output = check_output subprocess.CalledProcessError = CalledProcessError """ Shell command util functions """ def run(cmd, chk_err=True): """ Calls run_get_output on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode, out = run_get_output(cmd, chk_err) return retcode def run_get_output(cmd, chk_err=True, log_cmd=True): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: logger.verbose(u"Run '{0}'", cmd) try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) output = ustr(output, encoding='utf-8', errors="backslashreplace") except Exception as e: if type(e) is subprocess.CalledProcessError: output = ustr(e.output, encoding='utf-8', errors="backslashreplace") if chk_err: if log_cmd: logger.error(u"Command: '{0}'", e.cmd) logger.error(u"Return code: {0}", e.returncode) logger.error(u"Result: {0}", output) return e.returncode, output else: logger.error( u"'{0}' raised unexpected exception: '{1}'".format( cmd, ustr(e))) return -1, ustr(e) return 0, output def quote(word_list): """ Quote a list or tuple of strings for Unix Shell as words, using the byte-literal single quote. The resulting string is safe for use with ``shell=True`` in ``subprocess``, and in ``os.system``. ``assert shlex.split(ShellQuote(wordList)) == wordList``. See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02 """ if not isinstance(word_list, (tuple, list)): word_list = (word_list,) return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)) # End shell command util functions WALinuxAgent-2.2.20/azurelinuxagent/common/utils/textutil.py000066400000000000000000000216401322477356400242340ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ import base64 import crypt import random import re import string import struct import sys import xml.dom.minidom as minidom from distutils.version import LooseVersion as Version def parse_doc(xml_text): """ Parse xml document from string """ # The minidom lib has some issue with unicode in python2. # Encode the string into utf-8 first xml_text = xml_text.encode('utf-8') return minidom.parseString(xml_text) def findall(root, tag, namespace=None): """ Get all nodes by tag and namespace under Node root. """ if root is None: return [] if namespace is None: return root.getElementsByTagName(tag) else: return root.getElementsByTagNameNS(namespace, tag) def find(root, tag, namespace=None): """ Get first node by tag and namespace under Node root. """ nodes = findall(root, tag, namespace=namespace) if nodes is not None and len(nodes) >= 1: return nodes[0] else: return None def gettext(node): """ Get node text """ if node is None: return None for child in node.childNodes: if child.nodeType == child.TEXT_NODE: return child.data return None def findtext(root, tag, namespace=None): """ Get text of node by tag and namespace under Node root. """ node = find(root, tag, namespace=namespace) return gettext(node) def getattrib(node, attr_name): """ Get attribute of xml node """ if node is not None: return node.getAttribute(attr_name) else: return None def unpack(buf, offset, range): """ Unpack bytes into python values. """ result = 0 for i in range: result = (result << 8) | str_to_ord(buf[offset + i]) return result def unpack_little_endian(buf, offset, length): """ Unpack little endian bytes into python values. """ return unpack(buf, offset, list(range(length - 1, -1, -1))) def unpack_big_endian(buf, offset, length): """ Unpack big endian bytes into python values. """ return unpack(buf, offset, list(range(0, length))) def hex_dump3(buf, offset, length): """ Dump range of buf in formatted hex. """ return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) def hex_dump2(buf): """ Dump buf in formatted hex. """ return hex_dump3(buf, 0, len(buf)) def is_in_range(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def is_printable(ch): """ Return True if character is displayable. """ return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) def hex_dump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. """ if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte = buffer[j] if type(byte) == str: byte = str_to_ord(byte.decode('latin1')) k = '.' if is_printable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def str_to_ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. 
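    Example:
        str_to_ord('A')  returns 65  (a one-character string goes through ord())
        str_to_ord(65)   returns 65  (an integer is returned unchanged)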
""" if type(a) == type(b'') or type(a) == type(u''): a = ord(a) return a def compare_bytes(a, b, start, length): for offset in range(start, start + length): if str_to_ord(a[offset]) != str_to_ord(b[offset]): return False return True def int_to_ip4_addr(a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, (a) & 0xFF) def hexstr_to_bytearray(a): """ Return hex string packed into a binary struct. """ b = b"" for c in range(0, len(a) // 2): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b def set_ssh_config(config, name, val): found = False no_match = -1 match_start = no_match for i in range(0, len(config)): if config[i].startswith(name) and match_start == no_match: config[i] = "{0} {1}".format(name, val) found = True elif config[i].lower().startswith("match"): if config[i].lower().startswith("match all"): # outside match block match_start = no_match elif match_start == no_match: # inside match block match_start = i if not found: if match_start != no_match: i = match_start config.insert(i, "{0} {1}".format(name, val)) return config def set_ini_config(config, name, val): notfound = True nameEqual = name + '=' length = len(config) text = "{0}=\"{1}\"".format(name, val) for i in reversed(range(0, length)): if config[i].startswith(nameEqual): config[i] = text notfound = False break if notfound: config.insert(length - 1, text) def replace_non_ascii(incoming, replace_char=''): outgoing = '' if incoming is not None: for c in incoming: if str_to_ord(c) > 128: outgoing += replace_char else: outgoing += c return outgoing def remove_bom(c): ''' bom is comprised of a sequence of three chars,0xef, 0xbb, 0xbf, in case of utf-8. ''' if not is_str_none_or_whitespace(c) and \ len(c) > 2 and \ str_to_ord(c[0]) > 128 and \ str_to_ord(c[1]) > 128 and \ str_to_ord(c[2]) > 128: c = c[3:] return c def gen_password_hash(password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) if sys.version_info[0] == 2: # if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt password = password.encode('utf-8') return crypt.crypt(password, salt) def get_bytes_from_pem(pem_str): base64_bytes = "" for line in pem_str.split('\n'): if "----" not in line: base64_bytes += line return base64_bytes def b64encode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8') return base64.b64encode(s) def b64decode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64decode(s).decode('utf-8') return base64.b64decode(s) def safe_shlex_split(s): import shlex from azurelinuxagent.common.version import PY_VERSION if PY_VERSION[:2] == (2, 6): return shlex.split(s.encode('utf-8')) return shlex.split(s) def swap_hexstring(s, width=2): r = len(s) % width if r != 0: s = ('0' * (width - (len(s) % width))) + s return ''.join(reversed( re.findall( r'[a-f0-9]{{{0}}}'.format(width), s, re.IGNORECASE))) def parse_json(json_str): """ Parse json string and return a resulting dictionary """ # trim null and whitespaces result = None if not is_str_none_or_whitespace(json_str): import json result = json.loads(json_str.rstrip(' \t\r\n\0')) return result def is_str_none_or_whitespace(s): return s is None or len(s) == 0 or s.isspace() 
WALinuxAgent-2.2.20/azurelinuxagent/common/version.py000066400000000000000000000161221322477356400226760ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import re import platform import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.future import ustr def get_f5_platform(): """ Add this workaround for detecting F5 products because BIG-IP/IQ/etc do not show their version info in the /etc/product-version location. Instead, the version and product information is contained in the /VERSION file. """ result = [None, None, None, None] f5_version = re.compile("^Version: (\d+\.\d+\.\d+)") f5_product = re.compile("^Product: ([\w-]+)") with open('/VERSION', 'r') as fh: content = fh.readlines() for line in content: version_matches = f5_version.match(line) product_matches = f5_product.match(line) if version_matches: result[1] = version_matches.group(1) elif product_matches: result[3] = product_matches.group(1) if result[3] == "BIG-IP": result[0] = "bigip" result[2] = "bigip" elif result[3] == "BIG-IQ": result[0] = "bigiq" result[2] = "bigiq" elif result[3] == "iWorkflow": result[0] = "iworkflow" result[2] = "iworkflow" return result def get_checkpoint_platform(): take = build = release = "" full_name = open("/etc/cp-release").read().strip() with open("/etc/cloud-version") as f: for line in f: k, _, v = line.partition(": ") v = v.strip() if k == "release": release = v elif k == "take": take = v elif k == "build": build = v return ["gaia", take + "." + build, release, full_name] def get_distro(): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['freebsd', release, '', 'freebsd'] elif 'OpenBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] elif 'linux_distribution' in dir(platform): supported = platform._supported_dists + ('alpine',) osinfo = list(platform.linux_distribution(full_distribution_name=0, supported_dists=supported)) full_name = platform.linux_distribution()[0].strip() osinfo.append(full_name) else: osinfo = platform.dist() # The platform.py lib has issue with detecting oracle linux distribution. # Merge the following patch provided by oracle as a temporary fix. if os.path.exists("/etc/oracle-release"): osinfo[2] = "oracle" osinfo[3] = "Oracle Linux" if os.path.exists("/etc/euleros-release"): osinfo[0] = "euleros" # The platform.py lib has issue with detecting BIG-IP linux distribution. # Merge the following patch provided by F5. 
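    # Note: osinfo is a 4-item list whose entries become DISTRO_NAME,
    # DISTRO_VERSION, DISTRO_CODE_NAME and DISTRO_FULL_NAME below, which is
    # why the F5 and Check Point branches replace the whole list rather than
    # a single field.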
if os.path.exists("/shared/vadc"): osinfo = get_f5_platform() if os.path.exists("/etc/cp-release"): osinfo = get_checkpoint_platform() # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" AGENT_VERSION = '2.2.20' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux VMs in the Azure cloud. This package should be installed on Linux disk images that are built to run in the Azure environment. """ AGENT_DIR_GLOB = "{0}-*".format(AGENT_NAME) AGENT_PKG_GLOB = "{0}-*.zip".format(AGENT_NAME) AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+"\.zip") AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) EXT_HANDLER_PATTERN = b".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" EXT_HANDLER_REGEX = re.compile(EXT_HANDLER_PATTERN) __distro__ = get_distro() DISTRO_NAME = __distro__[0] DISTRO_VERSION = __distro__[1] DISTRO_CODE_NAME = __distro__[2] DISTRO_FULL_NAME = __distro__[3] PY_VERSION = sys.version_info PY_VERSION_MAJOR = sys.version_info[0] PY_VERSION_MINOR = sys.version_info[1] PY_VERSION_MICRO = sys.version_info[2] # Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name # - This ensures the agent will "see itself" using the same name and version # as the code that downloads agents. def set_current_agent(): path = os.getcwd() lib_dir = conf.get_lib_dir() if lib_dir[-1] != os.path.sep: lib_dir += os.path.sep agent = path[len(lib_dir):].split(os.path.sep)[0] match = AGENT_NAME_PATTERN.match(agent) if match: version = match.group(1) else: agent = AGENT_LONG_VERSION version = AGENT_VERSION return agent, FlexibleVersion(version) def is_agent_package(path): path = os.path.basename(path) return not re.match(AGENT_PKG_PATTERN, path) is None def is_agent_path(path): path = os.path.basename(path) return not re.match(AGENT_NAME_PATTERN, path) is None CURRENT_AGENT, CURRENT_VERSION = set_current_agent() def set_goal_state_agent(): agent = None if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() match = EXT_HANDLER_REGEX.match(pname) if match: agent = match.group(1) if PY_VERSION_MAJOR > 2: agent = agent.decode('UTF-8') break except IOError: continue if agent is None: agent = CURRENT_VERSION return agent GOAL_STATE_AGENT_VERSION = set_goal_state_agent() def is_current_agent_installed(): return CURRENT_AGENT == AGENT_LONG_VERSION def is_snappy(): """ Add this workaround for detecting Snappy Ubuntu Core temporarily, until ubuntu fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 """ if os.path.exists("/etc/motd"): motd = fileutil.read_file("/etc/motd") if "snappy" in motd: return True return False if is_snappy(): DISTRO_FULL_NAME = "Snappy Ubuntu Core" WALinuxAgent-2.2.20/azurelinuxagent/daemon/000077500000000000000000000000001322477356400206105ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/daemon/__init__.py000066400000000000000000000012611322477356400227210ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.daemon.main import get_daemon_handler WALinuxAgent-2.2.20/azurelinuxagent/daemon/main.py000066400000000000000000000125461322477356400221160ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import sys import time import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import WireClient from azurelinuxagent.common.rdma import setup_rdma_device from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_NAME, \ AGENT_VERSION, \ DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from azurelinuxagent.daemon.scvmm import get_scvmm_handler from azurelinuxagent.ga.update import get_update_handler from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.rdma import get_rdma_handler OPENSSL_FIPS_ENVIRONMENT = "OPENSSL_FIPS" def get_daemon_handler(): return DaemonHandler() class DaemonHandler(object): """ Main thread of daemon. 
It will invoke other threads to do actual work """ def __init__(self): self.running = True self.osutil = get_osutil() def run(self, child_args=None): logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO) self.check_pid() # If FIPS is enabled, set the OpenSSL environment variable # Note: # -- Subprocesses inherit the current environment if conf.get_fips_enabled(): os.environ[OPENSSL_FIPS_ENVIRONMENT] = '1' while self.running: try: self.daemon(child_args) except Exception as e: err_msg = traceback.format_exc() add_event(name=AGENT_NAME, is_success=False, message=ustr(err_msg), op=WALAEventOperation.UnhandledError) logger.warn("Daemon ended with exception -- Sleep 15 seconds and restart daemon") time.sleep(15) def check_pid(self): """Check whether daemon is already running""" pid = None pid_file = conf.get_agent_pid_file_path() if os.path.isfile(pid_file): pid = fileutil.read_file(pid_file) if self.osutil.check_pid_alive(pid): logger.info("Daemon is already running: {0}", pid) sys.exit(0) fileutil.write_file(pid_file, ustr(os.getpid())) def daemon(self, child_args=None): logger.info("Run daemon") self.protocol_util = get_protocol_util() self.scvmm_handler = get_scvmm_handler() self.resourcedisk_handler = get_resourcedisk_handler() self.rdma_handler = get_rdma_handler() self.provision_handler = get_provision_handler() self.update_handler = get_update_handler() # Create lib dir if not os.path.isdir(conf.get_lib_dir()): fileutil.mkdir(conf.get_lib_dir(), mode=0o700) os.chdir(conf.get_lib_dir()) if conf.get_detect_scvmm_env(): self.scvmm_handler.run() if conf.get_resourcedisk_format(): self.resourcedisk_handler.run() # Always redetermine the protocol start (e.g., wireserver vs. # on-premise) since a VHD can move between environments self.protocol_util.clear_protocol() self.provision_handler.run() # Enable RDMA, continue in errors if conf.enable_rdma(): self.rdma_handler.install_driver() logger.info("RDMA capabilities are enabled in configuration") try: # Ensure the most recent SharedConfig is available # - Changes to RDMA state may not increment the goal state # incarnation number. A forced update ensures the most # current values. protocol = self.protocol_util.get_protocol() client = protocol.client if client is None or type(client) is not WireClient: raise Exception("Attempt to setup RDMA without Wireserver") client.update_goal_state(forced=True) setup_rdma_device() except Exception as e: logger.error("Error setting up rdma device: %s" % e) else: logger.info("RDMA capabilities are not enabled, skipping") while self.running: self.update_handler.run_latest(child_args=child_args) WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/000077500000000000000000000000001322477356400233125ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/__init__.py000066400000000000000000000013471322477356400254300ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/default.py000066400000000000000000000330231322477356400253110ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import re import sys import threading from time import sleep import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr import azurelinuxagent.common.conf as conf from azurelinuxagent.common.event import add_event, WALAEventOperation import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.version import AGENT_NAME DATALOSS_WARNING_FILE_NAME = "DATALOSS_WARNING_README.txt" DATA_LOSS_WARNING = """\ WARNING: THIS IS A TEMPORARY DISK. Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. 
For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ class ResourceDiskHandler(object): def __init__(self): self.osutil = get_osutil() self.fs = conf.get_resourcedisk_filesystem() def start_activate_resource_disk(self): disk_thread = threading.Thread(target=self.run) disk_thread.start() def run(self): mount_point = None if conf.get_resourcedisk_format(): mount_point = self.activate_resource_disk() if mount_point is not None and \ conf.get_resourcedisk_enable_swap(): self.enable_swap(mount_point) def activate_resource_disk(self): logger.info("Activate resource disk") try: mount_point = conf.get_resourcedisk_mountpoint() mount_point = self.mount_resource_disk(mount_point) warning_file = os.path.join(mount_point, DATALOSS_WARNING_FILE_NAME) try: fileutil.write_file(warning_file, DATA_LOSS_WARNING) except IOError as e: logger.warn("Failed to write data loss warning:{0}", e) return mount_point except ResourceDiskError as e: logger.error("Failed to mount resource disk {0}", e) add_event(name=AGENT_NAME, is_success=False, message=ustr(e), op=WALAEventOperation.ActivateResourceDisk) def enable_swap(self, mount_point): logger.info("Enable swap") try: size_mb = conf.get_resourcedisk_swap_size_mb() self.create_swap_space(mount_point, size_mb) except ResourceDiskError as e: logger.error("Failed to enable swap {0}", e) def reread_partition_table(self, device): if shellutil.run("sfdisk -R {0}".format(device), chk_err=False): shellutil.run("blockdev --rereadpt {0}".format(device), chk_err=False) def mount_resource_disk(self, mount_point): device = self.osutil.device_for_ide_port(1) if device is None: raise ResourceDiskError("unable to detect disk topology") device = "/dev/{0}".format(device) partition = device + "1" mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, device) if existing: logger.info("Resource disk [{0}] is already mounted [{1}]", partition, existing) return existing try: fileutil.mkdir(mount_point, mode=0o755) except OSError as ose: msg = "Failed to create mount point " \ "directory [{0}]: {1}".format(mount_point, ose) logger.error(msg) raise ResourceDiskError(msg=msg, inner=ose) logger.info("Examining partition table") ret = shellutil.run_get_output("parted {0} print".format(device)) if ret[0]: raise ResourceDiskError("Could not determine partition info for " "{0}: {1}".format(device, ret[1])) force_option = 'F' if self.fs == 'xfs': force_option = 'f' mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option) if "gpt" in ret[1]: logger.info("GPT detected, finding partitions") parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)] logger.info("Found {0} GPT partition(s).", len(parts)) if len(parts) > 1: logger.info("Removing old GPT partitions") for i in range(1, len(parts) + 1): logger.info("Remove partition {0}", i) shellutil.run("parted {0} rm {1}".format(device, i)) logger.info("Creating new GPT partition") shellutil.run("parted {0} mkpart primary 0% 100%".format(device)) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("GPT not detected, determining filesystem") ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") self.change_partition_type(suppress_message=False, option_str="{0} 1 
83".format(device)) self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("The partition type is {0}", ptype) mount_options = conf.get_resourcedisk_mountoptions() mount_string = self.get_mount_string(mount_options, partition, mount_point) attempts = 5 while not os.path.exists(partition) and attempts > 0: logger.info("Waiting for partition [{0}], {1} attempts remaining", partition, attempts) sleep(5) attempts -= 1 if not os.path.exists(partition): raise ResourceDiskError("Partition was not created [{0}]".format(partition)) logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string, chk_err=False) # if the exit code is 32, then the resource disk can be already mounted if ret == 32 and output.find("is already mounted") != -1: logger.warn("Could not mount resource disk: {0}", output) elif ret != 0: # Some kernels seem to issue an async partition re-read after a # 'parted' command invocation. This causes mount to fail if the # partition re-read is not complete by the time mount is # attempted. Seen in CentOS 7.2. Force a sequential re-read of # the partition and try mounting. logger.warn("Failed to mount resource disk. " "Retry mounting after re-reading partition info.") self.reread_partition_table(device) ret, output = shellutil.run_get_output(mount_string) if ret: logger.warn("Failed to mount resource disk. " "Attempting to format and retry mount. [{0}]", output) shellutil.run(mkfs_string) ret, output = shellutil.run_get_output(mount_string) if ret: raise ResourceDiskError("Could not mount {0} " "after syncing partition table: " "[{1}] {2}".format(partition, ret, output)) logger.info("Resource disk {0} is mounted at {1} with {2}", device, mount_point, self.fs) return mount_point def change_partition_type(self, suppress_message, option_str): """ use sfdisk to change partition type. 
First try with --part-type; if fails, fall back to -c """ command_to_use = '--part-type' input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, chk_err=False, log_cmd=True) # fall back to -c if err_code != 0: logger.info("sfdisk with --part-type failed [{0}], retrying with -c", err_code) command_to_use = '-c' input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, log_cmd=True) if err_code == 0: logger.info('{0} succeeded', input) else: logger.error('{0} failed [{1}: {2}]', input, err_code, output) return err_code, output @staticmethod def get_mount_string(mount_options, partition, mount_point): if mount_options is not None: return 'mount -o {0} {1} {2}'.format(mount_options, partition, mount_point) else: return 'mount {0} {1}'.format(partition, mount_point) def create_swap_space(self, mount_point, size_mb): size_kb = size_mb * 1024 size = size_kb * 1024 swapfile = os.path.join(mount_point, 'swapfile') swaplist = shellutil.run_get_output("swapon -s")[1] if swapfile in swaplist \ and os.path.isfile(swapfile) \ and os.path.getsize(swapfile) == size: logger.info("Swap already enabled") return if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: logger.info("Remove old swap file") shellutil.run("swapoff -a", chk_err=False) os.remove(swapfile) if not os.path.isfile(swapfile): logger.info("Create swap file") self.mkfile(swapfile, size_kb * 1024) shellutil.run("mkswap {0}".format(swapfile)) if shellutil.run("swapon {0}".format(swapfile)): raise ResourceDiskError("{0}".format(swapfile)) logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) def mkfile(self, filename, nbytes): """ Create a non-sparse file of that size. Deletes and replaces existing file. To allow efficient execution, fallocate will be tried first. This includes ``os.posix_fallocate`` on Python 3.3+ (unix) and the ``fallocate`` command in the popular ``util-linux{,-ng}`` package. A dd fallback will be tried too. When size < 64M, perform single-pass dd. Otherwise do two-pass dd. """ if not isinstance(nbytes, int): nbytes = int(nbytes) if nbytes <= 0: raise ResourceDiskError("Invalid swap size [{0}]".format(nbytes)) if os.path.isfile(filename): os.remove(filename) # If file system is xfs, use dd right away as we have been reported that # swap enabling fails in xfs fs when disk space is allocated with fallocate ret = 0 fn_sh = shellutil.quote((filename,)) if self.fs != 'xfs': # os.posix_fallocate if sys.version_info >= (3, 3): # Probable errors: # - OSError: Seen on Cygwin, libc notimpl? # - AttributeError: What if someone runs this under... with open(filename, 'w') as f: try: os.posix_fallocate(f.fileno(), 0, nbytes) return 0 except: # Not confident with this thing, just keep trying... 
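                        # Swallow the error: execution falls through to the
                        # fallocate command below and, failing that, to dd.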
pass # fallocate command ret = shellutil.run( u"umask 0077 && fallocate -l {0} {1}".format(nbytes, fn_sh)) if ret == 0: return ret logger.info("fallocate unsuccessful, falling back to dd") # dd fallback dd_maxbs = 64 * 1024 ** 2 dd_cmd = "umask 0077 && dd if=/dev/zero bs={0} count={1} " \ "conv=notrunc of={2}" blocks = int(nbytes / dd_maxbs) if blocks > 0: ret = shellutil.run(dd_cmd.format(dd_maxbs, blocks, fn_sh)) << 8 remains = int(nbytes % dd_maxbs) if remains > 0: ret += shellutil.run(dd_cmd.format(remains, 1, fn_sh)) if ret == 0: logger.info("dd successful") else: logger.error("dd unsuccessful") return ret WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/factory.py000066400000000000000000000026161322477356400253400ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, \ DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ResourceDiskHandler from .freebsd import FreeBSDResourceDiskHandler from .openbsd import OpenBSDResourceDiskHandler def get_resourcedisk_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "freebsd": return FreeBSDResourceDiskHandler() if distro_name == "openbsd": return OpenBSDResourceDiskHandler() return ResourceDiskHandler() WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/freebsd.py000066400000000000000000000121031322477356400252730ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class FreeBSDResourceDiskHandler(ResourceDiskHandler): """ This class handles resource disk mounting for FreeBSD. The resource disk locates at following slot: scbus2 on blkvsc1 bus 0: at scbus2 target 1 lun 0 (da1,pass2) There are 2 variations based on partition table type: 1. MBR: The resource disk partition is /dev/da1s1 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. 
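    The partition scheme is detected with parse_gpart_list(), which turns the
    output of 'gpart list' into a dict mapping geom name to scheme, for
    example {'da1': 'GPT'} (the exact contents depend on the machine).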
""" def __init__(self): super(FreeBSDResourceDiskHandler, self).__init__() @staticmethod def parse_gpart_list(data): dic = {} for line in data.split('\n'): if line.find("Geom name: ") != -1: geom_name = line[11:] elif line.find("scheme: ") != -1: dic[geom_name] = line[8:] return dic def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ufs': raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) # 1. Detect device err, output = shellutil.run_get_output('gpart list') if err: raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) disks = self.parse_gpart_list(output) device = self.osutil.device_for_ide_port(1) if device is None or not device in disks: # fallback logic to find device err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0') if err: # try again on "3:1:0" err, output = shellutil.run_get_output('camcontrol periphlist 3:1:0') if err: raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' for line in output.split('\n'): index = line.find(':') if index > 0: geom_name = line[:index] if geom_name in disks: device = geom_name break if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. Detect partition partition_table_type = disks[device] if partition_table_type == 'MBR': provider_name = device + 's1' elif partition_table_type == 'GPT': provider_name = device + 'p2' else: raise ResourceDiskError("Unsupported partition table type:{0}".format(output)) err, output = shellutil.run_get_output('gpart show -p {0}'.format(device)) if err or output.find(provider_name) == -1: raise ResourceDiskError("Resource disk partition not found.") partition = '/dev/' + provider_name logger.info('Resource disk partition {0} found.', partition) # 3. Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition)) err, output = shellutil.run_get_output('newfs -U {0}'.format(partition)) if err: raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}" .format(partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output)) logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) return mount_point WALinuxAgent-2.2.20/azurelinuxagent/daemon/resourcedisk/openbsd.py000066400000000000000000000114431322477356400253210ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and OpenSSL 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class OpenBSDResourceDiskHandler(ResourceDiskHandler): def __init__(self): super(OpenBSDResourceDiskHandler, self).__init__() # Fase File System (FFS) is UFS if self.fs == 'ufs' or self.fs == 'ufs2': self.fs = 'ffs' def create_swap_space(self, mount_point, size_mb): pass def enable_swap(self, mount_point): size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: logger.info("Enable swap") device = self.osutil.device_for_ide_port(1) err, output = shellutil.run_get_output("swapctl -a /dev/" "{0}b".format(device), chk_err=False) if err: logger.error("Failed to enable swap, error {0}", output) def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ffs': raise ResourceDiskError("Unsupported filesystem type: {0}, only " "ufs/ffs is supported.".format(fs)) # 1. Get device device = self.osutil.device_for_ide_port(1) if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. Get partition partition = "/dev/{0}a".format(device) # 3. 
Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(self.fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info('Creating {0} filesystem on {1}'.format(fs, device)) fdisk_cmd = "/sbin/fdisk -yi {0}".format(device) err, output = shellutil.run_get_output(fdisk_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to create new MBR on {0}, " "error: {1}".format(device, output)) size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: if size_mb > 512 * 1024: size_mb = 512 * 1024 disklabel_cmd = ("echo -e '{0} 1G-* 50%\nswap 1-{1}M 50%' " "| disklabel -w -A -T /dev/stdin " "{2}").format(mount_point, size_mb, device) ret, output = shellutil.run_get_output( disklabel_cmd, chk_err=False) if ret: raise ResourceDiskError("Failed to create new disklabel " "on {0}, error " "{1}".format(device, output)) err, output = shellutil.run_get_output("newfs -O2 {0}a" "".format(device)) if err: raise ResourceDiskError("Failed to create new filesystem on " "partition {0}, error " "{1}".format(partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to mount partition {0}, " "error {1}".format(partition, output)) logger.info("Resource disk partition {0} is mounted at {1} with fstype " "{2}", partition, mount_point, fs) return mount_point WALinuxAgent-2.2.20/azurelinuxagent/daemon/scvmm.py000066400000000000000000000053101322477356400223060ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
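#
# Module overview (summarizing the code below): ScvmmHandler mounts each
# candidate DVD device (/dev/sr*, /dev/hd[c-z], /dev/cdrom*, /dev/cd*) and
# checks it for linuxosconfiguration.xml.  When that file is found, the SCVMM
# startup script ('install') is launched from the mounted media and the
# daemon sleeps five minutes and then exits so the SCVMM agent can take over.
#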
# # Requires Python 2.4+ and Openssl 1.0+ # import re import os import sys import subprocess import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.osutil import get_osutil VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" VMM_STARTUP_SCRIPT_NAME= "install" def get_scvmm_handler(): return ScvmmHandler() class ScvmmHandler(object): def __init__(self): self.osutil = get_osutil() def detect_scvmm_env(self, dev_dir='/dev'): logger.info("Detecting Microsoft System Center VMM Environment") found=False # try to load the ATAPI driver, continue on failure self.osutil.try_load_atapiix_mod() # cycle through all available /dev/sr*|hd*|cdrom*|cd* looking for the scvmm configuration file mount_point = conf.get_dvd_mount_point() for devices in filter(lambda x: x is not None, [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)', dev) for dev in os.listdir(dev_dir)]): dvd_device = os.path.join(dev_dir, devices.group(0)) self.osutil.mount_dvd(max_retry=1, chk_err=False, dvd_device=dvd_device, mount_point=mount_point) found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) if found: self.start_scvmm_agent(mount_point=mount_point) break else: self.osutil.umount_dvd(chk_err=False, mount_point=mount_point) return found def start_scvmm_agent(self, mount_point=None): logger.info("Starting Microsoft System Center VMM Initialization " "Process") if mount_point is None: mount_point = conf.get_dvd_mount_point() startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) devnull = open(os.devnull, 'w') subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point], stdout=devnull, stderr=devnull) def run(self): if self.detect_scvmm_env(): logger.info("Exiting") time.sleep(300) sys.exit(0) WALinuxAgent-2.2.20/azurelinuxagent/distro/000077500000000000000000000000001322477356400206515ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/distro/__init__.py000066400000000000000000000011661322477356400227660ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/distro/suse/000077500000000000000000000000001322477356400216305ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/distro/suse/__init__.py000066400000000000000000000011661322477356400237450ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/ga/000077500000000000000000000000001322477356400177345ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/ga/__init__.py000066400000000000000000000011661322477356400220510ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/ga/env.py000066400000000000000000000174111322477356400211020ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import re import os import socket import time import threading import operator import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.event import add_periodic, WALAEventOperation from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import INCARNATION_FILE_NAME from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CACHE_PATTERNS = [ re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] MAXIMUM_CACHED_FILES = 50 CACHE_PURGE_INTERVAL = datetime.timedelta(hours=24) def get_env_handler(): return EnvHandler() class EnvHandler(object): """ Monitor changes to dhcp and hostname. If dhcp client process re-start has occurred, reset routes, dhcp with fabric. Monitor scsi disk. 
If new scsi disk found, set timeout """ def __init__(self): self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() self.protocol_util = get_protocol_util() self.stopped = True self.hostname = None self.dhcp_id = None self.server_thread = None self.dhcp_warning_enabled = True self.last_purge = None def run(self): if not self.stopped: logger.info("Stop existing env monitor service.") self.stop() self.stopped = False logger.info("Start env monitor service.") self.dhcp_handler.conf_routes() self.hostname = self.osutil.get_hostname_record() self.dhcp_id = self.osutil.get_dhcp_pid() self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() def monitor(self): """ Monitor firewall rules Monitor dhcp client pid and hostname. If dhcp client process re-start has occurred, reset routes. Purge unnecessary files from disk cache. """ protocol = self.protocol_util.get_protocol() while not self.stopped: self.osutil.remove_rules_files() if conf.enable_firewall(): success = self.osutil.enable_firewall( dst_ip=protocol.endpoint, uid=os.getuid()) add_periodic( logger.EVERY_HOUR, AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Firewall, is_success=success, log_event=True) timeout = conf.get_root_device_scsi_timeout() if timeout is not None: self.osutil.set_scsi_disks_timeout(timeout) if conf.get_monitor_hostname(): self.handle_hostname_update() self.handle_dhclient_restart() self.purge_disk_cache() time.sleep(5) def handle_hostname_update(self): curr_hostname = socket.gethostname() if curr_hostname != self.hostname: logger.info("EnvMonitor: Detected hostname change: {0} -> {1}", self.hostname, curr_hostname) self.osutil.set_hostname(curr_hostname) self.osutil.publish_hostname(curr_hostname) self.hostname = curr_hostname def handle_dhclient_restart(self): if self.dhcp_id is None: if self.dhcp_warning_enabled: logger.warn("Dhcp client is not running. ") self.dhcp_id = self.osutil.get_dhcp_pid() # disable subsequent error logging self.dhcp_warning_enabled = self.dhcp_id is not None return # the dhcp process has not changed since the last check if self.osutil.check_pid_alive(self.dhcp_id.strip()): return new_pid = self.osutil.get_dhcp_pid() if new_pid is not None and new_pid != self.dhcp_id: logger.info("EnvMonitor: Detected dhcp client restart. " "Restoring routing table.") self.dhcp_handler.conf_routes() self.dhcp_id = new_pid def purge_disk_cache(self): """ Ensure the number of cached files does not exceed a maximum count. Purge only once per interval, and never delete files related to the current incarnation. 
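        Concretely (as implemented below): only files matching
        "<prefix>.<incarnation>.(agentsManifest|manifest.xml|xml)" are
        considered, at most MAXIMUM_CACHED_FILES are kept for each
        (prefix, suffix) pair, and the purge runs at most once per
        CACHE_PURGE_INTERVAL.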
""" if self.last_purge is not None \ and datetime.datetime.utcnow() < \ self.last_purge + CACHE_PURGE_INTERVAL: return current_incarnation = -1 self.last_purge = datetime.datetime.utcnow() incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) if os.path.exists(incarnation_file): last_incarnation = fileutil.read_file(incarnation_file) if last_incarnation is not None: current_incarnation = int(last_incarnation) logger.info("Purging disk cache, current incarnation is {0}" .format('not found' if current_incarnation == -1 else current_incarnation)) # Create tuples: (prefix, suffix, incarnation, name, file_modified) files = [] for f in os.listdir(conf.get_lib_dir()): full_path = os.path.join(conf.get_lib_dir(), f) for pattern in CACHE_PATTERNS: m = pattern.match(f) if m is not None: prefix = m.group(1) suffix = m.group(3) incarnation = int(m.group(2)) file_modified = os.path.getmtime(full_path) t = (prefix, suffix, incarnation, f, file_modified) files.append(t) break if len(files) <= 0: return # Sort by (prefix, suffix, file_modified) in reverse order files = sorted(files, key=operator.itemgetter(0, 1, 4), reverse=True) # Remove any files in excess of the maximum allowed # -- Restart then whenever the (prefix, suffix) change count = 0 last_match = [None, None] for f in files: if last_match != f[0:2]: last_match = f[0:2] count = 0 if current_incarnation == f[2]: logger.verbose("Skipping {0}".format(f[3])) continue count += 1 if count > MAXIMUM_CACHED_FILES: full_name = os.path.join(conf.get_lib_dir(), f[3]) logger.verbose("Deleting {0}".format(full_name)) os.remove(full_name) def stop(self): """ Stop server communication and join the thread to main thread. """ self.stopped = True if self.server_thread is not None: self.server_thread.join() WALinuxAgent-2.2.20/azurelinuxagent/ga/exthandlers.py000066400000000000000000001313511322477356400226330ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import datetime import glob import json import os import os.path import re import shutil import stat import subprocess import time import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.restutil as restutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.version as version from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.exception import ExtensionError, ProtocolError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import AGENT_VERSION from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ Extension, \ VMStatus, ExtHandler, \ get_properties, \ set_properties from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION #HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)" HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN+"$", re.IGNORECASE) HANDLER_PKG_EXT = ".zip" HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN+"\\"+HANDLER_PKG_EXT+"$", re.IGNORECASE) def validate_has_key(obj, key, fullname): if key not in obj: raise ExtensionError("Missing: {0}".format(fullname)) def validate_in_range(val, valid_range, name): if val not in valid_range: raise ExtensionError("Invalid {0}: {1}".format(name, val)) def parse_formatted_message(formatted_message): if formatted_message is None: return None validate_has_key(formatted_message, 'lang', 'formattedMessage/lang') validate_has_key(formatted_message, 'message', 'formattedMessage/message') return formatted_message.get('message') def parse_ext_substatus(substatus): #Check extension sub status format validate_has_key(substatus, 'status', 'substatus/status') validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, 'substatus/status') status = ExtensionSubStatus() status.name = substatus.get('name') status.status = substatus.get('status') status.code = substatus.get('code', 0) formatted_message = substatus.get('formattedMessage') status.message = parse_formatted_message(formatted_message) return status def parse_ext_status(ext_status, data): if data is None or len(data) is None: return #Currently, only the first status will be reported data = data[0] #Check extension status format validate_has_key(data, 'status', 'status') status_data = data['status'] validate_has_key(status_data, 'status', 'status/status') validate_in_range(status_data['status'], VALID_EXTENSION_STATUS, 'status/status') applied_time = status_data.get('configurationAppliedTime') ext_status.configurationAppliedTime = applied_time ext_status.operation = status_data.get('operation') ext_status.status = status_data.get('status') ext_status.code = status_data.get('code', 0) formatted_message = status_data.get('formattedMessage') ext_status.message = parse_formatted_message(formatted_message) substatus_list = status_data.get('substatus') if substatus_list is None: return for substatus in 
substatus_list: if substatus is not None: ext_status.substatusList.append(parse_ext_substatus(substatus)) # This code migrates, if it exists, handler state and status from an # agent-owned directory into the handler-owned config directory # # Notes: # - The v2.0.x branch wrote all handler-related state into the handler-owned # config directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). # - The v2.1.x branch original moved that state into an agent-owned handler # state directory (e.g., /var/lib/waagent/handler_state). # - This move can cause v2.1.x agents to multiply invoke a handler's install # command. It also makes clean-up more difficult since the agent must # remove the state as well as the handler directory. def migrate_handler_state(): handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state") if not os.path.isdir(handler_state_path): return for handler_path in glob.iglob(os.path.join(handler_state_path, "*")): handler = os.path.basename(handler_path) handler_config_path = os.path.join(conf.get_lib_dir(), handler, "config") if os.path.isdir(handler_config_path): for file in ("State", "Status"): from_path = os.path.join(handler_state_path, handler, file.lower()) to_path = os.path.join(handler_config_path, "Handler" + file) if os.path.isfile(from_path) and not os.path.isfile(to_path): try: shutil.move(from_path, to_path) except Exception as e: logger.warn( "Exception occurred migrating {0} {1} file: {2}", handler, file, str(e)) try: shutil.rmtree(handler_state_path) except Exception as e: logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e)) return class ExtHandlerState(object): NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" def get_exthandlers_handler(): return ExtHandlersHandler() class ExtHandlersHandler(object): def __init__(self): self.protocol_util = get_protocol_util() self.protocol = None self.ext_handlers = None self.last_etag = None self.last_guids = {} self.log_report = False self.log_etag = True def run(self): self.ext_handlers, etag = None, None try: self.protocol = self.protocol_util.get_protocol() self.ext_handlers, etag = self.protocol.get_ext_handlers() except Exception as e: msg = u"Exception retrieving extension handlers: {0}".format( ustr(e)) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=msg) return try: msg = u"Handle extensions updates for incarnation {0}".format(etag) logger.verbose(msg) # Log status report success on new config self.log_report = True self.handle_ext_handlers(etag) self.last_etag = etag self.report_ext_handlers_status() self.cleanup_outdated_handlers() except Exception as e: msg = u"Exception processing extension handlers: {0}".format( ustr(e)) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=msg) return def run_status(self): self.report_ext_handlers_status() return def get_guid(self, name): return self.last_guids.get(name, None) def is_new_guid(self, ext_handler): last_guid = self.get_guid(ext_handler.name) if last_guid is None: return True return last_guid != ext_handler.properties.upgradeGuid def cleanup_outdated_handlers(self): handlers = [] pkgs = [] # Build a collection of uninstalled handlers and orphaned packages # Note: # -- An orphaned package is one without a corresponding handler # directory for item in os.listdir(conf.get_lib_dir()): path = 
os.path.join(conf.get_lib_dir(), item) if version.is_agent_package(path) or version.is_agent_path(path): continue if os.path.isdir(path): if re.match(HANDLER_NAME_PATTERN, item) is None: continue try: eh = ExtHandler() separator = item.rfind('-') eh.name = item[0:separator] eh.properties.version = str(FlexibleVersion(item[separator+1:])) handler = ExtHandlerInstance(eh, self.protocol) except Exception as e: continue if handler.get_handler_state() != ExtHandlerState.NotInstalled: continue handlers.append(handler) elif os.path.isfile(path) and \ not os.path.isdir(path[0:-len(HANDLER_PKG_EXT)]): if not re.match(HANDLER_PKG_PATTERN, item): continue pkgs.append(path) # Then, remove the orphaned packages for pkg in pkgs: try: os.remove(pkg) logger.verbose("Removed orphaned extension package " "{0}".format(pkg)) except Exception as e: logger.warn("Failed to remove orphaned package: {0}".format( pkg)) # Finally, remove the directories and packages of the # uninstalled handlers for handler in handlers: handler.rm_ext_handler_dir() pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT) if os.path.isfile(pkg): try: os.remove(pkg) logger.verbose("Removed extension package " "{0}".format(pkg)) except Exception as e: logger.warn("Failed to remove extension package: " "{0}".format(pkg)) def handle_ext_handlers(self, etag=None): if self.ext_handlers.extHandlers is None or \ len(self.ext_handlers.extHandlers) == 0: logger.verbose("No extension handler config found") return if conf.get_enable_overprovisioning(): artifacts_profile = self.protocol.get_artifacts_profile() if artifacts_profile and artifacts_profile.is_on_hold(): logger.info("Extension handling is on hold") return for ext_handler in self.ext_handlers.extHandlers: # TODO: handle install in sequence, enable in parallel self.handle_ext_handler(ext_handler, etag) def handle_ext_handler(self, ext_handler, etag): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) try: state = ext_handler.properties.state # The extension is to be enabled, there is an upgrade GUID # and the GUID is NOT new if state == u"enabled" and \ ext_handler.properties.upgradeGuid is not None and \ not self.is_new_guid(ext_handler): logger.info("New GUID is the same as the old GUID. 
Exiting without upgrading.") return ext_handler_i.decide_version(target_state=state) if not ext_handler_i.is_upgrade and self.last_etag == etag: if self.log_etag: ext_handler_i.logger.verbose("Version {0} is current for etag {1}", ext_handler_i.pkg.version, etag) self.log_etag = False return self.log_etag = True ext_handler_i.logger.info("Target handler state: {0}", state) if state == u"enabled": self.handle_enable(ext_handler_i) if ext_handler.properties.upgradeGuid is not None: ext_handler_i.logger.info("New Upgrade GUID: {0}", ext_handler.properties.upgradeGuid) self.last_guids[ext_handler.name] = ext_handler.properties.upgradeGuid elif state == u"disabled": self.handle_disable(ext_handler_i) # Remove the GUID from the dictionary so that it is upgraded upon re-enable self.last_guids.pop(ext_handler.name, None) elif state == u"uninstall": self.handle_uninstall(ext_handler_i) # Remove the GUID from the dictionary so that it is upgraded upon re-install self.last_guids.pop(ext_handler.name, None) else: message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) except Exception as e: ext_handler_i.set_handler_status(message=ustr(e), code=-1) ext_handler_i.report_event(message=ustr(e), is_success=False) def handle_enable(self, ext_handler_i): old_ext_handler_i = ext_handler_i.get_installed_ext_handler() if old_ext_handler_i is not None and \ old_ext_handler_i.version_gt(ext_handler_i): raise ExtensionError(u"Downgrade not allowed") handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Enable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.NotInstalled: ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) ext_handler_i.download() ext_handler_i.update_settings() if old_ext_handler_i is None: ext_handler_i.install() elif ext_handler_i.version_gt(old_ext_handler_i): old_ext_handler_i.disable() ext_handler_i.copy_status_files(old_ext_handler_i) ext_handler_i.update() old_ext_handler_i.uninstall() old_ext_handler_i.rm_ext_handler_dir() ext_handler_i.update_with_install() else: ext_handler_i.update_settings() ext_handler_i.enable() def handle_disable(self, ext_handler_i): handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Disable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", handler_state.lower()) if handler_state != ExtHandlerState.NotInstalled: if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() ext_handler_i.uninstall() ext_handler_i.rm_ext_handler_dir() def report_ext_handlers_status(self): """Go through handler_state dir, collect and report status""" vm_status = VMStatus(status="Ready", message="Guest Agent is running") if self.ext_handlers is not None: for ext_handler in self.ext_handlers.extHandlers: try: self.report_ext_handler_status(vm_status, ext_handler) except ExtensionError as e: add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=ustr(e)) logger.verbose("Report vm agent status") try: self.protocol.report_vm_status(vm_status) if self.log_report: logger.verbose("Completed vm agent status report") except ProtocolError as e: message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, version=CURRENT_VERSION, 
op=WALAEventOperation.ExtensionProcessing, is_success=False, message=message) def report_ext_handler_status(self, vm_status, ext_handler): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) handler_status = ext_handler_i.get_handler_status() if handler_status is None: return guid = self.get_guid(ext_handler.name) if guid is not None: handler_status.upgradeGuid = guid handler_state = ext_handler_i.get_handler_state() if handler_state != ExtHandlerState.NotInstalled: try: active_exts = ext_handler_i.report_ext_status() handler_status.extensions.extend(active_exts) except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=-1) try: heartbeat = ext_handler_i.collect_heartbeat() if heartbeat is not None: handler_status.status = heartbeat.get('status') except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=-1) vm_status.vmAgent.extensionHandlers.append(handler_status) class ExtHandlerInstance(object): def __init__(self, ext_handler, protocol): self.ext_handler = ext_handler self.protocol = protocol self.operation = None self.pkg = None self.pkg_file = None self.is_upgrade = False prefix = "[{0}]".format(self.get_full_name()) self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) try: fileutil.mkdir(self.get_log_dir(), mode=0o755) except IOError as e: self.logger.error(u"Failed to create extension log dir: {0}", e) log_file = os.path.join(self.get_log_dir(), "CommandExecution.log") self.logger.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, log_file) def decide_version(self, target_state=None): self.logger.verbose("Decide which version to use") try: pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) except ProtocolError as e: raise ExtensionError("Failed to get ext handler pkgs", e) # Determine the desired and installed versions requested_version = FlexibleVersion( str(self.ext_handler.properties.version)) installed_version_string = self.get_installed_version() installed_version = requested_version \ if installed_version_string is None \ else FlexibleVersion(installed_version_string) # Divide packages # - Find the installed package (its version must exactly match) # - Find the internal candidate (its version must exactly match) # - Separate the public packages internal_pkg = None installed_pkg = None public_pkgs = [] for pkg in pkg_list.versions: pkg_version = FlexibleVersion(pkg.version) if pkg_version == installed_version: installed_pkg = pkg if pkg.isinternal and pkg_version == requested_version: internal_pkg = pkg if not pkg.isinternal: public_pkgs.append(pkg) internal_version = FlexibleVersion(internal_pkg.version) \ if internal_pkg is not None \ else FlexibleVersion() public_pkgs.sort(key=lambda pkg: FlexibleVersion(pkg.version), reverse=True) # Determine the preferred version and type of upgrade occurring preferred_version = max(requested_version, installed_version) is_major_upgrade = preferred_version.major > installed_version.major allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto' # Find the first public candidate which # - Matches the preferred major version # - Does not upgrade to a new, disallowed major version # - And only increments the minor version if allowed # Notes: # - The patch / hotfix version is not considered public_pkg = None for pkg in public_pkgs: pkg_version = FlexibleVersion(pkg.version) if pkg_version.major == preferred_version.major \ and (not pkg.disallow_major_upgrade or not is_major_upgrade) \ and (allow_minor_upgrade or pkg_version.minor == 
preferred_version.minor): public_pkg = pkg break # If there are no candidates, locate the highest public version whose # major matches that installed if internal_pkg is None and public_pkg is None: for pkg in public_pkgs: pkg_version = FlexibleVersion(pkg.version) if pkg_version.major == installed_version.major: public_pkg = pkg break public_version = FlexibleVersion(public_pkg.version) \ if public_pkg is not None \ else FlexibleVersion() # Select the candidate # - Use the public candidate if there is no internal candidate or # the public is more recent (e.g., a hotfix patch) # - Otherwise use the internal candidate if internal_pkg is None or (public_pkg is not None and public_version > internal_version): selected_pkg = public_pkg else: selected_pkg = internal_pkg selected_version = FlexibleVersion(selected_pkg.version) \ if selected_pkg is not None \ else FlexibleVersion() # Finally, update the version only if not downgrading # Note: # - A downgrade, which will be bound to the same major version, # is allowed if the installed version is no longer available if target_state == u"uninstall": if installed_pkg is None: msg = "Failed to find installed version of {0} " \ "to uninstall".format(self.ext_handler.name) self.logger.warn(msg) self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ if installed_version is not None else None elif selected_pkg is None \ or (installed_pkg is not None and selected_version < installed_version): self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ if installed_version is not None else None else: self.pkg = selected_pkg self.ext_handler.properties.version = str(selected_pkg.version) # Note if the selected package is greater than that installed if installed_pkg is None \ or FlexibleVersion(self.pkg.version) > FlexibleVersion(installed_pkg.version): self.is_upgrade = True if self.pkg is None: raise ExtensionError("Failed to find any valid extension package") self.logger.verbose("Use version: {0}", self.pkg.version) return def version_gt(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) > FlexibleVersion(other_version) def get_installed_ext_handler(self): lastest_version = self.get_installed_version() if lastest_version is None: return None installed_handler = ExtHandler() set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) installed_handler.properties.version = lastest_version return ExtHandlerInstance(installed_handler, self.protocol) def get_installed_version(self): lastest_version = None for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")): if not os.path.isdir(path): continue separator = path.rfind('-') version = FlexibleVersion(path[separator+1:]) state_path = os.path.join(path, 'config', 'HandlerState') if not os.path.exists(state_path) or \ fileutil.read_file(state_path) == \ ExtHandlerState.NotInstalled: logger.verbose("Ignoring version of uninstalled extension: " "{0}".format(path)) continue if lastest_version is None or lastest_version < version: lastest_version = version return str(lastest_version) if lastest_version is not None else None def copy_status_files(self, old_ext_handler_i): self.logger.info("Copy status files from old plugin to new") old_ext_dir = old_ext_handler_i.get_base_dir() new_ext_dir = self.get_base_dir() old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq") if os.path.isfile(old_ext_mrseq_file): 
shutil.copy2(old_ext_mrseq_file, new_ext_dir) old_ext_status_dir = old_ext_handler_i.get_status_dir() new_ext_status_dir = self.get_status_dir() if os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file): shutil.copy2(status_file, new_ext_status_dir) def set_operation(self, op): self.operation = op def report_event(self, message="", is_success=True): version = self.ext_handler.properties.version add_event(name=self.ext_handler.name, version=version, message=message, op=self.operation, is_success=is_success) def download(self): self.logger.verbose("Download extension package") self.set_operation(WALAEventOperation.Download) if self.pkg is None: raise ExtensionError("No package uri found") package = None for uri in self.pkg.uris: try: package = self.protocol.download_ext_handler_pkg(uri.uri) if package is not None: break except Exception as e: logger.warn("Error while downloading extension: {0}", e) if package is None: raise ExtensionError("Failed to download extension") self.logger.verbose("Unpack extension package") self.pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip") try: fileutil.write_file(self.pkg_file, bytearray(package), asbin=True) zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir()) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to write and unzip plugin", e) #Add user execute permission to all files under the base dir for file in fileutil.get_all_files(self.get_base_dir()): fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR) self.report_event(message="Download succeeded") self.logger.info("Initialize extension directory") #Save HandlerManifest.json man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json') if man_file is None: raise ExtensionError("HandlerManifest.json not found") try: man = fileutil.read_file(man_file, remove_bom=True) fileutil.write_file(self.get_manifest_file(), man) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to save HandlerManifest.json", e) #Create status and config dir try: status_dir = self.get_status_dir() fileutil.mkdir(status_dir, mode=0o700) seq_no, status_path = self.get_status_file_path() if seq_no > -1: now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") status = { "version": 1.0, "timestampUTC" : now, "status" : { "name" : self.ext_handler.name, "operation" : "Enabling Handler", "status" : "transitioning", "code" : 0 } } fileutil.write_file(status_path, json.dumps(status)) conf_dir = self.get_conf_dir() fileutil.mkdir(conf_dir, mode=0o700) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to create status or config dir", e) #Save HandlerEnvironment.json self.create_handler_env() def enable(self): self.set_operation(WALAEventOperation.Enable) man = self.load_manifest() enable_cmd = man.get_enable_command() self.logger.info("Enable extension [{0}]".format(enable_cmd)) self.launch_command(enable_cmd, timeout=300) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") def disable(self): self.set_operation(WALAEventOperation.Disable) man = self.load_manifest() disable_cmd = man.get_disable_command() self.logger.info("Disable extension [{0}]".format(disable_cmd))
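# Note: disable, like install and update further below, is launched with a 900-second timeout, while enable and uninstall run with launch_command's 300-second default.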
self.launch_command(disable_cmd, timeout=900) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") def install(self): man = self.load_manifest() install_cmd = man.get_install_command() self.logger.info("Install extension [{0}]".format(install_cmd)) self.set_operation(WALAEventOperation.Install) self.launch_command(install_cmd, timeout=900) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): try: self.set_operation(WALAEventOperation.UnInstall) man = self.load_manifest() uninstall_cmd = man.get_uninstall_command() self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) self.launch_command(uninstall_cmd) except ExtensionError as e: self.report_event(message=ustr(e), is_success=False) def rm_ext_handler_dir(self): try: base_dir = self.get_base_dir() if os.path.isdir(base_dir): self.logger.info("Remove extension handler directory: {0}", base_dir) shutil.rmtree(base_dir) except IOError as e: message = "Failed to remove extension handler directory: {0}".format(e) self.report_event(message=message, is_success=False) self.logger.warn(message) def update(self): self.set_operation(WALAEventOperation.Update) man = self.load_manifest() update_cmd = man.get_update_command() self.logger.info("Update extension [{0}]".format(update_cmd)) self.launch_command(update_cmd, timeout=900) def update_with_install(self): man = self.load_manifest() if man.is_update_with_install(): self.install() else: self.logger.info("UpdateWithInstall not set. " "Skip install during upgrade.") self.set_handler_state(ExtHandlerState.Installed) def get_largest_seq_no(self): seq_no = -1 conf_dir = self.get_conf_dir() for item in os.listdir(conf_dir): item_path = os.path.join(conf_dir, item) if os.path.isfile(item_path): try: seperator = item.rfind(".") if seperator > 0 and item[seperator + 1:] == 'settings': curr_seq_no = int(item.split('.')[0]) if curr_seq_no > seq_no: seq_no = curr_seq_no except Exception as e: self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no def get_status_file_path(self): seq_no = self.get_largest_seq_no() path = None if seq_no > -1: path = os.path.join( self.get_status_dir(), "{0}.status".format(seq_no)) return seq_no, path def collect_ext_status(self, ext): self.logger.verbose("Collect extension status") seq_no, ext_status_file = self.get_status_file_path() if seq_no == -1: return None ext_status = ExtensionStatus(seq_no=seq_no) try: data_str = fileutil.read_file(ext_status_file) data = json.loads(data_str) parse_ext_status(ext_status, data) except IOError as e: ext_status.message = u"Failed to get status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" except (ExtensionError, ValueError) as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" return ext_status def report_ext_status(self): active_exts = [] for ext in self.ext_handler.properties.extensions: ext_status = self.collect_ext_status(ext) if ext_status is None: continue try: self.protocol.report_ext_status(self.ext_handler.name, ext.name, ext_status) active_exts.append(ext.name) except ProtocolError as e: self.logger.error(u"Failed to report extension status: {0}", e) return active_exts def collect_heartbeat(self): man = self.load_manifest() if not man.is_report_heartbeat(): return heartbeat_file = os.path.join(conf.get_lib_dir(), self.get_heartbeat_file()) if not os.path.isfile(heartbeat_file): raise ExtensionError("Failed to get heart beat 
file") if not self.is_responsive(heartbeat_file): return { "status": "Unresponsive", "code": -1, "message": "Extension heartbeat is not responsive" } try: heartbeat_json = fileutil.read_file(heartbeat_file) heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] except IOError as e: raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) except (ValueError, KeyError) as e: raise ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat def is_responsive(self, heartbeat_file): last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) return last_update <= 600 # updated within the last 10 min def launch_command(self, cmd, timeout=300): self.logger.verbose("Launch command: [{0}]", cmd) base_dir = self.get_base_dir() try: devnull = open(os.devnull, 'w') child = subprocess.Popen(base_dir + "/" + cmd, shell=True, cwd=base_dir, stdout=devnull, env=os.environ) except Exception as e: #TODO do not catch all exception raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) retry = timeout while retry > 0 and child.poll() is None: time.sleep(1) retry -= 1 if retry == 0: os.kill(child.pid, 9) raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd)) ret = child.wait() if ret == None or ret != 0: raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd)) self.report_event(message="Launch command succeeded: {0}".format(cmd)) def load_manifest(self): man_file = self.get_manifest_file() try: data = json.loads(fileutil.read_file(man_file)) except IOError as e: raise ExtensionError('Failed to load manifest file.') except ValueError as e: raise ExtensionError('Malformed manifest file.') return HandlerManifest(data[0]) def update_settings_file(self, settings_file, settings): settings_file = os.path.join(self.get_conf_dir(), settings_file) try: fileutil.write_file(settings_file, settings) except IOError as e: fileutil.clean_ioerror(e, paths=[settings_file]) raise ExtensionError(u"Failed to update settings file", e) def update_settings(self): if self.ext_handler.properties.extensions is None or \ len(self.ext_handler.properties.extensions) == 0: #This is the behavior of waagent 2.0.x #The new agent has to be consistent with the old one. 
self.logger.info("Extension has no settings, write empty 0.settings") self.update_settings_file("0.settings", "") return for ext in self.ext_handler.properties.extensions: settings = { 'publicSettings': ext.publicSettings, 'protectedSettings': ext.protectedSettings, 'protectedSettingsCertThumbprint': ext.certificateThumbprint } ext_settings = { "runtimeSettings":[{ "handlerSettings": settings }] } settings_file = "{0}.settings".format(ext.sequenceNumber) self.logger.info("Update settings file: {0}", settings_file) self.update_settings_file(settings_file, json.dumps(ext_settings)) def create_handler_env(self): env = [{ "name": self.ext_handler.name, "version" : HANDLER_ENVIRONMENT_VERSION, "handlerEnvironment" : { "logFolder" : self.get_log_dir(), "configFolder" : self.get_conf_dir(), "statusFolder" : self.get_status_dir(), "heartbeatFile" : self.get_heartbeat_file() } }] try: fileutil.write_file(self.get_env_file(), json.dumps(env)) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to save handler environment", e) def set_handler_state(self, handler_state): state_dir = self.get_conf_dir() try: if not os.path.exists(state_dir): fileutil.mkdir(state_dir, mode=0o700) state_file = os.path.join(state_dir, "HandlerState") fileutil.write_file(state_file, handler_state) except IOError as e: fileutil.clean_ioerror(e, paths=[state_file]) self.logger.error("Failed to set state: {0}", e) def get_handler_state(self): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, "HandlerState") if not os.path.isfile(state_file): return ExtHandlerState.NotInstalled try: return fileutil.read_file(state_file) except IOError as e: self.logger.error("Failed to get state: {0}", e) return ExtHandlerState.NotInstalled def set_handler_status(self, status="NotReady", message="", code=0): state_dir = self.get_conf_dir() handler_status = ExtHandlerStatus() handler_status.name = self.ext_handler.name handler_status.version = str(self.ext_handler.properties.version) handler_status.message = message handler_status.code = code handler_status.status = status status_file = os.path.join(state_dir, "HandlerStatus") try: fileutil.write_file(status_file, json.dumps(get_properties(handler_status))) except (IOError, ValueError, ProtocolError) as e: fileutil.clean_ioerror(e, paths=[status_file]) self.logger.error("Failed to save handler status: {0}", e) def get_handler_status(self): state_dir = self.get_conf_dir() status_file = os.path.join(state_dir, "HandlerStatus") if not os.path.isfile(status_file): return None try: data = json.loads(fileutil.read_file(status_file)) handler_status = ExtHandlerStatus() set_properties("ExtHandlerStatus", handler_status, data) return handler_status except (IOError, ValueError) as e: self.logger.error("Failed to get handler status: {0}", e) def get_full_name(self): return "{0}-{1}".format(self.ext_handler.name, self.ext_handler.properties.version) def get_base_dir(self): return os.path.join(conf.get_lib_dir(), self.get_full_name()) def get_status_dir(self): return os.path.join(self.get_base_dir(), "status") def get_conf_dir(self): return os.path.join(self.get_base_dir(), 'config') def get_heartbeat_file(self): return os.path.join(self.get_base_dir(), 'heartbeat.log') def get_manifest_file(self): return os.path.join(self.get_base_dir(), 'HandlerManifest.json') def get_env_file(self): return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') def get_log_dir(self): return os.path.join(conf.get_ext_log_dir(), 
self.ext_handler.name, str(self.ext_handler.properties.version)) class HandlerEnvironment(object): def __init__(self, data): self.data = data def get_version(self): return self.data["version"] def get_log_dir(self): return self.data["handlerEnvironment"]["logFolder"] def get_conf_dir(self): return self.data["handlerEnvironment"]["configFolder"] def get_status_dir(self): return self.data["handlerEnvironment"]["statusFolder"] def get_heartbeat_file(self): return self.data["handlerEnvironment"]["heartbeatFile"] class HandlerManifest(object): def __init__(self, data): if data is None or data['handlerManifest'] is None: raise ExtensionError('Malformed manifest file.') self.data = data def get_name(self): return self.data["name"] def get_version(self): return self.data["version"] def get_install_command(self): return self.data['handlerManifest']["installCommand"] def get_uninstall_command(self): return self.data['handlerManifest']["uninstallCommand"] def get_update_command(self): return self.data['handlerManifest']["updateCommand"] def get_enable_command(self): return self.data['handlerManifest']["enableCommand"] def get_disable_command(self): return self.data['handlerManifest']["disableCommand"] def is_reboot_after_install(self): """ Deprecated """ return False def is_report_heartbeat(self): return self.data['handlerManifest'].get('reportHeartbeat', False) def is_update_with_install(self): update_mode = self.data['handlerManifest'].get('updateMode') if update_mode is None: return True return update_mode.lower() == "updatewithinstall" WALinuxAgent-2.2.20/azurelinuxagent/ga/monitor.py000066400000000000000000000222171322477356400220010ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import datetime import json import os import platform import time import threading import uuid import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.logger as logger from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEventList, \ TelemetryEvent, \ set_properties from azurelinuxagent.common.utils.restutil import IOErrorCounter from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION def parse_event(data_str): try: return parse_json_event(data_str) except ValueError: return parse_xml_event(data_str) def parse_xml_param(param_node): name = getattrib(param_node, "Name") value_str = getattrib(param_node, "Value") attr_type = getattrib(param_node, "T") value = value_str if attr_type == 'mt:uint64': value = int(value_str) elif attr_type == 'mt:bool': value = bool(value_str) elif attr_type == 'mt:float64': value = float(value_str) return TelemetryEventParam(name, value) def parse_xml_event(data_str): try: xml_doc = parse_doc(data_str) event_id = getattrib(find(xml_doc, "Event"), 'id') provider_id = getattrib(find(xml_doc, "Provider"), 'id') event = TelemetryEvent(event_id, provider_id) param_nodes = findall(xml_doc, 'Param') for param_node in param_nodes: event.parameters.append(parse_xml_param(param_node)) return event except Exception as e: raise ValueError(ustr(e)) def parse_json_event(data_str): data = json.loads(data_str) event = TelemetryEvent() set_properties("TelemetryEvent", event, data) return event def get_monitor_handler(): return MonitorHandler() class MonitorHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.sysinfo = [] def run(self): self.init_sysinfo() event_thread = threading.Thread(target=self.daemon) event_thread.setDaemon(True) event_thread.start() def init_sysinfo(self): osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), DISTRO_NAME, DISTRO_VERSION, DISTRO_CODE_NAME, platform.release()) self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) self.sysinfo.append( TelemetryEventParam("GAVersion", CURRENT_AGENT)) try: ram = self.osutil.get_total_mem() processors = self.osutil.get_processor_cores() self.sysinfo.append(TelemetryEventParam("RAM", ram)) self.sysinfo.append(TelemetryEventParam("Processors", processors)) except OSUtilError as e: logger.warn("Failed to get system info: {0}", e) try: protocol = self.protocol_util.get_protocol() vminfo = protocol.get_vminfo() self.sysinfo.append(TelemetryEventParam("VMName", vminfo.vmName)) self.sysinfo.append(TelemetryEventParam("TenantName", vminfo.tenantName)) self.sysinfo.append(TelemetryEventParam("RoleName", vminfo.roleName)) self.sysinfo.append(TelemetryEventParam("RoleInstanceName", vminfo.roleInstanceName)) self.sysinfo.append(TelemetryEventParam("ContainerId", vminfo.containerId)) except ProtocolError as e: logger.warn("Failed to get system info: {0}", e) def collect_event(self, evt_file_name): try: 
logger.verbose("Found event file: {0}", evt_file_name) with open(evt_file_name, "rb") as evt_file: # if fail to open or delete the file, throw exception data_str = evt_file.read().decode("utf-8", 'ignore') logger.verbose("Processed event file: {0}", evt_file_name) os.remove(evt_file_name) return data_str except IOError as e: msg = "Failed to process {0}, {1}".format(evt_file_name, e) raise EventError(msg) def collect_and_send_events(self): event_list = TelemetryEventList() event_dir = os.path.join(conf.get_lib_dir(), "events") event_files = os.listdir(event_dir) for event_file in event_files: if not event_file.endswith(".tld"): continue event_file_path = os.path.join(event_dir, event_file) try: data_str = self.collect_event(event_file_path) except EventError as e: logger.error("{0}", e) continue try: event = parse_event(data_str) self.add_sysinfo(event) event_list.events.append(event) except (ValueError, ProtocolError) as e: logger.warn("Failed to decode event file: {0}", e) continue if len(event_list.events) == 0: return try: protocol = self.protocol_util.get_protocol() protocol.report_event(event_list) except ProtocolError as e: logger.error("{0}", e) def daemon(self): period = datetime.timedelta(minutes=30) protocol = self.protocol_util.get_protocol() last_heartbeat = datetime.datetime.utcnow() - period # Create a new identifier on each restart and reset the counter heartbeat_id = str(uuid.uuid4()).upper() counter = 0 while True: if datetime.datetime.utcnow() >= (last_heartbeat + period): last_heartbeat = datetime.datetime.utcnow() incarnation = protocol.get_incarnation() dropped_packets = self.osutil.get_firewall_dropped_packets( protocol.endpoint) msg = "{0};{1};{2};{3}".format( incarnation, counter, heartbeat_id, dropped_packets) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, message=msg) counter += 1 ioerrors = IOErrorCounter.get_and_reset() hostplugin_errors = ioerrors.get("hostplugin") protocol_errors = ioerrors.get("protocol") other_errors = ioerrors.get("other") if hostplugin_errors > 0 or \ protocol_errors > 0 or \ other_errors > 0: msg = "hostplugin:{0};protocol:{1};other:{2}".format( hostplugin_errors, protocol_errors, other_errors) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HttpErrors, is_success=False, msg=msg) try: self.collect_and_send_events() except Exception as e: logger.warn("Failed to send events: {0}", e) time.sleep(60) def add_sysinfo(self, event): sysinfo_names = [v.name for v in self.sysinfo] for param in event.parameters: if param.name in sysinfo_names: logger.verbose("Remove existing event parameter: [{0}:{1}]", param.name, param.value) event.parameters.remove(param) event.parameters.extend(self.sysinfo) WALinuxAgent-2.2.20/azurelinuxagent/ga/update.py000066400000000000000000001104651322477356400215770ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import glob import json import os import platform import re import shutil import signal import stat import subprocess import sys import time import traceback import zipfile from datetime import datetime, timedelta import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.restutil as restutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.event import add_event, add_periodic, \ elapsed_milliseconds, \ WALAEventOperation from azurelinuxagent.common.exception import ProtocolError, \ ResourceGoneError, \ UpdateError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ CURRENT_AGENT, CURRENT_VERSION, \ is_current_agent_installed from azurelinuxagent.ga.exthandlers import HandlerManifest AGENT_ERROR_FILE = "error.json" # File name for agent error record AGENT_MANIFEST_FILE = "HandlerManifest.json" AGENT_PARTITION_FILE = "partition" CHILD_HEALTH_INTERVAL = 15 * 60 CHILD_LAUNCH_INTERVAL = 5 * 60 CHILD_LAUNCH_RESTART_MAX = 3 CHILD_POLL_INTERVAL = 60 MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted GOAL_STATE_INTERVAL = 3 ORPHAN_WAIT_INTERVAL = 15 * 60 AGENT_SENTINAL_FILE = "current_version" READONLY_FILE_GLOBS = [ "*.crt", "*.p7m", "*.pem", "*.prv", "ovf-env.xml" ] def get_update_handler(): return UpdateHandler() def get_python_cmd(): major_version = platform.python_version_tuple()[0] return "python" if int(major_version) <= 2 else "python{0}".format(major_version) class UpdateHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.running = True self.last_attempt_time = None self.agents = [] self.child_agent = None self.child_launch_time = None self.child_launch_attempts = 0 self.child_process = None self.signal_handler = None def run_latest(self, child_args=None): """ This method is called from the daemon to find and launch the most current, downloaded agent. 
Note: - Most events should be tagged to the launched agent (agent_version) """ if self.child_process is not None: raise Exception("Illegal attempt to launch multiple goal state Agent processes") if self.signal_handler is None: self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal) latest_agent = self.get_latest_agent() if latest_agent is None: logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT) agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0]) agent_dir = os.getcwd() agent_name = CURRENT_AGENT agent_version = CURRENT_VERSION else: logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name) agent_cmd = latest_agent.get_agent_cmd() agent_dir = latest_agent.get_agent_dir() agent_name = latest_agent.name agent_version = latest_agent.version if child_args is not None: agent_cmd = "{0} {1}".format(agent_cmd, child_args) try: # Launch the correct Python version for python-based agents cmds = textutil.safe_shlex_split(agent_cmd) if cmds[0].lower() == "python": cmds[0] = get_python_cmd() agent_cmd = " ".join(cmds) self._evaluate_agent_health(latest_agent) self.child_process = subprocess.Popen( cmds, cwd=agent_dir, stdout=sys.stdout, stderr=sys.stderr, env=os.environ) logger.verbose(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) # If the most current agent is the installed agent and update is enabled, # assume updates are likely available and poll every second. # This reduces the start-up impact of finding / launching agent updates on # fresh VMs. if latest_agent is None and conf.get_autoupdate_enabled(): poll_interval = 1 else: poll_interval = CHILD_POLL_INTERVAL ret = None start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: time.sleep(poll_interval) ret = self.child_process.poll() if ret is not None: break if ret is None or ret <= 0: msg = u"Agent {0} launched with command '{1}' is successfully running".format( agent_name, agent_cmd) logger.info(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=True, message=msg) if ret is None: ret = self.child_process.wait() else: msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=msg) if ret is not None and ret > 0: msg = u"Agent {0} launched with command '{1}' returned code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) except Exception as e: # Ignore child errors during termination if self.running: msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format( agent_name, agent_cmd, ustr(e)) logger.warn(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=msg) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) self.child_process = None return def run(self): """ This is the main loop which watches for agent and extension updates. 
""" try: logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) # Launch monitoring threads from azurelinuxagent.ga.monitor import get_monitor_handler get_monitor_handler().run() from azurelinuxagent.ga.env import get_env_handler get_env_handler().run() from azurelinuxagent.ga.exthandlers import get_exthandlers_handler, migrate_handler_state exthandlers_handler = get_exthandlers_handler() migrate_handler_state() self._ensure_no_orphans() self._emit_restart_event() self._ensure_partition_assigned() self._ensure_readonly_files() while self.running: if self._is_orphaned: logger.info("Agent {0} is an orphan -- exiting", CURRENT_AGENT) break if self._upgrade_available(): available_agent = self.get_latest_agent() if available_agent is None: logger.info( "Agent {0} is reverting to the installed agent -- exiting", CURRENT_AGENT) else: logger.info( u"Agent {0} discovered update {1} -- exiting", CURRENT_AGENT, available_agent.name) break utc_start = datetime.utcnow() last_etag = exthandlers_handler.last_etag exthandlers_handler.run() if last_etag != exthandlers_handler.last_etag: self._ensure_readonly_files() add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ProcessGoalState, is_success=True, duration=elapsed_milliseconds(utc_start), message="Incarnation {0}".format( exthandlers_handler.last_etag), log_event=True) time.sleep(GOAL_STATE_INTERVAL) except Exception as e: msg = u"Agent {0} failed with exception: {1}".format( CURRENT_AGENT, ustr(e)) self._set_sentinal(msg=msg) logger.warn(msg) logger.warn(traceback.format_exc()) sys.exit(1) # additional return here because sys.exit is mocked in unit tests return self._shutdown() sys.exit(0) def forward_signal(self, signum, frame): # Note: # - At present, the handler is registered only for SIGTERM. # However, clean shutdown is both SIGTERM and SIGKILL. # A SIGKILL handler is not being registered at this time to # minimize perturbing the code. if signum in (signal.SIGTERM, signal.SIGKILL): self._shutdown() if self.child_process is None: return logger.info( u"Agent {0} forwarding signal {1} to {2}", CURRENT_AGENT, signum, self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) self.child_process.send_signal(signum) if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): self.signal_handler(signum, frame) elif self.signal_handler is signal.SIG_DFL: if signum == signal.SIGTERM: # TODO: This should set self.running to False vs. just exiting sys.exit(0) return def get_latest_agent(self): """ If autoupdate is enabled, return the most current, downloaded, non-blacklisted agent which is not the current version (if any). Otherwise, return None (implying to use the installed agent). 
""" if not conf.get_autoupdate_enabled(): return None self._find_agents() available_agents = [agent for agent in self.agents if agent.is_available and agent.version > FlexibleVersion(AGENT_VERSION)] return available_agents[0] if len(available_agents) >= 1 else None def _emit_restart_event(self): try: if not self._is_clean_start: msg = u"Agent did not terminate cleanly: {0}".format( fileutil.read_file(self._sentinal_file_path())) logger.info(msg) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Restart, is_success=False, message=msg) except Exception: pass self._set_sentinal(msg="Starting") return def _ensure_no_orphans(self, orphan_wait_interval=ORPHAN_WAIT_INTERVAL): pid_files, ignored = self._write_pid_file() for pid_file in pid_files: try: pid = fileutil.read_file(pid_file) wait_interval = orphan_wait_interval while self.osutil.check_pid_alive(pid): wait_interval -= GOAL_STATE_INTERVAL if wait_interval <= 0: logger.warn( u"{0} forcibly terminated orphan process {1}", CURRENT_AGENT, pid) os.kill(pid, signal.SIGKILL) break logger.info( u"{0} waiting for orphan process {1} to terminate", CURRENT_AGENT, pid) time.sleep(GOAL_STATE_INTERVAL) os.remove(pid_file) except Exception as e: logger.warn( u"Exception occurred waiting for orphan agent to terminate: {0}", ustr(e)) return def _ensure_partition_assigned(self): """ Assign the VM to a partition (0 - 99). Downloaded updates may be configured to run on only some VMs; the assigned partition determines eligibility. """ if not os.path.exists(self._partition_file): partition = ustr(int(datetime.utcnow().microsecond / 10000)) fileutil.write_file(self._partition_file, partition) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Partition, is_success=True, message=partition) def _ensure_readonly_files(self): for g in READONLY_FILE_GLOBS: for path in glob.iglob(os.path.join(conf.get_lib_dir(), g)): os.chmod(path, stat.S_IRUSR) def _evaluate_agent_health(self, latest_agent): """ Evaluate the health of the selected agent: If it is restarting too frequently, raise an Exception to force blacklisting. """ if latest_agent is None: self.child_agent = None return if self.child_agent is None or latest_agent.version != self.child_agent.version: self.child_agent = latest_agent self.child_launch_time = None self.child_launch_attempts = 0 if self.child_launch_time is None: self.child_launch_time = time.time() self.child_launch_attempts += 1 if (time.time() - self.child_launch_time) <= CHILD_LAUNCH_INTERVAL \ and self.child_launch_attempts >= CHILD_LAUNCH_RESTART_MAX: msg = u"Agent {0} restarted more than {1} times in {2} seconds".format( self.child_agent.name, CHILD_LAUNCH_RESTART_MAX, CHILD_LAUNCH_INTERVAL) raise Exception(msg) return def _filter_blacklisted_agents(self): self.agents = [agent for agent in self.agents if not agent.is_blacklisted] def _find_agents(self): """ Load all non-blacklisted agents currently on disk. 
""" try: self._set_agents(self._load_agents()) self._filter_blacklisted_agents() except Exception as e: logger.warn(u"Exception occurred loading available agents: {0}", ustr(e)) return def _get_host_plugin(self, protocol=None): return protocol.client.get_host_plugin() \ if protocol and \ type(protocol) is WireProtocol and \ protocol.client \ else None def _get_pid_parts(self): pid_file = conf.get_agent_pid_file_path() pid_dir = os.path.dirname(pid_file) pid_name = os.path.basename(pid_file) pid_re = re.compile("(\d+)_{0}".format(re.escape(pid_name))) return pid_dir, pid_name, pid_re def _get_pid_files(self): pid_dir, pid_name, pid_re = self._get_pid_parts() pid_files = [os.path.join(pid_dir, f) for f in os.listdir(pid_dir) if pid_re.match(f)] pid_files.sort(key=lambda f: int(pid_re.match(os.path.basename(f)).group(1))) return pid_files @property def _is_clean_start(self): return not os.path.isfile(self._sentinal_file_path()) @property def _is_orphaned(self): parent_pid = os.getppid() if parent_pid in (1, None): return True if not os.path.isfile(conf.get_agent_pid_file_path()): return True return fileutil.read_file(conf.get_agent_pid_file_path()) != ustr(parent_pid) def _is_version_eligible(self, version): # Ensure the installed version is always eligible if version == CURRENT_VERSION and is_current_agent_installed(): return True for agent in self.agents: if agent.version == version: return agent.is_available return False def _load_agents(self): path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) return [GuestAgent(path=agent_dir) for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)] def _partition(self): return int(fileutil.read_file(self._partition_file)) @property def _partition_file(self): return os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) def _purge_agents(self): """ Remove from disk all directories and .zip files of unknown agents (without removing the current, running agent). 
""" path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) known_versions = [agent.version for agent in self.agents] if CURRENT_VERSION not in known_versions: logger.info( u"Running Agent {0} was not found in the agent manifest - adding to list", CURRENT_VERSION) known_versions.append(CURRENT_VERSION) for agent_path in glob.iglob(path): try: name = fileutil.trim_ext(agent_path, "zip") m = AGENT_DIR_PATTERN.match(name) if m is not None and FlexibleVersion(m.group(1)) not in known_versions: if os.path.isfile(agent_path): logger.info(u"Purging outdated Agent file {0}", agent_path) os.remove(agent_path) else: logger.info(u"Purging outdated Agent directory {0}", agent_path) shutil.rmtree(agent_path) except Exception as e: logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e)) return def _set_agents(self, agents=[]): self.agents = agents self.agents.sort(key=lambda agent: agent.version, reverse=True) return def _set_sentinal(self, agent=CURRENT_AGENT, msg="Unknown cause"): try: fileutil.write_file( self._sentinal_file_path(), "[{0}] [{1}]".format(agent, msg)) except Exception as e: logger.warn( u"Exception writing sentinal file {0}: {1}", self._sentinal_file_path(), str(e)) return def _sentinal_file_path(self): return os.path.join(conf.get_lib_dir(), AGENT_SENTINAL_FILE) def _shutdown(self): self.running = False if not os.path.isfile(self._sentinal_file_path()): return try: os.remove(self._sentinal_file_path()) except Exception as e: logger.warn( u"Exception removing sentinal file {0}: {1}", self._sentinal_file_path(), str(e)) return def _upgrade_available(self, base_version=CURRENT_VERSION): # Emit an event expressing the state of AutoUpdate # Note: # - Duplicate events get suppressed; state transitions always emit add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.AutoUpdate, is_success=conf.get_autoupdate_enabled()) # Ignore new agents if updating is disabled if not conf.get_autoupdate_enabled(): return False now = time.time() if self.last_attempt_time is not None: next_attempt_time = self.last_attempt_time + \ conf.get_autoupdate_frequency() else: next_attempt_time = now if next_attempt_time > now: return False family = conf.get_autoupdate_gafamily() logger.verbose("Checking for agent family {0} updates", family) self.last_attempt_time = now protocol = self.protocol_util.get_protocol() for update_goal_state in [False, True]: try: if update_goal_state: protocol.update_goal_state(forced=True) manifest_list, etag = protocol.get_vmagent_manifests() manifests = [m for m in manifest_list.vmAgentManifests \ if m.family == family and \ len(m.versionsManifestUris) > 0] if len(manifests) == 0: logger.verbose(u"Incarnation {0} has no {1} agent updates", etag, family) return False pkg_list = protocol.get_vmagent_pkgs(manifests[0]) # Set the agents to those available for download at least as # current as the existing agent and remove from disk any agent # no longer reported to the VM. # Note: # The code leaves on disk available, but blacklisted, agents # so as to preserve the state. Otherwise, those agents could be # again downloaded and inappropriately retried. 
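# Example with hypothetical versions: if the running agent is 2.2.20 and the family manifest advertises 2.2.21, # then after the refresh below self.agents[0].version > base_version, so this method returns True and run() # exits to let the daemon relaunch with the newer agent.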
host = self._get_host_plugin(protocol=protocol) self._set_agents([GuestAgent(pkg=pkg, host=host) \ for pkg in pkg_list.versions]) self._purge_agents() self._filter_blacklisted_agents() # Return True if current agent is no longer available or an # agent with a higher version number is available return not self._is_version_eligible(base_version) \ or (len(self.agents) > 0 \ and self.agents[0].version > base_version) except Exception as e: if isinstance(e, ResourceGoneError): continue msg = u"Exception retrieving agent manifests: {0}".format( ustr(e)) logger.warn(msg) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, is_success=False, message=msg) return False def _write_pid_file(self): pid_files = self._get_pid_files() pid_dir, pid_name, pid_re = self._get_pid_parts() previous_pid_file = None \ if len(pid_files) <= 0 \ else pid_files[-1] pid_index = -1 \ if previous_pid_file is None \ else int(pid_re.match(os.path.basename(previous_pid_file)).group(1)) pid_file = os.path.join(pid_dir, "{0}_{1}".format(pid_index+1, pid_name)) try: fileutil.write_file(pid_file, ustr(os.getpid())) logger.info(u"{0} running as process {1}", CURRENT_AGENT, ustr(os.getpid())) except Exception as e: pid_file = None logger.warn( u"Expection writing goal state agent {0} pid to {1}: {2}", CURRENT_AGENT, pid_file, ustr(e)) return pid_files, pid_file class GuestAgent(object): def __init__(self, path=None, pkg=None, host=None): self.pkg = pkg self.host = host version = None if path is not None: m = AGENT_DIR_PATTERN.match(path) if m == None: raise UpdateError(u"Illegal agent directory: {0}".format(path)) version = m.group(1) elif self.pkg is not None: version = pkg.version if version == None: raise UpdateError(u"Illegal agent version: {0}".format(version)) self.version = FlexibleVersion(version) location = u"disk" if path is not None else u"package" logger.verbose(u"Loading Agent {0} from {1}", self.name, location) self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() try: self._ensure_downloaded() self._ensure_loaded() except Exception as e: if isinstance(e, ResourceGoneError): raise # Note the failure, blacklist the agent if the package downloaded # - An exception with a downloaded package indicates the package # is corrupt (e.g., missing the HandlerManifest.json file) self.mark_failure(is_fatal=os.path.isfile(self.get_agent_pkg_path())) msg = u"Agent {0} install failed with exception: {1}".format( self.name, ustr(e)) logger.warn(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=False, message=msg) @property def name(self): return "{0}-{1}".format(AGENT_NAME, self.version) def get_agent_cmd(self): return self.manifest.get_enable_command() def get_agent_dir(self): return os.path.join(conf.get_lib_dir(), self.name) def get_agent_error_file(self): return os.path.join(conf.get_lib_dir(), self.name, AGENT_ERROR_FILE) def get_agent_manifest_path(self): return os.path.join(self.get_agent_dir(), AGENT_MANIFEST_FILE) def get_agent_pkg_path(self): return ".".join((os.path.join(conf.get_lib_dir(), self.name), "zip")) def clear_error(self): self.error.clear() self.error.save() @property def is_available(self): return self.is_downloaded and not self.is_blacklisted @property def is_blacklisted(self): return self.error is not None and self.error.is_blacklisted @property def is_downloaded(self): return self.is_blacklisted or \ os.path.isfile(self.get_agent_manifest_path()) def mark_failure(self, is_fatal=False): try: if not 
os.path.isdir(self.get_agent_dir()): os.makedirs(self.get_agent_dir()) self.error.mark_failure(is_fatal=is_fatal) self.error.save() if self.error.is_blacklisted: logger.warn(u"Agent {0} is permanently blacklisted", self.name) except Exception as e: logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e)) def _ensure_downloaded(self): logger.verbose(u"Ensuring Agent {0} is downloaded", self.name) if self.is_downloaded: logger.verbose(u"Agent {0} was previously downloaded - skipping download", self.name) return if self.pkg is None: raise UpdateError(u"Agent {0} is missing package and download URIs".format( self.name)) self._download() self._unpack() msg = u"Agent {0} downloaded successfully".format(self.name) logger.verbose(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=True, message=msg) def _ensure_loaded(self): self._load_manifest() self._load_error() def _download(self): for uri in self.pkg.uris: if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break elif self.host is not None and self.host.ensure_initialized(): if not HostPluginProtocol.is_default_channel(): logger.warn("Download failed, switching to host plugin") else: logger.verbose("Using host plugin as default channel") uri, headers = self.host.get_artifact_request(uri.uri, self.host.manifest_uri) try: if self._fetch(uri, headers=headers, use_proxy=False): if not HostPluginProtocol.is_default_channel(): logger.verbose("Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) break else: logger.warn("Host plugin download failed") # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin except ResourceGoneError: HostPluginProtocol.set_default_channel(True) raise else: logger.error("No download channels available") if not os.path.isfile(self.get_agent_pkg_path()): msg = u"Unable to download Agent {0} from any URI".format(self.name) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, is_success=False, message=msg) raise UpdateError(msg) def _fetch(self, uri, headers=None, use_proxy=True): package = None try: resp = restutil.http_get(uri, use_proxy=use_proxy, headers=headers) if restutil.request_succeeded(resp): package = resp.read() fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) else: logger.verbose("Fetch was unsuccessful [{0}]", restutil.read_response_error(resp)) except restutil.HttpError as http_error: if isinstance(http_error, ResourceGoneError): raise logger.verbose(u"Agent {0} download from {1} failed [{2}]", self.name, uri, http_error) return package is not None def _load_error(self): try: self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() logger.verbose(u"Agent {0} error state: {1}", self.name, ustr(self.error)) except Exception as e: logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) def _load_manifest(self): path = self.get_agent_manifest_path() if not os.path.isfile(path): msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) with open(path, "r") as manifest_file: try: manifests = json.load(manifest_file) except Exception as e: msg = u"Agent {0} has a malformed {1}".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) if type(manifests) is list: if len(manifests) <= 0: msg = u"Agent {0} has an empty {1}".format(self.name, 
AGENT_MANIFEST_FILE) raise UpdateError(msg) manifest = manifests[0] else: manifest = manifests try: self.manifest = HandlerManifest(manifest) if len(self.manifest.get_enable_command()) <= 0: raise Exception(u"Manifest is missing the enable command") except Exception as e: msg = u"Agent {0} has an illegal {1}: {2}".format( self.name, AGENT_MANIFEST_FILE, ustr(e)) raise UpdateError(msg) logger.verbose( u"Agent {0} loaded manifest from {1}", self.name, self.get_agent_manifest_path()) logger.verbose(u"Successfully loaded Agent {0} {1}: {2}", self.name, AGENT_MANIFEST_FILE, ustr(self.manifest.data)) return def _unpack(self): try: if os.path.isdir(self.get_agent_dir()): shutil.rmtree(self.get_agent_dir()) zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir()) except Exception as e: fileutil.clean_ioerror(e, paths=[self.get_agent_dir(), self.get_agent_pkg_path()]) msg = u"Exception unpacking Agent {0} from {1}: {2}".format( self.name, self.get_agent_pkg_path(), ustr(e)) raise UpdateError(msg) if not os.path.isdir(self.get_agent_dir()): msg = u"Unpacking Agent {0} failed to create directory {1}".format( self.name, self.get_agent_dir()) raise UpdateError(msg) logger.verbose( u"Agent {0} unpacked successfully to {1}", self.name, self.get_agent_dir()) return class GuestAgentError(object): def __init__(self, path): if path is None: raise UpdateError(u"GuestAgentError requires a path") self.path = path self.clear() return def mark_failure(self, is_fatal=False): self.last_failure = time.time() self.failure_count += 1 self.was_fatal = is_fatal return def clear(self): self.last_failure = 0.0 self.failure_count = 0 self.was_fatal = False return @property def is_blacklisted(self): return self.was_fatal or self.failure_count >= MAX_FAILURE def load(self): if self.path is not None and os.path.isfile(self.path): with open(self.path, 'r') as f: self.from_json(json.load(f)) return def save(self): if os.path.isdir(os.path.dirname(self.path)): with open(self.path, 'w') as f: json.dump(self.to_json(), f) return def from_json(self, data): self.last_failure = max( self.last_failure, data.get(u"last_failure", 0.0)) self.failure_count = max( self.failure_count, data.get(u"failure_count", 0)) self.was_fatal = self.was_fatal or data.get(u"was_fatal", False) return def to_json(self): data = { u"last_failure": self.last_failure, u"failure_count": self.failure_count, u"was_fatal" : self.was_fatal } return data def __str__(self): return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( self.last_failure, self.failure_count, self.was_fatal) WALinuxAgent-2.2.20/azurelinuxagent/pa/000077500000000000000000000000001322477356400177455ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/pa/__init__.py000066400000000000000000000011661322477356400220620ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
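# Illustrative sketch of the GuestAgentError blacklisting behaviour defined above:
# an agent is blacklisted after a fatal failure, or once the persisted failure
# count reaches MAX_FAILURE. The value 3 below is assumed for the example; the
# real constant and class live in the agent itself.
import time

MAX_FAILURE_EXAMPLE = 3

class ErrorStateSketch(object):
    def __init__(self):
        self.last_failure, self.failure_count, self.was_fatal = 0.0, 0, False

    def mark_failure(self, is_fatal=False):
        # mirrors GuestAgentError.mark_failure above
        self.last_failure = time.time()
        self.failure_count += 1
        self.was_fatal = is_fatal

    @property
    def is_blacklisted(self):
        return self.was_fatal or self.failure_count >= MAX_FAILURE_EXAMPLE

# three transient failures are enough to blacklist:
#   s = ErrorStateSketch(); s.mark_failure(); s.mark_failure(); s.mark_failure()
#   s.is_blacklisted  -> True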
# # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/000077500000000000000000000000001322477356400223065ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/__init__.py000066400000000000000000000013501322477356400244160ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler __all__ = ["get_deprovision_handler"] WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/arch.py000066400000000000000000000024511322477356400235770ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ArchDeprovisionHandler(DeprovisionHandler): def __init__(self): super(ArchDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(ArchDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/clearlinux.py000066400000000000000000000022371322477356400250320ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
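# Illustrative sketch of the warnings/actions pattern used by ArchDeprovisionHandler
# above (and by the other distro handlers that follow): each handler collects
# human-readable warnings plus deferred actions, and the actions only run after the
# user confirms. The names below are stand-ins, not the real agent classes.
class ActionSketch(object):
    def __init__(self, func, args=None):
        self.func = func
        self.args = args or []

    def invoke(self):
        self.func(*self.args)

def _rm_files_sketch(*paths):
    # stand-in for fileutil.rm_files
    pass

def setup_sketch():
    warnings, actions = [], []
    warnings.append("WARNING! /etc/machine-id will be removed.")
    actions.append(ActionSketch(_rm_files_sketch, ["/etc/machine-id"]))
    return warnings, actions

# warnings are printed first; each action's invoke() runs only after confirmation.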
# # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ClearLinuxDeprovisionHandler(DeprovisionHandler): def __init__(self, distro): self.distro = distro def setup(self, deluser): warnings, actions = super(ClearLinuxDeprovisionHandler, self).setup(deluser) # Probably should just wipe /etc and /var here return warnings, actions WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/coreos.py000066400000000000000000000024601322477356400241540ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class CoreOSDeprovisionHandler(DeprovisionHandler): def __init__(self): super(CoreOSDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/default.py000066400000000000000000000226151322477356400243120ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import glob import os.path import signal import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util def read_input(message): if sys.version_info[0] >= 3: return input(message) else: return raw_input(message) class DeprovisionAction(object): def __init__(self, func, args=[], kwargs={}): self.func = func self.args = args self.kwargs = kwargs def invoke(self): self.func(*self.args, **self.kwargs) class DeprovisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.actions_running = False signal.signal(signal.SIGINT, self.handle_interrupt_signal) def del_root_password(self, warnings, actions): warnings.append("WARNING! root password will be disabled. " "You will not be able to login as root.") actions.append(DeprovisionAction(self.osutil.del_root_password)) def del_user(self, warnings, actions): try: ovfenv = self.protocol_util.get_ovf_env() except ProtocolError: warnings.append("WARNING! ovf-env.xml is not found.") warnings.append("WARNING! Skip delete user.") return username = ovfenv.username warnings.append(("WARNING! {0} account and entire home directory " "will be deleted.").format(username)) actions.append(DeprovisionAction(self.osutil.del_account, [username])) def regen_ssh_host_key(self, warnings, actions): warnings.append("WARNING! All SSH host key pairs will be deleted.") actions.append(DeprovisionAction(fileutil.rm_files, [conf.get_ssh_key_glob()])) def stop_agent_service(self, warnings, actions): warnings.append("WARNING! The waagent service will be stopped.") actions.append(DeprovisionAction(self.osutil.stop_agent_service)) def del_dirs(self, warnings, actions): dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) def del_files(self, warnings, actions): files = ['/root/.bash_history', '/var/log/waagent.log'] actions.append(DeprovisionAction(fileutil.rm_files, files)) # For OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/etc/random.seed", "/var/db/host.random", "/etc/isakmpd/local.pub", "/etc/isakmpd/private/local.key", "/etc/iked/private/local.key", "/etc/iked/local.pub"])) def del_resolv(self, warnings, actions): warnings.append("WARNING! /etc/resolv.conf will be deleted.") files_to_del = ["/etc/resolv.conf"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) def del_dhcp_lease(self, warnings, actions): warnings.append("WARNING! 
Cached DHCP leases will be deleted.") dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) # For FreeBSD and OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/var/db/dhclient.leases.*"])) # For FreeBSD, NM controlled actions.append(DeprovisionAction(fileutil.rm_files, ["/var/lib/NetworkManager/dhclient-*.lease"])) def del_lib_dir_files(self, warnings, actions): known_files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'partition', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint' ] known_files_glob = [ 'Extensions.*.xml', 'ExtensionsConfig.*.xml', 'GoalState.*.xml' ] lib_dir = conf.get_lib_dir() files = [f for f in \ [os.path.join(lib_dir, kf) for kf in known_files] \ if os.path.isfile(f)] for p in known_files_glob: files += glob.glob(os.path.join(lib_dir, p)) if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def cloud_init_dirs(self, include_once=True): dirs = [ "/var/lib/cloud/instance", "/var/lib/cloud/instances/", "/var/lib/cloud/data" ] if include_once: dirs += [ "/var/lib/cloud/scripts/per-once" ] return dirs def cloud_init_files(self, include_once=True, deluser=False): files = [] if deluser: files += [ "/etc/sudoers.d/90-cloud-init-users" ] if include_once: files += [ "/var/lib/cloud/sem/config_scripts_per_once.once" ] return files def del_cloud_init(self, warnings, actions, include_once=True, deluser=False): dirs = [d for d in self.cloud_init_dirs(include_once=include_once) \ if os.path.isdir(d)] if len(dirs) > 0: actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) files = [f for f in self.cloud_init_files( include_once=include_once, deluser=deluser) \ if os.path.isfile(f)] if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] actions.append(DeprovisionAction(self.osutil.set_hostname, localhost)) actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname, localhost)) def setup(self, deluser): warnings = [] actions = [] self.stop_agent_service(warnings, actions) if conf.get_regenerate_ssh_host_key(): self.regen_ssh_host_key(warnings, actions) self.del_dhcp_lease(warnings, actions) self.reset_hostname(warnings, actions) if conf.get_delete_root_password(): self.del_root_password(warnings, actions) self.del_cloud_init(warnings, actions, deluser=deluser) self.del_dirs(warnings, actions) self.del_files(warnings, actions) self.del_resolv(warnings, actions) if deluser: self.del_user(warnings, actions) return warnings, actions def setup_changed_unique_id(self): warnings = [] actions = [] self.del_cloud_init(warnings, actions, include_once=False, deluser=False) self.del_dhcp_lease(warnings, actions) self.del_lib_dir_files(warnings, actions) return warnings, actions def run(self, force=False, deluser=False): warnings, actions = self.setup(deluser) self.do_warnings(warnings) if self.do_confirmation(force=force): self.do_actions(actions) def run_changed_unique_id(self): ''' Clean-up files and directories that may interfere when the VM unique identifier has changed. While users *should* manually deprovision a VM, the files removed by this routine will help keep the agent from getting confused (since incarnation and extension settings, among other items, will no longer be monotonically increasing). 
''' warnings, actions = self.setup_changed_unique_id() self.do_warnings(warnings) self.do_actions(actions) def do_actions(self, actions): self.actions_running = True for action in actions: action.invoke() self.actions_running = False def do_confirmation(self, force=False): if force: return True confirm = read_input("Do you want to proceed (y/n)") return True if confirm.lower().startswith('y') else False def do_warnings(self, warnings): for warning in warnings: print(warning) def handle_interrupt_signal(self, signum, frame): if not self.actions_running: print("Deprovision is interrupted.") sys.exit(0) print ('Deprovisioning may not be interrupted.') return WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/factory.py000066400000000000000000000031011322477356400243220ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import DeprovisionHandler from .arch import ArchDeprovisionHandler from .clearlinux import ClearLinuxDeprovisionHandler from .coreos import CoreOSDeprovisionHandler from .ubuntu import UbuntuDeprovisionHandler def get_deprovision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "arch": return ArchDeprovisionHandler() if distro_name == "ubuntu": return UbuntuDeprovisionHandler() if distro_name == "coreos": return CoreOSDeprovisionHandler() if distro_name == "clear linux": return ClearLinuxDeprovisionHandler() return DeprovisionHandler() WALinuxAgent-2.2.20/azurelinuxagent/pa/deprovision/ubuntu.py000066400000000000000000000033031322477356400242010ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class UbuntuDeprovisionHandler(DeprovisionHandler): def __init__(self): super(UbuntuDeprovisionHandler, self).__init__() def del_resolv(self, warnings, actions): if os.path.realpath( '/etc/resolv.conf') != '/run/resolvconf/resolv.conf': warnings.append("WARNING! 
/etc/resolv.conf will be deleted.") files_to_del = ["/etc/resolv.conf"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) else: warnings.append("WARNING! /etc/resolvconf/resolv.conf.d/tail " "and /etc/resolvconf/resolv.conf.d/original will " "be deleted.") files_to_del = ["/etc/resolvconf/resolv.conf.d/tail", "/etc/resolvconf/resolv.conf.d/original"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) WALinuxAgent-2.2.20/azurelinuxagent/pa/provision/000077500000000000000000000000001322477356400217755ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/pa/provision/__init__.py000066400000000000000000000012751322477356400241130ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.pa.provision.factory import get_provision_handler WALinuxAgent-2.2.20/azurelinuxagent/pa/provision/cloudinit.py000066400000000000000000000131641322477356400243460ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import os import os.path import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.event import elapsed_milliseconds from azurelinuxagent.common.exception import ProvisionError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.pa.provision.default import ProvisionHandler class CloudInitProvisionHandler(ProvisionHandler): def __init__(self): super(CloudInitProvisionHandler, self).__init__() def run(self): # If provision is enabled, run default provision handler if conf.get_provision_enabled(): logger.warn("Provisioning flag is enabled, which overrides using " "cloud-init; running the default provisioning code") super(CloudInitProvisionHandler, self).run() return try: if super(CloudInitProvisionHandler, self).is_provisioned(): logger.info("Provisioning already completed, skipping.") return utc_start = datetime.utcnow() logger.info("Running CloudInit provisioning handler") self.wait_for_ovfenv() self.protocol_util.get_protocol() self.report_not_ready("Provisioning", "Starting") thumbprint = self.wait_for_ssh_host_key() self.write_provisioned() logger.info("Finished provisioning") self.report_ready(thumbprint) self.report_event("Provision succeed", is_success=True, duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: logger.error("Provisioning failed: {0}", ustr(e)) self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(ustr(e)) return def wait_for_ovfenv(self, max_retry=360, sleep_time=5): """ Wait for cloud-init to copy ovf-env.xml file from provision ISO """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) for retry in range(0, max_retry): if os.path.isfile(ovf_file_path): try: OvfEnv(fileutil.read_file(ovf_file_path)) return except ProtocolError as pe: raise ProvisionError("OVF xml could not be parsed " "[{0}]: {1}".format(ovf_file_path, ustr(pe))) else: if retry < max_retry - 1: logger.info( "Waiting for cloud-init to copy ovf-env.xml to {0} " "[{1} retries remaining, " "sleeping {2}s]".format(ovf_file_path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ovf-env.xml was not copied to {0} " "after {1}s".format(ovf_file_path, max_retry * sleep_time)) def wait_for_ssh_host_key(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to generate ssh host key """ keypair_type = conf.get_ssh_host_keypair_type() path = conf.get_ssh_key_public_path() for retry in range(0, max_retry): if os.path.isfile(path): logger.info("ssh host key found at: {0}".format(path)) try: thumbprint = self.get_ssh_host_key_thumbprint(chk_err=False) logger.info("Thumbprint obtained from : {0}".format(path)) return thumbprint except ProvisionError: logger.warn("Could not get thumbprint from {0}".format(path)) if retry < max_retry - 1: logger.info("Waiting for ssh host key be generated at {0} " "[{1} attempts remaining, " "sleeping {2}s]".format(path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ssh host 
key was not found at {0} " "after {1}s".format(path, max_retry * sleep_time)) WALinuxAgent-2.2.20/azurelinuxagent/pa/provision/default.py000066400000000000000000000251551322477356400240030ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # """ Provision handler """ import os import os.path import re import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.event import add_event, WALAEventOperation, \ elapsed_milliseconds from azurelinuxagent.common.exception import ProvisionError, ProtocolError, \ OSUtilError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol.restapi import ProvisionStatus from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME CUSTOM_DATA_FILE = "CustomData" CLOUD_INIT_PATTERN = b".*/bin/cloud-init.*" CLOUD_INIT_REGEX = re.compile(CLOUD_INIT_PATTERN) PROVISIONED_FILE = 'provisioned' class ProvisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() def run(self): if not conf.get_provision_enabled(): logger.info("Provisioning is disabled, skipping.") self.write_provisioned() self.report_ready() return try: utc_start = datetime.utcnow() thumbprint = None if self.is_provisioned(): logger.info("Provisioning already completed, skipping.") return logger.info("Running default provisioning handler") if not self.validate_cloud_init(is_expected=False): raise ProvisionError("cloud-init appears to be running, " "this is not expected, cannot continue") logger.info("Copying ovf-env.xml") ovf_env = self.protocol_util.copy_ovf_env() self.protocol_util.get_protocol_by_file() self.report_not_ready("Provisioning", "Starting") logger.info("Starting provisioning") self.provision(ovf_env) thumbprint = self.reg_ssh_host_key() self.osutil.restart_ssh_service() self.write_provisioned() self.report_event("Provision succeed", is_success=True, duration=elapsed_milliseconds(utc_start)) self.report_ready(thumbprint) logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(ustr(e)) logger.error("Provisioning failed: {0}", ustr(e)) return @staticmethod def validate_cloud_init(is_expected=True): is_running = False if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() if CLOUD_INIT_REGEX.match(pname): is_running = True msg = "cloud-init is running [PID {0}, {1}]".format(pid, pname) if is_expected: logger.verbose(msg) else: logger.error(msg) break except IOError: continue 
return is_running == is_expected def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key(): fileutil.rm_files(conf.get_ssh_key_glob()) if conf.get_ssh_host_keypair_mode() == "auto": ''' The -A option generates all supported key types. This is supported since OpenSSH 5.9 (2011). ''' shellutil.run("ssh-keygen -A") else: keygen_cmd = "ssh-keygen -N '' -t {0} -f {1}" shellutil.run(keygen_cmd. format(keypair_type, conf.get_ssh_key_private_path())) return self.get_ssh_host_key_thumbprint() def get_ssh_host_key_thumbprint(self, chk_err=True): cmd = "ssh-keygen -lf {0}".format(conf.get_ssh_key_public_path()) ret = shellutil.run_get_output(cmd, chk_err=chk_err) if ret[0] == 0: return ret[1].rstrip().split()[1].replace(':', '') else: raise ProvisionError(("Failed to generate ssh host key: " "ret={0}, out= {1}").format(ret[0], ret[1])) def provisioned_file_path(self): return os.path.join(conf.get_lib_dir(), PROVISIONED_FILE) def is_provisioned(self): ''' A VM is considered provisionend *anytime* the provisioning sentinel file exists and not provisioned *anytime* the file is absent. If the VM was provisioned using an agent that did not record the VM unique identifier, the provisioning file will be re-written to include the identifier. A warning is logged *if* the VM unique identifier has changed since VM was provisioned. ''' if not os.path.isfile(self.provisioned_file_path()): return False s = fileutil.read_file(self.provisioned_file_path()).strip() if not self.osutil.is_current_instance_id(s): if len(s) > 0: logger.warn("VM is provisioned, " "but the VM unique identifier has changed -- " "clearing cached state") from azurelinuxagent.pa.deprovision \ import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run_changed_unique_id() self.write_provisioned() self.report_ready() return True def write_provisioned(self): fileutil.write_file( self.provisioned_file_path(), get_osutil().get_instance_id()) def provision(self, ovfenv): logger.info("Handle ovf-env.xml.") try: logger.info("Set hostname [{0}]".format(ovfenv.hostname)) self.osutil.set_hostname(ovfenv.hostname) logger.info("Publish hostname [{0}]".format(ovfenv.hostname)) self.osutil.publish_hostname(ovfenv.hostname) self.config_user_account(ovfenv) self.save_customdata(ovfenv) if conf.get_delete_root_password(): self.osutil.del_root_password() except OSUtilError as e: raise ProvisionError("Failed to provision: {0}".format(ustr(e))) def config_user_account(self, ovfenv): logger.info("Create user account if not exists") self.osutil.useradd(ovfenv.username) if ovfenv.user_password is not None: logger.info("Set user password.") crypt_id = conf.get_password_cryptid() salt_len = conf.get_password_crypt_salt_len() self.osutil.chpasswd(ovfenv.username, ovfenv.user_password, crypt_id=crypt_id, salt_len=salt_len) logger.info("Configure sudoer") self.osutil.conf_sudoer(ovfenv.username, nopasswd=ovfenv.user_password is None) logger.info("Configure sshd") self.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) self.deploy_ssh_pubkeys(ovfenv) self.deploy_ssh_keypairs(ovfenv) def save_customdata(self, ovfenv): customdata = ovfenv.customdata if customdata is None: return lib_dir = conf.get_lib_dir() if conf.get_decode_customdata() or conf.get_execute_customdata(): logger.info("Decode custom data") customdata = self.osutil.decode_customdata(customdata) logger.info("Save custom data") customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) 
fileutil.write_file(customdata_file, customdata) if conf.get_execute_customdata(): start = time.time() logger.info("Execute custom data") os.chmod(customdata_file, 0o700) shellutil.run(customdata_file) add_event(name=AGENT_NAME, duration=int(time.time() - start), is_success=True, op=WALAEventOperation.CustomData) def deploy_ssh_pubkeys(self, ovfenv): for pubkey in ovfenv.ssh_pubkeys: logger.info("Deploy ssh public key.") self.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) def deploy_ssh_keypairs(self, ovfenv): for keypair in ovfenv.ssh_keypairs: logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) def report_event(self, message, is_success=False, duration=0): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, op=WALAEventOperation.Provision) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, description=description) try: protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting NotReady failed: {0}", e) self.report_event(ustr(e)) def report_ready(self, thumbprint=None): status = ProvisionStatus(status="Ready") status.properties.certificateThumbprint = thumbprint try: protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting Ready failed: {0}", e) self.report_event(ustr(e)) WALinuxAgent-2.2.20/azurelinuxagent/pa/provision/factory.py000066400000000000000000000024061322477356400240200ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ProvisionHandler from .cloudinit import CloudInitProvisionHandler def get_provision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if conf.get_provision_cloudinit(): return CloudInitProvisionHandler() return ProvisionHandler() WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/000077500000000000000000000000001322477356400206705ustar00rootroot00000000000000WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/__init__.py000066400000000000000000000012631322477356400230030ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
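# Illustrative sketch of the fingerprint parsing in get_ssh_host_key_thumbprint
# above: the second whitespace-separated field of `ssh-keygen -lf <pubkey>` output
# is the fingerprint, with the colons stripped. The sample line below is made up
# for the example.
sample_output = "2048 a1:b2:c3:d4:e5:f6 root@example (RSA)"
thumbprint_sketch = sample_output.rstrip().split()[1].replace(':', '')
# thumbprint_sketch == "a1b2c3d4e5f6"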
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.pa.rdma.factory import get_rdma_handler WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/centos.py000066400000000000000000000241641322477356400225440ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class CentOSRDMAHandler(RDMAHandler): rdma_user_mode_package_name = 'microsoft-hyper-v-rdma' rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma' rdma_wrapper_package_name = 'msft-rdma-drivers' hyper_v_package_name = "hypervkvpd" hyper_v_package_name_new = "microsoft-hyper-v" version_major = None version_minor = None def __init__(self, distro_version): v = distro_version.split('.') if len(v) < 2: raise Exception('Unexpected centos version: %s' % distro_version) self.version_major, self.version_minor = v[0], v[1] def install_driver(self): """ Install the KVP daemon and the appropriate RDMA driver package for the RDMA firmware. """ # Check and install the KVP deamon if it not running time.sleep(10) # give some time for the hv_hvp_daemon to start up. 
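# Illustrative aside (mirrors is_rdma_package_up_to_date defined further below):
# the installed user-mode RDMA package is considered current when its name embeds
# the firmware version just before the build-date segment. The package string used
# here is the example from that method's own comment.
import re
_pkg_example = 'microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64'
_fw_example = '142'
_pattern_example = r'microsoft-hyper-v-rdma-(\d+\.){3,}(%s)-' % _fw_example
# re.match(_pattern_example, _pkg_example) is not None  -> True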
kvpd_running = RDMAHandler.is_kvp_daemon_running() logger.info('RDMA: kvp daemon running: %s' % kvpd_running) if not kvpd_running: self.check_or_install_kvp_daemon() time.sleep(10) # wait for post-install reboot or kvp to come up # Find out RDMA firmware version and see if the existing package needs # updating or if the package is missing altogether (and install it) fw_version = RDMAHandler.get_rdma_version() if not fw_version: raise Exception('Cannot determine RDMA firmware version') logger.info("RDMA: found firmware version: {0}".format(fw_version)) fw_version = self.get_int_rdma_version(fw_version) installed_pkg = self.get_rdma_package_info() if installed_pkg: logger.info( 'RDMA: driver package present: {0}'.format(installed_pkg)) if self.is_rdma_package_up_to_date(installed_pkg, fw_version): logger.info('RDMA: driver package is up-to-date') return else: logger.info('RDMA: driver package needs updating') self.update_rdma_package(fw_version) else: logger.info('RDMA: driver package is NOT installed') self.update_rdma_package(fw_version) def is_rdma_package_up_to_date(self, pkg, fw_version): # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 pattern = '{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) return re.match(pattern, pkg) @staticmethod def get_int_rdma_version(version): s = version.split('.') if len(s) == 0: raise Exception('Unexpected RDMA firmware version: "%s"' % version) return s[0] def get_rdma_package_info(self): """ Returns the installed rdma package name or None """ ret, output = shellutil.run_get_output( 'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False) if ret != 0: return None return output def update_rdma_package(self, fw_version): logger.info("RDMA: updating RDMA packages") self.refresh_repos() self.force_install_package(self.rdma_wrapper_package_name) self.install_rdma_drivers(fw_version) def force_install_package(self, pkg_name): """ Attempts to remove existing package and installs the package """ logger.info('RDMA: Force installing package: %s' % pkg_name) if self.uninstall_package(pkg_name) != 0: logger.info('RDMA: Erasing package failed but will continue') if self.install_package(pkg_name) != 0: raise Exception('Failed to install package "{0}"'.format(pkg_name)) logger.info('RDMA: installation completed: %s' % pkg_name) @staticmethod def uninstall_package(pkg_name): return shellutil.run('yum erase -y -q {0}'.format(pkg_name)) @staticmethod def install_package(pkg_name): return shellutil.run('yum install -y -q {0}'.format(pkg_name)) def refresh_repos(self): logger.info("RDMA: refreshing yum repos") if shellutil.run('yum clean all') != 0: raise Exception('Cleaning yum repositories failed') if shellutil.run('yum updateinfo') != 0: raise Exception('Failed to act on yum repo update information') logger.info("RDMA: repositories refreshed") def install_rdma_drivers(self, fw_version): """ Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or src). Tries to uninstall them first. 
""" pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format( self.version_major, self.version_minor) logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir)) if not os.path.isdir(pkg_dir): raise Exception('RDMA packages directory %s is missing' % pkg_dir) pkgs = os.listdir(pkg_dir) logger.info('RDMA: found %d files in package directory' % len(pkgs)) # Uninstal KVP daemon first (if exists) self.uninstall_kvp_driver_package_if_exists() # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) kmod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) if not kmod_pkg: raise Exception("RDMA kernel mode package not found") kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) self.uninstall_pkg_and_install_from( 'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path) # Install user mode driver (microsoft-hyper-v-rdma-*) umod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) if not umod_pkg: raise Exception("RDMA user mode package not found") umod_pkg_path = os.path.join(pkg_dir, umod_pkg) self.uninstall_pkg_and_install_from( 'user mode', self.rdma_user_mode_package_name, umod_pkg_path) logger.info("RDMA: driver packages installed") if not self.load_driver_module() or not self.is_driver_loaded(): logger.info("RDMA: driver module is not loaded; reboot required") self.reboot_system() else: logger.info("RDMA: kernel module is loaded") @staticmethod def get_file_by_pattern(list, pattern): for l in list: if re.match(pattern, l): return l return None def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path): logger.info( "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path)) logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name) if self.uninstall_package(pkg_name) == 0: logger.info("RDMA: Successfully uninstaled %s" % pkg_name) logger.info( "RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path)) if self.install_package(pkg_path) != 0: raise Exception( "Failed to install RDMA {0} package".format(pkg_type)) @staticmethod def is_package_installed(pkg): """Runs rpm -q and checks return code to find out if a package is installed""" return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0 def uninstall_kvp_driver_package_if_exists(self): logger.info('RDMA: deleting existing kvp driver packages') kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for kvp_pkg in kvp_pkgs: if not self.is_package_installed(kvp_pkg): logger.info( "RDMA: kvp package %s does not exist, skipping" % kvp_pkg) else: logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg) if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0: logger.info("RDMA: successfully erased package") else: logger.error("RDMA: failed to erase package") def check_or_install_kvp_daemon(self): """Checks if kvp daemon package is installed, if not installs the package and reboots the machine. 
""" logger.info("RDMA: Checking kvp daemon packages.") kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for pkg in kvp_pkgs: logger.info("RDMA: Checking if package %s installed" % pkg) installed = self.is_package_installed(pkg) if installed: raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg) kvp_pkg_to_install=self.hyper_v_package_name logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install) logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install) if self.install_package(kvp_pkg_to_install) != 0: raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install) logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install) logger.info("RDMA: Machine will now be rebooted.") self.reboot_system()WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/factory.py000066400000000000000000000030411322477356400227070ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION from azurelinuxagent.common.rdma import RDMAHandler from .suse import SUSERDMAHandler from .centos import CentOSRDMAHandler from .ubuntu import UbuntuRDMAHandler def get_rdma_handler( distro_full_name=DISTRO_FULL_NAME, distro_version=DISTRO_VERSION ): """Return the handler object for RDMA driver handling""" if ( distro_full_name == 'SUSE Linux Enterprise Server' and int(distro_version) > 11 ): return SUSERDMAHandler() if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS': return CentOSRDMAHandler(distro_version) if distro_full_name == 'Ubuntu': return UbuntuRDMAHandler() logger.info("No RDMA handler exists for distro='{0}' version='{1}'", distro_full_name, distro_version) return RDMAHandler() WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/suse.py000066400000000000000000000154261322477356400222310ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2017 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import glob import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class SUSERDMAHandler(RDMAHandler): def install_driver(self): """Install the appropriate driver package for the RDMA firmware""" fw_version = RDMAHandler.get_rdma_version() if not fw_version: error_msg = 'RDMA: Could not determine firmware version. ' error_msg += 'Therefore, no driver will be installed.' logger.error(error_msg) return zypper_install = 'zypper -n in %s' zypper_install_noref = 'zypper -n --no-refresh in %s' zypper_lock = 'zypper addlock %s' zypper_remove = 'zypper -n rm %s' zypper_search = 'zypper -n se -s %s' zypper_unlock = 'zypper removelock %s' package_name = 'msft-rdma-kmp-default' cmd = zypper_search % package_name status, repo_package_info = shellutil.run_get_output(cmd) driver_package_versions = [] driver_package_installed = False for entry in repo_package_info.split('\n'): if package_name in entry: sections = entry.split('|') if len(sections) < 4: error_msg = 'RDMA: Unexpected output from"%s": "%s"' logger.error(error_msg % (cmd, entry)) continue installed = sections[0].strip() version = sections[3].strip() driver_package_versions.append(version) if fw_version in version and installed.startswith('i'): info_msg = 'RDMA: Matching driver package "%s-%s" ' info_msg += 'is already installed, nothing to do.' logger.info(info_msg % (package_name, version)) return True if installed.startswith('i'): # A driver with a different version is installed driver_package_installed = True cmd = zypper_unlock % package_name result = shellutil.run(cmd) info_msg = 'Driver with different version installed ' info_msg += 'unlocked package "%s".' logger.info(info_msg % (package_name)) # If we get here the driver package is installed but the # version doesn't match or no package is installed requires_reboot = False if driver_package_installed: # Unloading the particular driver with rmmod does not work # We have to reboot after the new driver is installed if self.is_driver_loaded(): info_msg = 'RDMA: Currently loaded driver does not match the ' info_msg += 'firmware implementation, reboot will be required.' logger.info(info_msg) requires_reboot = True logger.info("RDMA: removing package %s" % package_name) cmd = zypper_remove % package_name shellutil.run(cmd) logger.info("RDMA: removed package %s" % package_name) logger.info("RDMA: looking for fw version %s in packages" % fw_version) for entry in driver_package_versions: if fw_version not in entry: logger.info("Package '%s' is not a match." % entry) else: logger.info("Package '%s' is a match. Installing." % entry) complete_name = '%s-%s' % (package_name, version) cmd = zypper_install % complete_name result = shellutil.run(cmd) if result: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from available repositories.' logger.error(error_msg % complete_name) msg = 'RDMA: Successfully installed "%s" from ' msg += 'configured repositories' logger.info(msg % complete_name) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: logger.info("RDMA: No suitable match in repos. 
Trying local.") local_packages = glob.glob('/opt/microsoft/rdma/*.rpm') for local_package in local_packages: logger.info("Examining: %s" % local_package) if local_package.endswith('.src.rpm'): continue if ( package_name in local_package and fw_version in local_package ): logger.info("RDMA: Installing: %s" % local_package) cmd = zypper_install_noref % local_package result = shellutil.run(cmd) if result and result != 106: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from local package cache' logger.error(error_msg % local_package) break msg = 'RDMA: Successfully installed "%s" from ' msg += 'local package cache' logger.info(msg % (local_package)) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: error_msg = 'Unable to find driver package that matches ' error_msg += 'RDMA firmware version "%s"' % fw_version logger.error(error_msg) return WALinuxAgent-2.2.20/azurelinuxagent/pa/rdma/ubuntu.py000066400000000000000000000121771322477356400225740ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class UbuntuRDMAHandler(RDMAHandler): def install_driver(self): #Install the appropriate driver package for the RDMA firmware nd_version = RDMAHandler.get_rdma_version() if not nd_version: logger.error("RDMA: Could not determine firmware version. No driver will be installed") return #replace . with _, we are looking for number like 144_0 nd_version = re.sub('\.', '_', nd_version) #Check to see if we need to reconfigure driver status,module_name = shellutil.run_get_output('modprobe -R hv_network_direct', chk_err=False) if status != 0: logger.info("RDMA: modprobe -R hv_network_direct failed. Use module name hv_network_direct") module_name = "hv_network_direct" else: module_name = module_name.strip() logger.info("RDMA: current RDMA driver %s nd_version %s" % (module_name, nd_version)) if module_name == 'hv_network_direct_%s' % nd_version: logger.info("RDMA: driver is installed and ND version matched. Skip reconfiguring driver") return #Reconfigure driver if one is available status,output = shellutil.run_get_output('modinfo hv_network_direct_%s' % nd_version); if status == 0: logger.info("RDMA: driver with ND version is installed. Link to module name") self.update_modprobed_conf(nd_version) return #Driver not found. We need to check to see if we need to update kernel if not conf.enable_rdma_update(): logger.info("RDMA: driver update is disabled. 
Skip kernel update") return status,output = shellutil.run_get_output('uname -r') if status != 0: return if not re.search('-azure$', output): logger.error("RDMA: skip driver update on non-Azure kernel") return kernel_version = re.sub('-azure$', '', output) kernel_version = re.sub('-', '.', kernel_version) #Find the new kernel package version status,output = shellutil.run_get_output('apt-get update') if status != 0: return status,output = shellutil.run_get_output('apt-cache show --no-all-versions linux-azure') if status != 0: return r = re.search('Version: (\S+)', output) if not r: logger.error("RDMA: version not found in package linux-azure.") return package_version = r.groups()[0] #Remove the ending . after package_version = re.sub("\.\d+$", "", package_version) logger.info('RDMA: kernel_version=%s package_version=%s' % (kernel_version, package_version)) kernel_version_array = [ int(x) for x in kernel_version.split('.') ] package_version_array = [ int(x) for x in package_version.split('.') ] if kernel_version_array < package_version_array: logger.info("RDMA: newer version available, update kernel and reboot") status,output = shellutil.run_get_output('apt-get -y install linux-azure') if status: logger.error("RDMA: kernel update failed") return self.reboot_system() else: logger.error("RDMA: no kernel update is avaiable for ND version %s" % nd_version) def update_modprobed_conf(self, nd_version): #Update /etc/modprobe.d/vmbus-rdma.conf to point to the correct driver modprobed_file = '/etc/modprobe.d/vmbus-rdma.conf' lines = '' if not os.path.isfile(modprobed_file): logger.info("RDMA: %s not found, it will be created" % modprobed_file) else: f = open(modprobed_file, 'r') lines = f.read() f.close() r = re.search('alias hv_network_direct hv_network_direct_\S+', lines) if r: lines = re.sub('alias hv_network_direct hv_network_direct_\S+', 'alias hv_network_direct hv_network_direct_%s' % nd_version, lines) else: lines += '\nalias hv_network_direct hv_network_direct_%s\n' % nd_version f = open('/etc/modprobe.d/vmbus-rdma.conf', 'w') f.write(lines) f.close() logger.info("RDMA: hv_network_direct alias updated to ND %s" % nd_version) WALinuxAgent-2.2.20/bin/000077500000000000000000000000001322477356400146705ustar00rootroot00000000000000WALinuxAgent-2.2.20/bin/waagent000077500000000000000000000027521322477356400162520ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import os import imp import sys if __name__ == '__main__' : import azurelinuxagent.agent as agent """ Invoke main method of agent """ agent.main() if __name__ == 'waagent': """ Load waagent2.0 to support old version of extensions """ if sys.version_info[0] == 3: raise ImportError("waagent2.0 doesn't support python3") bin_path = os.path.dirname(os.path.abspath(__file__)) agent20_path = os.path.join(bin_path, "waagent2.0") if not os.path.isfile(agent20_path): raise ImportError("Can't load waagent") agent20 = imp.load_source('waagent', agent20_path) __all__ = dir(agent20) WALinuxAgent-2.2.20/bin/waagent2.0000066400000000000000000007550241322477356400164760ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import crypt import random import array import base64 import httplib import os import os.path import platform import pwd import re import shutil import socket import SocketServer import struct import string import subprocess import sys import tempfile import textwrap import threading import time import traceback import xml.dom.minidom import fcntl import inspect import zipfile import json import datetime import xml.sax.saxutils from distutils.version import LooseVersion if not hasattr(subprocess,'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) subprocess.check_output=check_output subprocess.CalledProcessError=CalledProcessError GuestAgentName = "WALinuxAgent" GuestAgentLongName = "Azure Linux Agent" GuestAgentVersion = "WALinuxAgent-2.0.16" ProtocolVersion = "2012-11-30" #WARNING this value is used to confirm the correct fabric protocol. 
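# --- Illustrative note added by the editor; not part of the original file.
# The subprocess.check_output backport patched in above follows the Python 2.7
# contract, so the rest of this script can call it the same way on Python 2.6
# and 2.7. Hypothetical helper for demonstration only:
def _example_check_output_usage():
    # Returns the command's stdout on success; a non-zero exit status raises
    # subprocess.CalledProcessError with the captured output in .output.
    return subprocess.check_output(["uname", "-r"]).strip()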
Config = None WaAgent = None DiskActivated = False Openssl = "openssl" Children = [] ExtensionChildren = [] VMM_STARTUP_SCRIPT_NAME='install' VMM_CONFIG_FILE_NAME='linuxosconfiguration.xml' global RulesFiles RulesFiles = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"] global LibDir LibDir = "/var/lib/waagent" global provisioned provisioned=False global provisionError provisionError=None HandlerStatusToAggStatus = {"installed":"Installing", "enabled":"Ready", "unintalled":"NotReady", "disabled":"NotReady"} WaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ext4 # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ README_FILENAME="DATALOSS_WARNING_README.txt" README_FILECONTENT="""\ WARNING: THIS IS A TEMPORARY DISK. Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ ############################################################ # BEGIN DISTRO CLASS DEFS ############################################################ ############################################################ # AbstractDistro ############################################################ class AbstractDistro(object): """ AbstractDistro defines a skeleton neccesary for a concrete Distro class. Generic methods and attributes are kept here, distribution specific attributes and behavior are to be placed in the concrete child named distroDistro, where distro is the string returned by calling python platform.linux_distribution()[0]. So for CentOS the derived class is called 'centosDistro'. """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux=None self.service_cmd='/usr/sbin/service' self.ssh_service_restart_option='restart' self.ssh_service_name='ssh' self.ssh_config_file='/etc/ssh/sshd_config' self.hostname_file_path='/etc/hostname' self.dhcp_client_name='dhclient' self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod', 'openssl', 'sfdisk', 'fdisk', 'mkfs', 'sed', 'grep', 'sudo', 'parted' ] self.init_script_file='/etc/init.d/waagent' self.agent_package_name='WALinuxAgent' self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ] self.agent_files_to_uninstall = ["/etc/waagent.conf", "/etc/logrotate.d/waagent"] self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT=' self.getpidcmd = 'pidof' self.mount_dvd_cmd = 'mount' self.sudoers_dir_base = '/etc' self.waagent_conf_file = WaagentConf self.shadow_file_mode=0600 self.shadow_file_path="/etc/shadow" self.dhcp_enabled = False def isSelinuxSystem(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if Run("which getenforce",chk_err=False): self.selinux = False else: self.selinux = True return self.selinux def isSelinuxRunning(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.isSelinuxSystem(): return RunGetOutput("getenforce")[1].startswith("Enforcing") else: return False def setSelinuxEnforce(self,state): """ Calls shell command 'setenforce' with 'state' and returns resulting exit code. """ if self.isSelinuxSystem(): if state: s = '1' else: s='0' return Run("setenforce "+s) def setSelinuxContext(self,path,cn): """ Calls shell 'chcon' with 'path' and 'cn' context. Returns exit result. """ if self.isSelinuxSystem(): if not os.path.exists(path): Error("Path does not exist: {0}".format(path)) return 1 return Run('chcon ' + cn + ' ' + path) def setHostname(self,name): """ Shell call to hostname. Returns resulting exit code. """ return Run('hostname ' + name) def publishHostname(self,name): """ Set the contents of the hostname file to 'name'. Return 1 on failure. """ try: r=SetFileContents(self.hostname_file_path, name) for f in EtcDhcpClientConfFiles: if os.path.exists(f) and FindStringInFile(f,r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])') == None : r=ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', "send host-name \"" + name + "\";\n" + "\n".join(filter(lambda a: not a.startswith("send host-name"), GetFileContents('/etc/dhcp/dhclient.conf').split('\n')))) except: return 1 return r def installAgentServiceScriptFiles(self): """ Create the waagent support files for service installation. Called by registerAgentService() Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def registerAgentService(self): """ Calls installAgentService to create service files. Shell exec service registration commands. (e.g. chkconfig --add waagent) Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def uninstallAgentService(self): """ Call service subsystem to remove waagent script. Abstract Virtual Function. Over-ridden in concrete Distro classes. 
""" pass def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' start') def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop',False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_name + " " + self.ssh_service_restart_option retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def sshDeployPublicKey(self,fprint,path): """ Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences in openssl packages deployed """ error=0 SshPubKey = OvfEnv().OpensslToSsh(fprint) if SshPubKey != None: AppendFileContents(path, SshPubKey) else: Error("Failed: " + fprint + ".crt -> " + path) error = 1 return error def checkPackageInstalled(self,p): """ Query package database for prescence of an installed package. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def checkPackageUpdateable(self,p): """ Online check if updated package of walinuxagent is available. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def deleteRootPassword(self): """ Generic root password removal. """ filepath="/etc/shadow" ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath,self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0') Log("Root password deleted.") return 0 def changePass(self,user,password): Log("Change user password") crypt_id = Config.get("Provisioning.PasswordCryptId") if crypt_id is None: crypt_id = "6" salt_len = Config.get("Provisioning.PasswordCryptSaltLength") try: salt_len = int(salt_len) if salt_len < 0 or salt_len > 10: salt_len = 10 except (ValueError, TypeError): salt_len = 10 return self.chpasswd(user, password, crypt_id=crypt_id, salt_len=salt_len) def chpasswd(self, username, password, crypt_id=6, salt_len=10): passwd_hash = self.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = RunGetOutput(cmd, log_cmd=False) if ret != 0: return "Failed to set password for {0}: {1}".format(username, output) def gen_password_hash(self, password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) return crypt.crypt(password, salt) def load_ata_piix(self): return WaAgent.TryLoadAtapiix() def unload_ata_piix(self): """ Generic function to remove ata_piix.ko. """ return WaAgent.TryUnloadAtapiix() def deprovisionWarnUser(self): """ Generic user warnings used at deprovision. """ print("WARNING! 
Nameserver configuration in /etc/resolv.conf will be deleted.") def deprovisionDeleteFiles(self): """ Files to delete when VM is deprovisioned """ for a in VarLibDhcpDirectories: Run("rm -f " + a + "/*") # Clear LibDir, remove nameserver and root bash history for f in os.listdir(LibDir) + self.fileBlackList: try: os.remove(f) except: pass return 0 def uninstallDeleteFiles(self): """ Files to delete when agent is uninstalled. """ for f in self.agent_files_to_uninstall: try: os.remove(f) except: pass return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. """ if self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m= __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self,buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot+'/etc'): os.mkdir(buildroot+'/etc') SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot+'/etc/logrotate.d'): os.mkdir(buildroot+'/etc/logrotate.d') SetFileContents(buildroot+'/etc/logrotate.d/waagent', WaagentLogrotate) self.init_script_file=buildroot+self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def GetIpv4Address(self): """ Return the ip of the first active non-loopback interface. """ addr='' iface,addr=GetFirstActiveNetworkInterfaceNonLoopback() return addr def GetMacAddress(self): return GetMacAddress() def GetInterfaceName(self): return GetFirstActiveNetworkInterfaceNonLoopback()[0] def RestartInterface(self, iface, max_retry=3): for retry in range(1, max_retry + 1): ret = Run("ifdown " + iface + " && ifup " + iface) if ret == 0: return Log("Failed to restart interface: {0}, ret={1}".format(iface, ret)) if retry < max_retry: Log("Retry restart interface in 5 seconds") time.sleep(5) def CreateAccount(self,user, password, expiration, thumbprint): return CreateAccount(user, password, expiration, thumbprint) def DeleteAccount(self,user): return DeleteAccount(user) def ActivateResourceDisk(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): DiskActivated = True return device = DeviceForIdePort(1) if device == None: Error("ActivateResourceDisk: Unable to detect disk topology.") return device = "/dev/" + device mountlist = RunGetOutput("mount")[1] mountpoint = GetMountPoint(mountlist, device) if(mountpoint): Log("ActivateResourceDisk: " + device + "1 is already mounted.") else: mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if fs == None: fs = "ext3" partition = device + "1" #Check partition type Log("Detect GPT...") ret = RunGetOutput("parted {0} print".format(device)) if ret[0] == 0 and "gpt" in ret[1]: Log("GPT detected.") #GPT(Guid Partition Table) is used. #Get partitions. 
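                # 'parted <device> print' lists each partition on a line that
                # begins with its number, so keep only the lines that start
                # with digits.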
parts = filter(lambda x : re.match("^\s*[0-9]+", x), ret[1].split("\n")) #If there are more than 1 partitions, remove all partitions #and create a new one using the entire disk space. if len(parts) > 1: for i in range(1, len(parts) + 1): Run("parted {0} rm {1}".format(device, i)) Run("parted {0} mkpart primary 0% 100%".format(device)) Run("mkfs." + fs + " " + partition + " -F") else: existingFS = RunGetOutput("sfdisk -q -c " + device + " 1", chk_err=False)[1].rstrip() if existingFS == "7" and fs != "ntfs": Run("sfdisk -c " + device + " 1 83") Run("mkfs." + fs + " " + partition) if Run("mount " + partition + " " + mountpoint, chk_err=False): #If mount failed, try to format the partition and mount again Warn("Failed to mount resource disk. Retry mounting.") Run("mkfs." + fs + " " + partition + " -F") if Run("mount " + partition + " " + mountpoint): Error("ActivateResourceDisk: Failed to mount resource disk (" + partition + ").") return Log("Resource disk (" + partition + ") is mounted at " + mountpoint + " with fstype " + fs) #Create README file under the root of resource disk SetFileContents(os.path.join(mountpoint,README_FILENAME), README_FILECONTENT) DiskActivated = True #Create swap space swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): return sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) Run("mkswap " + mountpoint + "/swapfile") if not Run("swapon " + mountpoint + "/swapfile"): Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") def Install(self): return Install() def mediaHasFilesystem(self,dsk): if len(dsk) == 0 : return False if Run("LC_ALL=C fdisk -l " + dsk + " | grep Disk"): return False return True def mountDVD(self,dvd,location): return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location) def GetHome(self): return GetHome() def getDhcpClientName(self): return self.dhcp_client_name def initScsiDiskTimeout(self): """ Set the SCSI disk timeout when the agent starts running """ self.setScsiDiskTimeout() def setScsiDiskTimeout(self): """ Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout """ try: scsiTimeout = Config.get("OS.RootDeviceScsiTimeout") for diskName in [disk for disk in os.listdir("/sys/block") if disk.startswith("sd")]: self.setBlockDeviceTimeout(diskName, scsiTimeout) except: pass def setBlockDeviceTimeout(self, device, timeout): """ Set SCSI disk timeout by set /sys/block/sd*/device/timeout """ if timeout != None and device: filePath = "/sys/block/" + device + "/device/timeout" if(GetFileContents(filePath).splitlines()[0].rstrip() != timeout): SetFileContents(filePath,timeout) Log("SetBlockDeviceTimeout: Update the device " + device + " with timeout " + timeout) def waitForSshHostKey(self, path): """ Provide a dummy waiting, since by default, ssh host key is created by waagent and the key should already been created. 
""" if(os.path.isfile(path)): return True else: Error("Can't find host key: {0}".format(path)) return False def isDHCPEnabled(self): return self.dhcp_enabled def stopDHCP(self): """ Stop the system DHCP client so that the agent can bind on its port. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('stopDHCP method missing') def startDHCP(self): """ Start the system DHCP client. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('startDHCP method missing') def translateCustomData(self, data): """ Translate the custom data from a Base64 encoding. Default to no-op. """ decodeCustomData = Config.get("Provisioning.DecodeCustomData") if decodeCustomData != None and decodeCustomData.lower().startswith("y"): return base64.b64decode(data) return data def getConfigurationPath(self): return "/etc/waagent.conf" def getProcessorCores(self): return int(RunGetOutput("grep 'processor.*:' /proc/cpuinfo |wc -l")[1]) def getTotalMemory(self): return int(RunGetOutput("grep MemTotal /proc/meminfo |awk '{print $2}'")[1])/1024 def getInterfaceNameByMac(self, mac): ret, output = RunGetOutput("ifconfig -a") if ret != 0: raise Exception("Failed to get network interface info") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("Failed to get ifname with mac: {0}".format(mac)) return eths[-1] def configIpV4(self, ifName, addr, netmask=24): ret, output = RunGetOutput("ifconfig {0} up".format(ifName)) if ret != 0: raise Exception("Failed to bring up {0}: {1}".format(ifName, output)) ret, output = RunGetOutput("ifconfig {0} {1}/{2}".format(ifName, addr, netmask)) if ret != 0: raise Exception("Failed to config ipv4 for {0}: {1}".format(ifName, output)) def setDefaultGateway(self, gateway): Run("/sbin/route add default gw" + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " netmask " + mask + " gw " + gateway, chk_err=False) ############################################################ # GentooDistro ############################################################ gentoo_init_file = """\ #!/sbin/runscript command=/usr/sbin/waagent pidfile=/var/run/waagent.pid command_args=-daemon command_background=true name="Azure Linux Agent" depend() { need localmount use logger network after bootmisc modules } """ class gentooDistro(AbstractDistro): """ Gentoo distro concrete class """ def __init__(self): # super(gentooDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.hostname_file_path='/etc/conf.d/hostname' self.dhcp_client_name='dhcpcd' self.shadow_file_mode=0640 self.init_file=gentoo_init_file def publishHostname(self,name): try: if (os.path.isfile(self.hostname_file_path)): r=ReplaceFileContentsAtomic(self.hostname_file_path, "hostname=\"" + name + "\"\n" + "\n".join(filter(lambda a: not a.startswith("hostname="), GetFileContents(self.hostname_file_path).split("\n")))) except: return 1 return r def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0755) def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('rc-update add ' + 
self.agent_service_name + ' default') def uninstallAgentService(self): return Run('rc-update del ' + self.agent_service_name + ' default') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self,p): if Run('eix -I ^' + p + '$',chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run('eix -u ^' + p + '$',chk_err=False): return 0 else: return 1 def RestartInterface(self, iface): Run("/etc/init.d/net." + iface + " restart") ############################################################ # SuSEDistro ############################################################ suse_init_file = """\ #! /bin/sh # # Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the AzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting AzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -daemon rc_status -v ;; stop) echo -n "Shutting down AzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service AzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. 
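        ## The pidfile, the python interpreter and the agent binary are all
        ## passed so checkproc matches the python process running waagent.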
checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit """ class SuSEDistro(AbstractDistro): """ SuSE Distro concrete class Put SuSE specific behavior here... """ def __init__(self): super(SuSEDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.kernel_boot_options_file='/boot/grub/menu.lst' self.hostname_file_path='/etc/HOSTNAME' self.requiredDeps += [ "/sbin/insserv" ] self.init_file=suse_init_file self.dhcp_client_name='dhcpcd' if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \ (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')): self.dhcp_client_name='wickedd-dhcp4' self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' self.getpidcmd='pidof ' self.dhcp_enabled=True def checkPackageInstalled(self,p): if Run("rpm -q " + p,chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run("zypper list-updates | grep " + p,chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) except: pass def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('insserv ' + self.agent_service_name) def uninstallAgentService(self): return Run('insserv -r ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def startDHCP(self): Run("service " + self.dhcp_client_name + " start", chk_err=False) def stopDHCP(self): Run("service " + self.dhcp_client_name + " stop", chk_err=False) ############################################################ # redhatDistro ############################################################ redhat_init_file= """\ #!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -daemon & } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL """ class redhatDistro(AbstractDistro): """ Redhat Distro concrete class Put Redhat specific behavior here... 
""" def __init__(self): super(redhatDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_restart_option='condrestart' self.ssh_service_name='sshd' self.hostname_file_path= None if DistInfo()[1] < '7.0' else '/etc/hostname' self.init_file=redhat_init_file self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' def publishHostname(self,name): super(redhatDistro,self).publishHostname(name) if DistInfo()[1] < '7.0' : filepath = "/etc/sysconfig/network" if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("HOSTNAME"), GetFileContents(filepath).split('\n')))) ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('chkconfig --add waagent') def uninstallAgentService(self): return Run('chkconfig --del ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self,p): if Run("yum list installed " + p,chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run("yum check-update | grep "+ p,chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. """ if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m= __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 ############################################################ # centosDistro ############################################################ class centosDistro(redhatDistro): """ CentOS Distro concrete class Put CentOS specific behavior here... """ def __init__(self): super(centosDistro,self).__init__() ############################################################ # eulerosDistro ############################################################ class eulerosDistro(redhatDistro): """ EulerOS Distro concrete class Put EulerOS specific behavior here... """ def __init__(self): super(eulerosDistro,self).__init__() ############################################################ # oracleDistro ############################################################ class oracleDistro(redhatDistro): """ Oracle Distro concrete class Put Oracle specific behavior here... """ def __init__(self): super(oracleDistro, self).__init__() ############################################################ # asianuxDistro ############################################################ class asianuxDistro(redhatDistro): """ Asianux Distro concrete class Put Asianux specific behavior here... 
""" def __init__(self): super(asianuxDistro,self).__init__() ############################################################ # CoreOSDistro ############################################################ class CoreOSDistro(AbstractDistro): """ CoreOS Distro concrete class Put CoreOS specific behavior here... """ CORE_UID = 500 def __init__(self): super(CoreOSDistro,self).__init__() self.requiredDeps += [ "/usr/bin/systemctl" ] self.agent_service_name = 'waagent' self.init_script_file='/etc/systemd/system/waagent.service' self.fileBlackList.append("/etc/machine-id") self.dhcp_client_name='systemd-networkd' self.getpidcmd='pidof ' self.shadow_file_mode=0640 self.waagent_path='/usr/share/oem/bin' self.python_path='/usr/share/oem/python/bin' self.dhcp_enabled=True if 'PATH' in os.environ: os.environ['PATH'] = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: os.environ['PATH'] = self.python_path if 'PYTHONPATH' in os.environ: os.environ['PYTHONPATH'] = "{0}:{1}".format(os.environ['PYTHONPATH'], self.waagent_path) else: os.environ['PYTHONPATH'] = self.waagent_path def checkPackageInstalled(self,p): """ There is no package manager in CoreOS. Return 1 since it must be preinstalled. """ return 1 def checkDependencies(self): for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self,p): """ There is no package manager in CoreOS. Return 0 since it can't be updated via package. """ return 0 def startAgentService(self): return Run('systemctl start ' + self.agent_service_name) def stopAgentService(self): return Run('systemctl stop ' + self.agent_service_name) def restartSshService(self): """ SSH is socket activated on CoreOS. No need to restart it. """ return 0 def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 def RestartInterface(self, iface): Run("systemctl restart systemd-networkd") def CreateAccount(self, user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd --create-home --password '*' " + user if expiration != None: command += " --expiredate " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: self.changePass(user, password) try: if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." 
home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def startDHCP(self): Run("systemctl start " + self.dhcp_client_name, chk_err=False) def stopDHCP(self): Run("systemctl stop " + self.dhcp_client_name, chk_err=False) def translateCustomData(self, data): return base64.b64decode(data) def getConfigurationPath(self): return "/usr/share/oem/waagent.conf" ############################################################ # debianDistro ############################################################ debian_init_file = """\ #!/bin/sh ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network $syslog # Required-Stop: $network $syslog # Should-Start: $network $syslog # Should-Stop: $network $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: AzureLinuxAgent # Description: AzureLinuxAgent ### END INIT INFO . /lib/lsb/init-functions OPTIONS="-daemon" WAZD_BIN=/usr/sbin/waagent WAZD_PID=/var/run/waagent.pid case "$1" in start) log_begin_msg "Starting AzureLinuxAgent..." pid=$( pidofproc $WAZD_BIN ) if [ -n "$pid" ] ; then log_begin_msg "Already running." log_end_msg 0 exit 0 fi start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS log_end_msg $? ;; stop) log_begin_msg "Stopping AzureLinuxAgent..." start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID ret=$? rm -f $WAZD_PID log_end_msg $ret ;; force-reload) $0 restart ;; restart) $0 stop $0 start ;; status) status_of_proc $WAZD_BIN && exit 0 || exit $? ;; *) log_success_msg "Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}" exit 1 ;; esac exit 0 """ class debianDistro(AbstractDistro): """ debian Distro concrete class Put debian specific behavior here... """ def __init__(self): super(debianDistro,self).__init__() self.requiredDeps += [ "/usr/sbin/update-rc.d" ] self.init_file=debian_init_file self.agent_package_name='walinuxagent' self.dhcp_client_name='dhclient' self.getpidcmd='pidof ' self.shadow_file_mode=0640 def checkPackageInstalled(self,p): """ Check that the package is installed. Return 1 if installed, 0 if not installed. This method of using dpkg-query allows wildcards to be present in the package name. """ if not Run("dpkg-query -W -f='${Status}\n' '" + p + "' | grep ' installed' 2>&1",chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Debian dependency check. python-pyasn1 is NOT needed. Return 1 unless all dependencies are satisfied. NOTE: using network*manager will catch either package name in Ubuntu or debian. """ if self.checkPackageInstalled('network*manager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self,p): if Run("apt-get update ; apt-get upgrade -us | grep " + p,chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): """ If we are packaged - the service name is walinuxagent, do nothing. 
""" if self.agent_service_name == 'walinuxagent': return 0 try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) except OSError, e: ErrorWithPrefix('installAgentServiceScriptFiles','Exception: '+str(e)+' occured creating ' + self.init_script_file) return 1 return 0 def registerAgentService(self): if self.installAgentServiceScriptFiles() == 0: return Run('update-rc.d waagent defaults') else : return 1 def uninstallAgentService(self): return Run('update-rc.d -f ' + self.agent_service_name + ' remove') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 ############################################################ # KaliDistro - WIP # Functioning on Kali 1.1.0a so far ############################################################ class KaliDistro(debianDistro): """ Kali Distro concrete class Put Kali specific behavior here... """ def __init__(self): super(KaliDistro,self).__init__() ############################################################ # UbuntuDistro ############################################################ ubuntu_upstart_file = """\ #walinuxagent - start Azure agent description "walinuxagent" author "Ben Howard " start on (filesystem and started rsyslog) pre-start script WALINUXAGENT_ENABLED=1 [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then exit 1 fi if [ ! -x /usr/sbin/waagent ]; then exit 1 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon """ class UbuntuDistro(debianDistro): """ Ubuntu Distro concrete class Put Ubuntu specific behavior here... """ def __init__(self): super(UbuntuDistro,self).__init__() self.init_script_file='/etc/init/waagent.conf' self.init_file=ubuntu_upstart_file self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log"] self.dhcp_client_name=None self.getpidcmd='pidof ' def registerAgentService(self): return self.installAgentServiceScriptFiles() def uninstallAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return 0 os.remove('/etc/init/' + self.agent_service_name + '.conf') def unregisterAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return self.stopAgentService() return self.uninstallAgentService() def deprovisionWarnUser(self): """ Ubuntu specific warning string from Deprovision. """ print("WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.") def deprovisionDeleteFiles(self): """ Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will break resolvconf. Therefore, we check to see if resolvconf is in use, and if so, we remove the resolvconf artifacts. """ if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': Log("resolvconf is not configured. 
Removing /etc/resolv.conf") self.fileBlackList.append('/etc/resolv.conf') else: Log("resolvconf is enabled; leaving /etc/resolv.conf intact") resolvConfD = '/etc/resolvconf/resolv.conf.d/' self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original']) for f in os.listdir(LibDir)+self.fileBlackList: try: os.remove(f) except: pass return 0 def getDhcpClientName(self): if self.dhcp_client_name != None : return self.dhcp_client_name if DistInfo()[1] == '12.04' : self.dhcp_client_name='dhclient3' else : self.dhcp_client_name='dhclient' return self.dhcp_client_name def waitForSshHostKey(self, path): """ Wait until the ssh host key is generated by cloud init. """ for retry in range(0, 10): if(os.path.isfile(path)): return True time.sleep(1) Error("Can't find host key: {0}".format(path)) return False ############################################################ # LinuxMintDistro ############################################################ class LinuxMintDistro(UbuntuDistro): """ LinuxMint Distro concrete class Put LinuxMint specific behavior here... """ def __init__(self): super(LinuxMintDistro,self).__init__() ############################################################ # fedoraDistro ############################################################ fedora_systemd_service = """\ [Unit] Description=Azure Linux Agent After=network.target After=sshd.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/sbin/waagent -daemon [Install] WantedBy=multi-user.target """ class fedoraDistro(redhatDistro): """ FedoraDistro concrete class Put Fedora specific behavior here... """ def __init__(self): super(fedoraDistro,self).__init__() self.service_cmd = '/usr/bin/systemctl' self.hostname_file_path = '/etc/hostname' self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service' self.init_file = fedora_systemd_service self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX=' def publishHostname(self, name): SetFileContents(self.hostname_file_path, name + '\n') ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0644) return Run(self.service_cmd + ' daemon-reload') def registerAgentService(self): self.installAgentServiceScriptFiles() return Run(self.service_cmd + ' enable ' + self.agent_service_name) def uninstallAgentService(self): """ Call service subsystem to remove waagent script. 
""" return Run(self.service_cmd + ' disable ' + self.agent_service_name) def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' start ' + self.agent_service_name) def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' stop ' + self.agent_service_name, False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_restart_option + " " + self.ssh_service_name retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def checkPackageInstalled(self, p): """ Query package database for prescence of an installed package. """ import rpm ts = rpm.TransactionSet() rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, p) return bool(len(rpms) > 0) def deleteRootPassword(self): return Run("/sbin/usermod root -p '!!'") def packagedInstall(self,buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot+'/etc'): os.mkdir(buildroot+'/etc') SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot+'/etc/logrotate.d'): os.mkdir(buildroot+'/etc/logrotate.d') SetFileContents(buildroot+'/etc/logrotate.d/WALinuxAgent', WaagentLogrotate) self.init_script_file=buildroot+self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def CreateAccount(self, user, password, expiration, thumbprint): super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint) Run('/sbin/usermod ' + user + ' -G wheel') def DeleteAccount(self, user): Run('/sbin/usermod ' + user + ' -G ""') super(fedoraDistro, self).DeleteAccount(user) ############################################################ # FreeBSD ############################################################ FreeBSDWaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ufs2 # ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. 
OS.OpensslPath=None # If "None", the system default version is used. """ bsd_init_file="""\ #! /bin/sh # PROVIDE: waagent # REQUIRE: DAEMON cleanvar sshd # BEFORE: LOGIN # KEYWORD: nojail . /etc/rc.subr export PATH=$PATH:/usr/local/bin name="waagent" rcvar="waagent_enable" command="/usr/sbin/${name}" command_interpreter="/usr/local/bin/python" waagent_flags=" daemon &" pidfile="/var/run/waagent.pid" load_rc_config $name run_rc_command "$1" """ bsd_activate_resource_disk_txt="""\ #!/usr/bin/env python import os import sys import imp # waagent has no '.py' therefore create waagent module import manually. __name__='setupmain' #prevent waagent.__main__ from executing waagent=imp.load_source('waagent','/tmp/waagent') waagent.LoggerInit('/var/log/waagent.log','/dev/console') from waagent import RunGetOutput,Run Config=waagent.ConfigurationProvider(None) format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): sys.exit(0) device_base = 'da1' device = "/dev/" + device_base for entry in RunGetOutput("mount")[1].split(): if entry.startswith(device + "s1"): waagent.Log("ActivateResourceDisk: " + device + "s1 is already mounted.") sys.exit(0) mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" waagent.CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if waagent.FreeBSDDistro().mediaHasFilesystem(device) == False : Run("newfs " + device + "s1") if Run("mount " + device + "s1 " + mountpoint): waagent.Error("ActivateResourceDisk: Failed to mount resource disk (" + device + "s1).") sys.exit(0) waagent.Log("Resource disk (" + device + "s1) is mounted at " + mountpoint + " with fstype " + fs) waagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT) swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): sys.exit(0) sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) if Run("mdconfig -a -t vnode -f " + mountpoint + "/swapfile -u 0"): waagent.Error("ActivateResourceDisk: Configuring swap - Failed to create md0") if not Run("swapon /dev/md0"): waagent.Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: waagent.Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") """ class FreeBSDDistro(AbstractDistro): """ """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" super(FreeBSDDistro,self).__init__() self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux=False self.ssh_service_name='sshd' self.ssh_config_file='/etc/ssh/sshd_config' self.hostname_file_path='/etc/hostname' self.dhcp_client_name='dhclient' self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'pw' , 'openssl', 'fdisk', 'sed', 'grep' , 'sudo'] self.init_script_file='/etc/rc.d/waagent' self.init_file=bsd_init_file self.agent_package_name='WALinuxAgent' self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ] self.agent_files_to_uninstall = ["/etc/waagent.conf"] self.grubKernelBootOptionsFile = '/boot/loader.conf' self.grubKernelBootOptionsLine = '' self.getpidcmd = 'pgrep -n' self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if=' # custom data max len is 64k self.sudoers_dir_base = '/usr/local/etc' self.waagent_conf_file = FreeBSDWaagentConf def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0777) AppendFileContents("/etc/rc.conf","waagent_enable='YES'\n") return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run("services_mkdb " + self.init_script_file) def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 def deleteRootPassword(self): """ BSD root password removal. """ filepath="/etc/master.passwd" ReplaceStringInFile(filepath,r'root:.*?:','root::') #ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" # + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath,self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0') RunGetOutput("pwd_mkdb -u root /etc/master.passwd") Log("Root password deleted.") return 0 def changePass(self,user,password): return RunSendStdin("pw usermod " + user + " -h 0 ",password, log_cmd=False) def load_ata_piix(self): return 0 def unload_ata_piix(self): return 0 def checkDependencies(self): """ FreeBSD dependency check. Return 1 unless all dependencies are satisfied. """ for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self,buildroot): pass def GetInterfaceName(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() return iface def RestartInterface(self, iface): Run("service netif restart") def GetIpv4Address(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() return inet def GetMacAddress(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() l=mac.split(':') r=[] for i in l: r.append(string.atoi(i,16)) return r def GetFreeBSDEthernetInfo(self): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. 
""" code,output=RunGetOutput("ifconfig",chk_err=False) Log(output) retries=10 cmd='ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP ' code=1 while code > 0 : if code > 0 and retries == 0: Error("GetFreeBSDEthernetInfo - Failed to detect ethernet interface") return None, None, None code,output=RunGetOutput(cmd,chk_err=False) retries-=1 if code > 0 and retries > 0 : Log("GetFreeBSDEthernetInfo - Error: retry ethernet detection " + str(retries)) if retries == 9 : c,o=RunGetOutput("ifconfig | grep -A1 -B2 ether",chk_err=False) if c == 0: t=o.replace('\n',' ') t=t.split() i=t[0][:-1] Log(RunGetOutput('id')[1]) Run('dhclient '+i) time.sleep(10) j=output.replace('\n',' ') j=j.split() iface=j[0][:-1] for i in range(len(j)): if j[i] == 'inet' : inet=j[i+1] elif j[i] == 'ether' : mac=j[i+1] return iface, inet, mac def CreateAccount(self,user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "pw useradd " + user + " -m" if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: self.changePass(user,password) try: # for older distros create sudoers.d if not os.path.isdir(MyDistro.sudoers_dir_base+'/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir(MyDistro.sudoers_dir_base+'/sudoers.d') # add the include of sudoers.d to the /etc/sudoers SetFileContents(MyDistro.sudoers_dir_base+'/sudoers',GetFileContents(MyDistro.sudoers_dir_base+'/sudoers')+'\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\n') if password == None: SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(self,user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. 
""" userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. Will not delete account.") return Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).pid try: os.remove(MyDistro.sudoers_dir_base+"/sudoers.d/waagent") except: pass return def ActivateResourceDiskNoThread(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated Run('cp /usr/sbin/waagent /tmp/') SetFileContents('/tmp/bsd_activate_resource_disk.py',bsd_activate_resource_disk_txt) Run('chmod +x /tmp/bsd_activate_resource_disk.py') pid = subprocess.Popen(["/tmp/bsd_activate_resource_disk.py", ""]).pid Log("Spawning bsd_activate_resource_disk.py") DiskActivated = True return def Install(self): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) ) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", self.waagent_conf_file) if os.path.exists('/usr/local/etc/logrotate.d/'): SetFileContents("/usr/local/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") #ApplyVNUMAWorkaround() return 0 def mediaHasFilesystem(self,dsk): if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep "invalid fdisk partition table found" ',False): return False return True def mountDVD(self,dvd,location): #At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location retcode,out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml') if retcode != 0: return retcode,out ovfxml = (GetFileContents(location+"/ovf-env.xml",asbin=False)) if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. 
ovfxml = ovfxml.strip(chr(0x00)) ovfxml = "".join(filter(lambda x: ord(x)<128, ovfxml)) ovfxml = re.sub(r'.*\Z','',ovfxml,0,re.DOTALL) ovfxml += '' SetFileContents(location+"/ovf-env.xml", ovfxml) return retcode,out def GetHome(self): return '/home' def initScsiDiskTimeout(self): """ Set the SCSI disk timeout by updating the kernal config """ timeout = Config.get("OS.RootDeviceScsiTimeout") if timeout: Run("sysctl kern.cam.da.default_timeout=" + timeout) def setScsiDiskTimeout(self): return def setBlockDeviceTimeout(self, device, timeout): return def getProcessorCores(self): return int(RunGetOutput("sysctl hw.ncpu | awk '{print $2}'")[1]) def getTotalMemory(self): return int(RunGetOutput("sysctl hw.realmem | awk '{print $2}'")[1])/1024 def setDefaultGateway(self, gateway): Run("/sbin/route add default " + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " " + mask + " " + gateway, chk_err=False) ############################################################ # END DISTRO CLASS DEFS ############################################################ # This lets us index into a string or an array of integers transparently. def Ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. """ if type(a) == type("a"): a = ord(a) return a def IsLinux(): """ Returns True if platform is Linux. Generic utility function. """ return (platform.uname()[0] == "Linux") def GetLastPathElement(path): """ Similar to basename. Generic utility function. """ return path.rsplit('/', 1)[1] def GetFileContents(filepath,asbin=False): """ Read and return contents of 'filepath'. """ mode='r' if asbin: mode+='b' c=None try: with open(filepath, mode) as F : c=F.read() except IOError, e: ErrorWithPrefix('GetFileContents','Reading from file ' + filepath + ' Exception is ' + str(e)) return None return c def SetFileContents(filepath, contents): """ Write 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1', 'ignore') try: with open(filepath, "wb+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('SetFileContents','Writing to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def AppendFileContents(filepath, contents): """ Append 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1') try: with open(filepath, "a+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('AppendFileContents','Appending to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def ReplaceFileContentsAtomic(filepath, contents): """ Write 'contents' to 'filepath' by creating a temp file, and replacing original. 
""" handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath)) if type(contents) == str : contents=contents.encode('latin-1') try: os.write(handle, contents) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e)) return None finally: os.close(handle) try: os.rename(temp, filepath) return None except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' + str(e)) try: os.remove(filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) try: os.rename(temp,filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) return 1 return 0 def GetLineStartingWith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in GetFileContents(filepath).split('\n'): if line.startswith(prefix): return line return None def Run(cmd,chk_err=True): """ Calls RunGetOutput on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode,out=RunGetOutput(cmd,chk_err) return retcode def RunGetOutput(cmd, chk_err=True, log_cmd=True): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd) try: output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) except subprocess.CalledProcessError,e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(e.returncode) ) Error('CalledProcessError. Command string was ' + e.cmd ) Error('CalledProcessError. Command result was ' + (e.output[:-1]).decode('latin-1')) return e.returncode,e.output.decode('latin-1') return 0,output.decode('latin-1') def RunSendStdin(cmd, input, chk_err=True, log_cmd=True): """ Wrapper for subprocess.Popen. Execute 'cmd', sending 'input' to STDIN of 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd+input) try: me=subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE,stderr=subprocess.STDOUT,stdout=subprocess.PIPE) output=me.communicate(input) except OSError , e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return 1,output[0].decode('latin-1') if me.returncode is not 0 and chk_err is True and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return me.returncode,output[0].decode('latin-1') def GetNodeTextData(a): """ Filter non-text nodes from DOM tree """ for b in a.childNodes: if b.nodeType == b.TEXT_NODE: return b.data def GetHome(): """ Attempt to guess the $HOME location. Return the path string. """ home = None try: home = GetLineStartingWith("HOME", "/etc/default/useradd").split('=')[1].strip() except: pass if (home == None) or (home.startswith("/") == False): home = "/home" return home def ChangeOwner(filepath, user): """ Lookup user. Attempt chown 'filepath' to 'user'. 
""" p = None try: p = pwd.getpwnam(user) except: pass if p != None: if not os.path.exists(filepath): Error("Path does not exist: {0}".format(filepath)) else: os.chown(filepath, p[2], p[3]) def CreateDir(dirpath, user, mode): """ Attempt os.makedirs, catch all exceptions. Call ChangeOwner afterwards. """ try: os.makedirs(dirpath, mode) except: pass ChangeOwner(dirpath, user) def CreateAccount(user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd -m " + user if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: MyDistro.changePass(user, password) try: # for older distros create sudoers.d if not os.path.isdir('/etc/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir('/etc/sudoers.d/') # add the include of sudoers.d to the /etc/sudoers SetFileContents('/etc/sudoers',GetFileContents('/etc/sudoers')+'\n#includedir /etc/sudoers.d\n') if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. Will not delete account.") return Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted Run("userdel -f -r " + user) try: os.remove("/etc/sudoers.d/waagent") except: pass return def IsInRangeInclusive(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def IsPrintable(ch): """ Return True if character is displayable. 
""" return IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or IsInRangeInclusive(ch, Ord('a'), Ord('z')) or IsInRangeInclusive(ch, Ord('0'), Ord('9')) def HexDump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. """ if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte=buffer[j] if type(byte) == str: byte = ord(byte.decode('latin1')) k = '.' if IsPrintable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def SimpleLog(file_path,message): if not file_path or len(message) < 1: return t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) lines=re.sub(re.compile(r'^(.)',re.MULTILINE),t+r'\1',message) with open(file_path, "a") as F : lines = filter(lambda x : x in string.printable, lines) F.write(lines.encode('ascii','ignore') + "\n") class Logger(object): """ The Agent's logging assumptions are: For Log, and LogWithPrefix all messages are logged to the self.file_path and to the self.con_path. Setting either path parameter to None skips that log. If Verbose is enabled, messages calling the LogIfVerbose method will be logged to file_path yet not to con_path. Error and Warn messages are normal log messages with the 'ERROR:' or 'WARNING:' prefix added. """ def __init__(self,filepath,conpath,verbose=False): """ Construct an instance of Logger. """ self.file_path=filepath self.con_path=conpath self.verbose=verbose def ThrottleLog(self,counter): """ Log everything up to 10, every 10 up to 100, then every 100. """ return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0) def LogToFile(self,message): """ Write 'message' to logfile. """ if self.file_path: try: with open(self.file_path, "a") as F : message = filter(lambda x : x in string.printable, message) F.write(message.encode('ascii','ignore') + "\n") except IOError, e: print e pass def LogToCon(self,message): """ Write 'message' to /dev/console. This supports serial port logging if the /dev/console is redirected to ttys0 in kernel boot options. """ if self.con_path: try: with open(self.con_path, "w") as C : message = filter(lambda x : x in string.printable, message) C.write(message.encode('ascii','ignore') + "\n") except IOError, e: pass def Log(self,message): """ Standard Log function. Logs to self.file_path, and con_path """ self.LogWithPrefix("", message) def LogWithPrefix(self,prefix, message): """ Prefix each line of 'message' with current time+'prefix'. """ t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def NoLog(self,message): """ Don't Log. """ pass def LogIfVerbose(self,message): """ Only log 'message' if global Verbose is True. """ self.LogWithPrefixIfVerbose('',message) def LogWithPrefixIfVerbose(self,prefix, message): """ Only log 'message' if global Verbose is True. Prefix each line of 'message' with current time+'prefix'. 
""" if self.verbose == True: t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def Warn(self,message): """ Prepend the text "WARNING:" to the prefix for each line in 'message'. """ self.LogWithPrefix("WARNING:", message) def Error(self,message): """ Call ErrorWithPrefix(message). """ ErrorWithPrefix("", message) def ErrorWithPrefix(self,prefix, message): """ Prepend the text "ERROR:" to the prefix for each line in 'message'. Errors written to logfile, and /dev/console """ self.LogWithPrefix("ERROR:", message) def LoggerInit(log_file_path,log_con_path,verbose=False): """ Create log object and export its methods to global scope. """ global Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger l=Logger(log_file_path,log_con_path,verbose) Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger = l.Log,l.LogWithPrefix,l.LogIfVerbose,l.LogWithPrefixIfVerbose,l.Error,l.ErrorWithPrefix,l.Warn,l.NoLog,l.ThrottleLog,l def Linux_ioctl_GetInterfaceMac(ifname): """ Return the mac-address bound to the socket. """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1'))) return ''.join(['%02X' % Ord(char) for char in info[18:24]]) def GetFirstActiveNetworkInterfaceNonLoopback(): """ Return the interface name, and ip addr of the first active non-loopback interface. """ iface='' expected=16 # how many devices should I expect... is_64bits = sys.maxsize > 2**32 struct_size=40 if is_64bits else 32 # for 64bit the size is 40 bytes, for 32bits it is 32 bytes. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) buff=array.array('B', b'\0' * (expected*struct_size)) retsize=(struct.unpack('iL', fcntl.ioctl(s.fileno(), 0x8912, struct.pack('iL',expected*struct_size,buff.buffer_info()[0]))))[0] if retsize == (expected*struct_size) : Warn('SIOCGIFCONF returned more than ' + str(expected) + ' up network interfaces.') s=buff.tostring() preferred_nic = Config.get("Network.Interface") for i in range(0,struct_size*expected,struct_size): iface=s[i:i+16].split(b'\0', 1)[0] if iface == b'lo': continue elif preferred_nic is None: break elif iface == preferred_nic: break return iface.decode('latin-1'), socket.inet_ntoa(s[i+20:i+24]) def GetIpv4Address(): """ Return the ip of the first active non-loopback interface. """ iface,addr=GetFirstActiveNetworkInterfaceNonLoopback() return addr def HexStringToByteArray(a): """ Return hex string packed into a binary struct. """ b = b"" for c in range(0, len(a) // 2): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b def GetMacAddress(): """ Convienience function, returns mac addr bound to first non-loobback interface. """ ifname='' while len(ifname) < 2 : ifname=GetFirstActiveNetworkInterfaceNonLoopback()[0] a = Linux_ioctl_GetInterfaceMac(ifname) return HexStringToByteArray(a) def DeviceForIdePort(n): """ Return device name attached to ide port 'n'. 
""" if n > 3: return None g0 = "00000000" if n > 1: g0 = "00000001" n = n - 2 device = None path = "/sys/bus/vmbus/devices/" for vmbus in os.listdir(path): guid = GetFileContents(path + vmbus + "/device_id").lstrip('{').split('-') if guid[0] == g0 and guid[1] == "000" + str(n): for root, dirs, files in os.walk(path + vmbus): if root.endswith("/block"): device = dirs[0] break else : #older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] break break return device class HttpResourceGoneError(Exception): pass class Util(object): """ Http communication class. Base of GoalState, and Agent classes. """ RetryWaitingInterval=10 def __init__(self): self.Endpoint = None def _ParseUrl(self, url): secure = False host = self.Endpoint path = url port = None #"http[s]://hostname[:port][/]" if url.startswith("http://"): url = url[7:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" elif url.startswith("https://"): secure = True url = url[8:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" if host is None: raise ValueError("Host is invalid:{0}".format(url)) if(":" in host): pos = host.rfind(":") port = int(host[pos + 1:]) host = host[0:pos] return host, port, secure, path def GetHttpProxy(self, secure): """ Get http_proxy and https_proxy from environment variables. Username and password is not supported now. """ host = Config.get("HttpProxy.Host") port = Config.get("HttpProxy.Port") return (host, port) def _HttpRequest(self, method, host, path, port=None, data=None, secure=False, headers=None, proxyHost=None, proxyPort=None): resp = None conn = None try: if secure: port = 443 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplib.HTTPSConnection(proxyHost, proxyPort, timeout=10) conn.set_tunnel(host, port) #If proxy is used, full url is needed. path = "https://{0}:{1}{2}".format(host, port, path) else: conn = httplib.HTTPSConnection(host, port, timeout=10) else: port = 80 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplib.HTTPConnection(proxyHost, proxyPort, timeout=10) #If proxy is used, full url is needed. path = "http://{0}:{1}{2}".format(host, port, path) else: conn = httplib.HTTPConnection(host, port, timeout=10) if headers == None: conn.request(method, path, data) else: conn.request(method, path, data, headers) resp = conn.getresponse() except httplib.HTTPException, e: Error('HTTPException {0}, args:{1}'.format(e, repr(e.args))) except IOError, e: Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args))) return resp def HttpRequest(self, method, url, data=None, headers=None, maxRetry=3, chkProxy=False): """ Sending http request to server On error, sleep 10 and maxRetry times. Return the output buffer or None. """ LogIfVerbose("HTTP Req: {0} {1}".format(method, url)) LogIfVerbose("HTTP Req: Data={0}".format(data)) LogIfVerbose("HTTP Req: Header={0}".format(headers)) try: host, port, secure, path = self._ParseUrl(url) except ValueError, e: Error("Failed to parse url:{0}".format(url)) return None #Check proxy proxyHost, proxyPort = (None, None) if chkProxy: proxyHost, proxyPort = self.GetHttpProxy(secure) #If httplib module is not built with ssl support. 
Fallback to http if secure and not hasattr(httplib, "HTTPSConnection"): Warn("httplib is not built with ssl support") secure = False proxyHost, proxyPort = self.GetHttpProxy(secure) #If httplib module doesn't support https tunnelling. Fallback to http if secure and \ proxyHost is not None and \ proxyPort is not None and \ not hasattr(httplib.HTTPSConnection, "set_tunnel"): Warn("httplib doesn't support https tunnelling(new in python 2.7)") secure = False proxyHost, proxyPort = self.GetHttpProxy(secure) resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure, headers=headers, proxyHost=proxyHost, proxyPort=proxyPort) for retry in range(0, maxRetry): if resp is not None and \ (resp.status == httplib.OK or \ resp.status == httplib.CREATED or \ resp.status == httplib.ACCEPTED): return resp; if resp is not None and resp.status == httplib.GONE: raise HttpResourceGoneError("Http resource gone.") Error("Retry={0}".format(retry)) Error("HTTP Req: {0} {1}".format(method, url)) Error("HTTP Req: Data={0}".format(data)) Error("HTTP Req: Header={0}".format(headers)) if resp is None: Error("HTTP Err: response is empty.".format(retry)) else: Error("HTTP Err: Status={0}".format(resp.status)) Error("HTTP Err: Reason={0}".format(resp.reason)) Error("HTTP Err: Header={0}".format(resp.getheaders())) Error("HTTP Err: Body={0}".format(resp.read())) time.sleep(self.__class__.RetryWaitingInterval) resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure, headers=headers, proxyHost=proxyHost, proxyPort=proxyPort) return None def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("GET", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("HEAD", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("POST", url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("PUT", url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("DELETE", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False): """ Return data from an HTTP get on 'url'. """ resp = self.HttpGet(url, headers=None, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False): """ Return data from an HTTP get on 'url' with x-ms-agent-name and x-ms-version headers. """ resp = self.HttpGet(url, headers={ "x-ms-agent-name": GuestAgentName, "x-ms-version": ProtocolVersion }, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3, chkProxy=False): """ Return output of get using ssl cert. 
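        Sends the x-ms-agent-name, x-ms-version and x-ms-cipher-name headers and
        passes 'transportCert' through as the x-ms-guest-agent-public-x509-cert
        header value.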
""" resp = self.HttpGet(url, headers={ "x-ms-agent-name": GuestAgentName, "x-ms-version": ProtocolVersion, "x-ms-cipher-name": "DES_EDE3_CBC", "x-ms-guest-agent-public-x509-cert": transportCert }, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False): headers = { "x-ms-agent-name": GuestAgentName, "Content-Type": "text/xml; charset=utf-8", "x-ms-version": ProtocolVersion } try: return self.HttpPost(url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) except HttpResourceGoneError as e: Error("Failed to post: {0} {1}".format(url, e)) return None __StorageVersion="2014-02-14" def GetBlobType(url): restutil = Util() #Check blob type LogIfVerbose("Check blob type.") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) blobPropResp = restutil.HttpHead(url, { "x-ms-date" : timestamp, 'x-ms-version' : __StorageVersion }, chkProxy=True); blobType = None if blobPropResp is None: Error("Can't get status blob type.") return None blobType = blobPropResp.getheader("x-ms-blob-type") LogIfVerbose("Blob type={0}".format(blobType)) return blobType def PutBlockBlob(url, data): restutil = Util() LogIfVerbose("Upload block blob") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) ret = restutil.HttpPut(url, data, { "x-ms-date" : timestamp, "x-ms-blob-type" : "BlockBlob", "Content-Length": str(len(data)), "x-ms-version" : __StorageVersion }, chkProxy=True) if ret is None: Error("Failed to upload block blob for status.") return -1 return 0 def PutPageBlob(url, data): restutil = Util() LogIfVerbose("Replace old page blob") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) #Align to 512 bytes pageBlobSize = ((len(data) + 511) / 512) * 512 ret = restutil.HttpPut(url, "", { "x-ms-date" : timestamp, "x-ms-blob-type" : "PageBlob", "Content-Length": "0", "x-ms-blob-content-length" : str(pageBlobSize), "x-ms-version" : __StorageVersion }, chkProxy=True) if ret is None: Error("Failed to clean up page blob for status") return -1 if url.index('?') < 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) LogIfVerbose("Upload page blob") pageMax = 4 * 1024 * 1024 #Max page size: 4MB start = 0 end = 0 while end < len(data): end = min(len(data), start + pageMax) contentSize = end - start #Align to 512 bytes pageEnd = ((end + 511) / 512) * 512 bufSize = pageEnd - start buf = bytearray(bufSize) buf[0 : contentSize] = data[start : end] ret = restutil.HttpPut(url, buffer(buf), { "x-ms-date" : timestamp, "x-ms-range" : "bytes={0}-{1}".format(start, pageEnd - 1), "x-ms-page-write" : "update", "x-ms-version" : __StorageVersion, "Content-Length": str(pageEnd - start) }, chkProxy=True) if ret is None: Error("Failed to upload page blob for status") return -1 start = end return 0 def UploadStatusBlob(url, data): LogIfVerbose("Upload status blob") LogIfVerbose("Status={0}".format(data)) blobType = GetBlobType(url) if blobType == "BlockBlob": return PutBlockBlob(url, data) elif blobType == "PageBlob": return PutPageBlob(url, data) else: Error("Unknown blob type: {0}".format(blobType)) return -1 class TCPHandler(SocketServer.BaseRequestHandler): """ Callback object for LoadBalancerProbeServer. Recv and send LB probe messages. 
""" def __init__(self,lb_probe): super(TCPHandler,self).__init__() self.lb_probe=lb_probe def GetHttpDateTimeNow(self): """ Return formatted gmtime "Date: Fri, 25 Mar 2011 04:53:10 GMT" """ return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) def handle(self): """ Log LB probe messages, read the socket buffer, send LB probe response back to server. """ self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000 log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)] strCounter = str(self.lb_probe.ProbeCounter) if self.lb_probe.ProbeCounter == 1: Log("Receiving LB probes.") log("Received LB probe # " + strCounter) self.request.recv(1024) self.request.send("HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/html\r\nDate: " + self.GetHttpDateTimeNow() + "\r\n\r\nOK") class LoadBalancerProbeServer(object): """ Threaded object to receive and send LB probe messages. Load Balancer messages but be recv'd by the load balancing server, or this node may be shut-down. """ def __init__(self, port): self.ProbeCounter = 0 self.server = SocketServer.TCPServer((self.get_ip(), port), TCPHandler) self.server_thread = threading.Thread(target = self.server.serve_forever) self.server_thread.setDaemon(True) self.server_thread.start() def shutdown(self): self.server.shutdown() def get_ip(self): for retry in range(1,6): ip = MyDistro.GetIpv4Address() if ip == None : Log("LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry " + str(retry+1) ) time.sleep(10) else: return ip class ConfigurationProvider(object): """ Parse amd store key:values in waagent.conf """ def __init__(self, walaConfigFile): self.values = dict() if 'MyDistro' not in globals(): global MyDistro MyDistro = GetMyDistro() if walaConfigFile is None: walaConfigFile = MyDistro.getConfigurationPath() if os.path.isfile(walaConfigFile) == False: raise Exception("Missing configuration in {0}".format(walaConfigFile)) try: for line in GetFileContents(walaConfigFile).split('\n'): if not line.startswith("#") and "=" in line: parts = line.split()[0].split('=') value = parts[1].strip("\" ") if value != "None": self.values[parts[0]] = value else: self.values[parts[0]] = None except: Error("Unable to parse {0}".format(walaConfigFile)) raise return def get(self, key): return self.values.get(key) class EnvMonitor(object): """ Montor changes to dhcp and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. """ def __init__(self): self.shutdown = False self.HostName = socket.gethostname() self.server_thread = threading.Thread(target = self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() self.published = False def monitor(self): """ Monitor dhcp client pid and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. 
""" publish = Config.get("Provisioning.MonitorHostName") dhcpcmd = MyDistro.getpidcmd+ ' ' + MyDistro.getDhcpClientName() dhcppid = RunGetOutput(dhcpcmd)[1] while not self.shutdown: for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Log("EnvMonitor: Moved " + a + " -> " + LibDir) MyDistro.setScsiDiskTimeout() if publish != None and publish.lower().startswith("y"): try: if socket.gethostname() != self.HostName: Log("EnvMonitor: Detected host name change: " + self.HostName + " -> " + socket.gethostname()) self.HostName = socket.gethostname() WaAgent.UpdateAndPublishHostName(self.HostName) dhcppid = RunGetOutput(dhcpcmd)[1] self.published = True except: pass else: self.published = True pid = "" if not os.path.isdir("/proc/" + dhcppid.strip()): pid = RunGetOutput(dhcpcmd)[1] if pid != "" and pid != dhcppid: Log("EnvMonitor: Detected dhcp client restart. Restoring routing table.") WaAgent.RestoreRoutes() dhcppid = pid for child in Children: if child.poll() != None: Children.remove(child) time.sleep(5) def SetHostName(self, name): """ Generic call to MyDistro.setHostname(name). Complian to Log on error. """ if socket.gethostname() == name: self.published = True elif MyDistro.setHostname(name): Error("Error: SetHostName: Cannot set hostname to " + name) return ("Error: SetHostName: Cannot set hostname to " + name) def IsHostnamePublished(self): """ Return self.published """ return self.published def ShutdownService(self): """ Stop server comminucation and join the thread to main thread. """ self.shutdown = True self.server_thread.join() class Certificates(object): """ Object containing certificates of host and provisioned user. Parses and splits certificates into files. """ # # 2010-12-15 # 2 # Pkcs7BlobWithPfxContents # MIILTAY... # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset the Role, Incarnation """ self.Incarnation = None self.Role = None def Parse(self, xmlText): """ Parse multiple certificates into seperate files. """ self.reinitialize() SetFileContents("Certificates.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in [ "CertificateFile", "Version", "Incarnation", "Format", "Data", ]: if not dom.getElementsByTagName(a): Error("Certificates.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "CertificateFile": Error("Certificates.Parse: root not CertificateFile") return None SetFileContents("Certificates.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"Certificates.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"Certificates.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + GetNodeTextData(dom.getElementsByTagName("Data")[0])) if Run(Openssl + " cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | " + Openssl + " pkcs12 -nodes -password pass: -out Certificates.pem"): Error("Certificates.Parse: Failed to extract certificates from CMS message.") return self # There may be multiple certificates in this package. Split them. 
file = open("Certificates.pem") pindex = 1 cindex = 1 output = open("temp.pem", "w") for line in file.readlines(): output.write(line) if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$',line): output.close() if re.match(r'[-]+END .*?KEY[-]+$',line): os.rename("temp.pem", str(pindex) + ".prv") pindex += 1 else: os.rename("temp.pem", str(cindex) + ".crt") cindex += 1 output = open("temp.pem", "w") output.close() os.remove("temp.pem") keys = dict() index = 1 filename = str(index) + ".crt" while os.path.isfile(filename): thumbprint = (RunGetOutput(Openssl + " x509 -in " + filename + " -fingerprint -noout")[1]).rstrip().split('=')[1].replace(':', '').upper() pubkey=RunGetOutput(Openssl + " x509 -in " + filename + " -pubkey -noout")[1] keys[pubkey] = thumbprint os.rename(filename, thumbprint + ".crt") os.chmod(thumbprint + ".crt", 0600) MyDistro.setSelinuxContext(thumbprint + '.crt','unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".crt" index = 1 filename = str(index) + ".prv" while os.path.isfile(filename): pubkey = RunGetOutput(Openssl + " rsa -in " + filename + " -pubout 2> /dev/null ")[1] os.rename(filename, keys[pubkey] + ".prv") os.chmod(keys[pubkey] + ".prv", 0600) MyDistro.setSelinuxContext( keys[pubkey] + '.prv','unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".prv" return self class SharedConfig(object): """ Parse role endpoint server and goal state config. """ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.RdmaMacAddress = None self.RdmaIPv4Address = None self.xmlText = None def Parse(self, xmlText): """ Parse and write configuration to file SharedConfig.xml. """ LogIfVerbose(xmlText) self.reinitialize() self.xmlText = xmlText dom = xml.dom.minidom.parseString(xmlText) for a in [ "SharedConfig", "Deployment", "Service", "ServiceInstance", "Incarnation", "Role", ]: if not dom.getElementsByTagName(a): Error("SharedConfig.Parse: Missing " + a) node = dom.childNodes[0] if node.localName != "SharedConfig": Error("SharedConfig.Parse: root not SharedConfig") nodes = dom.getElementsByTagName("Instance") if nodes is not None and len(nodes) != 0: node = nodes[0] if node.hasAttribute("rdmaMacAddress"): addr = node.getAttribute("rdmaMacAddress") self.RdmaMacAddress = addr[0:2] for i in range(1, 6): self.RdmaMacAddress += ":" + addr[2 * i : 2 *i + 2] if node.hasAttribute("rdmaIPv4Address"): self.RdmaIPv4Address = node.getAttribute("rdmaIPv4Address") return self def Save(self): LogIfVerbose("Save SharedConfig.xml") SetFileContents("SharedConfig.xml", self.xmlText) def InvokeTopologyConsumer(self): program = Config.get("Role.TopologyConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/SharedConfig.xml"])) except OSError, e : ErrorWithPrefix('Agent.Run','Exception: '+ str(e) +' occured launching ' + program ) def Process(self): global rdma_configured if not rdma_configured and self.RdmaMacAddress is not None and self.RdmaIPv4Address is not None: handler = RdmaHandler(self.RdmaMacAddress, self.RdmaIPv4Address) handler.start() rdma_configured = True self.InvokeTopologyConsumer() rdma_configured = False class RdmaError(Exception): pass class RdmaHandler(object): """ Handle rdma configuration. 
""" def __init__(self, mac, ip_addr, dev="/dev/hvnd_rdma", dat_conf_files=['/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf']): self.mac = mac self.ip_addr = ip_addr self.dev = dev self.dat_conf_files = dat_conf_files self.data = ('rdmaMacAddress="{0}" rdmaIPv4Address="{1}"' '').format(self.mac, self.ip_addr) def start(self): """ Start a new thread to process rdma """ threading.Thread(target=self.process).start() def process(self): try: self.set_dat_conf() self.set_rdma_dev() self.set_rdma_ip() except RdmaError as e: Error("Failed to config rdma device: {0}".format(e)) def set_dat_conf(self): """ Agent needs to search all possible locations for dat.conf """ Log("Set dat.conf") for dat_conf_file in self.dat_conf_files: if not os.path.isfile(dat_conf_file): continue try: self.write_dat_conf(dat_conf_file) except IOError as e: raise RdmaError("Failed to write to dat.conf: {0}".format(e)) def write_dat_conf(self, dat_conf_file): Log("Write config to {0}".format(dat_conf_file)) old = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 " "dapl.2.0 \"\S+ 0\"") new = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 " "dapl.2.0 \"{0} 0\"").format(self.ip_addr) lines = GetFileContents(dat_conf_file) lines = re.sub(old, new, lines) SetFileContents(dat_conf_file, lines) def set_rdma_dev(self): """ Write config string to /dev/hvnd_rdma """ Log("Set /dev/hvnd_rdma") self.wait_rdma_dev() self.write_rdma_dev_conf() def write_rdma_dev_conf(self): Log("Write rdma config to {0}: {1}".format(self.dev, self.data)) try: with open(self.dev, "w") as c: c.write(self.data) except IOError, e: raise RdmaError("Error writing {0}, {1}".format(self.dev, e)) def wait_rdma_dev(self): Log("Wait for /dev/hvnd_rdma") retry = 0 while retry < 120: if os.path.exists(self.dev): return time.sleep(1) retry += 1 raise RdmaError("The device doesn't show up in 120 seconds") def set_rdma_ip(self): Log("Set ip addr for rdma") try: if_name = MyDistro.getInterfaceNameByMac(self.mac) #Azure is using 12 bits network mask for infiniband. MyDistro.configIpV4(if_name, self.ip_addr, 12) except Exception as e: raise RdmaError("Failed to config rdma device: {0}".format(e)) class ExtensionsConfig(object): """ Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if true, remove if it is set to false. """ # # # # # # # {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1", #"protectedSettings":"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR #Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6 #tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X #v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh #kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]} # # #https://ostcextensions.blob.core.test-cint.azure-test.net/vhds/eg-plugin7-vm.eg-plugin7-vm.eg-plugin7-vm.status?sr=b&sp=rw& #se=9999-01-01&sk=key1&sv=2012-02-12&sig=wRUIDN1x2GC06FWaetBP9sjjifOWvRzS2y2XBB4qoBU%3D def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. 
""" self.Extensions = None self.Plugins = None self.Util = None def Parse(self, xmlText): """ Write configuration to file ExtensionsConfig.xml. Log plugin specific activity to /var/log/azure/.//CommandExecution.log. If state is enabled: if the plugin is installed: if the new plugin's version is higher if DisallowMajorVersionUpgrade is false or if true, the version is a minor version do upgrade: download the new archive do the updateCommand. disable the old plugin and remove enable the new plugin if the new plugin's version is the same or lower: create the new .settings file from the configuration received do the enableCommand if the plugin is not installed: download/unpack archive and call the installCommand/Enable if state is disabled: call disableCommand if state is uninstall: call uninstallCommand remove old plugin directory. """ self.reinitialize() self.Util=Util() dom = xml.dom.minidom.parseString(xmlText) LogIfVerbose(xmlText) self.plugin_log_dir='/var/log/azure' if not os.path.exists(self.plugin_log_dir): os.mkdir(self.plugin_log_dir) try: self.Extensions=dom.getElementsByTagName("Extensions") pg = dom.getElementsByTagName("Plugins") if len(pg) > 0: self.Plugins = pg[0].getElementsByTagName("Plugin") else: self.Plugins = [] incarnation=self.Extensions[0].getAttribute("goalStateIncarnation") SetFileContents('ExtensionsConfig.'+incarnation+'.xml', xmlText) except Exception, e: Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e)) return None for p in self.Plugins: if len(p.getAttribute("location"))<1: # this plugin is inside the PluginSettings continue p.setAttribute('restricted','false') previous_version = None version=p.getAttribute("version") name=p.getAttribute("name") plog_dir=self.plugin_log_dir+'/'+name +'/'+ version if not os.path.exists(plog_dir): os.makedirs(plog_dir) p.plugin_log=plog_dir+'/CommandExecution.log' handler=name + '-' + version if p.getAttribute("isJson") != 'true': Error("Plugin " + name+" version: " +version+" is not a JSON Extension. 
Skipping.") continue Log("Found Plugin: " + name + ' version: ' + version) if p.getAttribute("state") == 'disabled' or p.getAttribute("state") == 'uninstall': #disable zip_dir=LibDir+"/" + name + '-' + version mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) if self.launchCommand(p.plugin_log,name,version,'disableCommand') == None : self.SetHandlerState(handler, 'Enabled') Error('Unable to disable '+name) SimpleLog(p.plugin_log,'ERROR: Unable to disable '+name) else : self.SetHandlerState(handler, 'Disabled') Log(name+' is disabled') SimpleLog(p.plugin_log,name+' is disabled') # uninstall if needed if p.getAttribute("state") == 'uninstall': if self.launchCommand(p.plugin_log,name,version,'uninstallCommand') == None : self.SetHandlerState(handler, 'Installed') Error('Unable to uninstall '+name) SimpleLog(p.plugin_log,'Unable to uninstall '+name) else : self.SetHandlerState(handler, 'NotInstalled') Log(name+' uninstallCommand completed .') # remove the plugin Run('rm -rf ' + LibDir + '/' + name +'-'+ version + '*') Log(name +'-'+ version + ' extension files deleted.') SimpleLog(p.plugin_log,name +'-'+ version + ' extension files deleted.') continue # state is enabled # if the same plugin exists and the version is newer or # does not exist then download and unzip the new plugin plg_dir=None latest_version_installed = LooseVersion("0.0") for item in os.listdir(LibDir): itemPath = os.path.join(LibDir, item) if os.path.isdir(itemPath) and name in item: try: #Split plugin dir name with '-' to get intalled plugin name and version sperator = item.rfind('-') if sperator < 0: continue installed_plg_name = item[0:sperator] installed_plg_version = LooseVersion(item[sperator + 1:]) #Check installed plugin name and compare installed version to get the latest version installed if installed_plg_name == name and installed_plg_version > latest_version_installed: plg_dir = itemPath previous_version = str(installed_plg_version) latest_version_installed = installed_plg_version except Exception as e: Warn("Invalid plugin dir name: {0} {1}".format(item, e)) continue if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version) : location=p.getAttribute("location") Log("Downloading plugin manifest: " + name + " from " + location) SimpleLog(p.plugin_log,"Downloading plugin manifest: " + name + " from " + location) self.Util.Endpoint=location.split('/')[2] Log("Plugin server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log,"Plugin server is: " + self.Util.Endpoint) manifest=self.Util.HttpGetWithoutHeaders(location, chkProxy=True) if manifest == None: Error("Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") SimpleLog(p.plugin_log,"Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") failoverlocation=p.getAttribute("failoverlocation") self.Util.Endpoint=failoverlocation.split('/')[2] Log("Plugin failover server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log,"Plugin failover server is: " + self.Util.Endpoint) manifest=self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True) #if failoverlocation also fail what to do then? 
if manifest == None: AddExtensionEvent(name,WALAEventOperation.Download,False,0,version,"Download mainfest fail "+failoverlocation) Log("Plugin manifest " + name + " downloading failed from failover location.") SimpleLog(p.plugin_log,"Plugin manifest " + name + " downloading failed from failover location.") filepath=LibDir+"/" + name + '.' + incarnation + '.manifest' if os.path.splitext(location)[-1] == '.xml' : #if this is an xml file we may have a BOM if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128: manifest=manifest[3:] SetFileContents(filepath,manifest) #Get the bundle url from the manifest p.setAttribute('manifestdata',manifest) man_dom = xml.dom.minidom.parseString(manifest) bundle_uri = "" for mp in man_dom.getElementsByTagName("Plugin"): if GetNodeTextData(mp.getElementsByTagName("Version")[0]) == version: bundle_uri = GetNodeTextData(mp.getElementsByTagName("Uri")[0]) break if len(mp.getElementsByTagName("DisallowMajorVersionUpgrade")): if GetNodeTextData(mp.getElementsByTagName("DisallowMajorVersionUpgrade")[0]) == 'true' and previous_version !=None and previous_version.split('.')[0] != version.split('.')[0] : Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') SimpleLog(p.plugin_log,'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') p.setAttribute('restricted','true') continue if len(bundle_uri) < 1 : Error("Unable to fetch Bundle URI from manifest for " + name + " v " + version) SimpleLog(p.plugin_log,"Unable to fetch Bundle URI from manifest for " + name + " v " + version) continue Log("Bundle URI = " + bundle_uri) SimpleLog(p.plugin_log,"Bundle URI = " + bundle_uri) # Download the zipfile archive and save as '.zip' bundle=self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True) if bundle == None: AddExtensionEvent(name,WALAEventOperation.Download,True,0,version,"Download zip fail "+bundle_uri) Error("Unable to download plugin bundle" + bundle_uri ) SimpleLog(p.plugin_log,"Unable to download plugin bundle" + bundle_uri ) continue AddExtensionEvent(name,WALAEventOperation.Download,True,0,version,"Download Success") b=bytearray(bundle) filepath=LibDir+"/" + os.path.basename(bundle_uri) + '.zip' SetFileContents(filepath,b) Log("Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle))) SimpleLog(p.plugin_log,"Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle))) # unpack the archive z=zipfile.ZipFile(filepath) zip_dir=LibDir+"/" + name + '-' + version z.extractall(zip_dir) Log('Extracted ' + bundle_uri + ' to ' + zip_dir) SimpleLog(p.plugin_log,'Extracted ' + bundle_uri + ' to ' + zip_dir) # zip no file perms in .zip so set all the scripts to +x Run( "find " + zip_dir +" -type f | xargs chmod u+x ") #write out the base64 config data so the plugin can process it. mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log,'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) # create the status and config dirs Run('mkdir -p ' + root + '/status') Run('mkdir -p ' + root + '/config') # write out the configuration data to goalStateIncarnation.settings file in the config path. 
config='' seqNo='0' if len(dom.getElementsByTagName("PluginSettings")) != 0 : pslist=dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"Found RuntimeSettings for " + name + " V " + version) config=GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo=ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Log("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"No RuntimeSettings for " + name + " V " + version) SetFileContents(root +"/config/" + seqNo +".settings", config ) #create HandlerEnvironment.json handler_env='[{ "name": "'+name+'", "seqNo": "'+seqNo+'", "version": 1.0, "handlerEnvironment": { "logFolder": "'+os.path.dirname(p.plugin_log)+'", "configFolder": "' + root + '/config", "statusFolder": "' + root + '/status", "heartbeatFile": "'+ root + '/heartbeat.log"}}]' SetFileContents(root+'/HandlerEnvironment.json',handler_env) self.SetHandlerState(handler, 'NotInstalled') cmd = '' getcmd='installCommand' if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(previous_version): previous_handler=name+'-'+previous_version if self.GetHandlerState(previous_handler) != 'NotInstalled': getcmd='updateCommand' # disable the old plugin if it exists if self.launchCommand(p.plugin_log,name,previous_version,'disableCommand') == None : self.SetHandlerState(previous_handler, 'Enabled') Error('Unable to disable old plugin '+name+' version ' + previous_version) SimpleLog(p.plugin_log,'Unable to disable old plugin '+name+' version ' + previous_version) else : self.SetHandlerState(previous_handler, 'Disabled') Log(name+' version ' + previous_version + ' is disabled') SimpleLog(p.plugin_log,name+' version ' + previous_version + ' is disabled') try: Log("Copy status file from old plugin dir to new") old_plg_dir = plg_dir new_plg_dir = os.path.join(LibDir, "{0}-{1}".format(name, version)) old_ext_status_dir = os.path.join(old_plg_dir, "status") new_ext_status_dir = os.path.join(new_plg_dir, "status") if os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file_path = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file_path): shutil.copy2(status_file_path, new_ext_status_dir) mrseq_file = os.path.join(old_plg_dir, "mrseq") if os.path.isfile(mrseq_file): shutil.copy(mrseq_file, new_plg_dir) except Exception as e: Error("Failed to copy status file.") isupgradeSuccess = True if getcmd=='updateCommand': if self.launchCommand(p.plugin_log,name,version,getcmd,previous_version) == None : Error('Update failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Update failed for '+name+'-'+version) isupgradeSuccess=False else : Log('Update complete'+name+'-'+version) SimpleLog(p.plugin_log,'Update complete'+name+'-'+version) # if we updated - call unistall for the old plugin if self.launchCommand(p.plugin_log,name,previous_version,'uninstallCommand') == None : self.SetHandlerState(previous_handler, 'Installed') Error('Uninstall failed for '+name+'-'+previous_version) SimpleLog(p.plugin_log,'Uninstall failed for '+name+'-'+previous_version) isupgradeSuccess=False else : self.SetHandlerState(previous_handler, 'NotInstalled') Log('Uninstall complete'+ previous_handler ) SimpleLog(p.plugin_log,'Uninstall complete'+ name +'-' + previous_version) try: 
#rm old plugin dir if os.path.isdir(plg_dir): shutil.rmtree(plg_dir) Log(name +'-'+ previous_version + ' extension files deleted.') SimpleLog(p.plugin_log,name +'-'+ previous_version + ' extension files deleted.') except Exception as e: Error("Failed to remove old plugin directory") AddExtensionEvent(name,WALAEventOperation.Upgrade,isupgradeSuccess,0,previous_version) else : # run install if self.launchCommand(p.plugin_log,name,version,getcmd) == None : self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Installed') Log('Installation completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version) #end if plg_dir == none or version > = prev # change incarnation of settings file so it knows how to name status... zip_dir=LibDir+"/" + name + '-' + version mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log,'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) config='' seqNo='0' if len(dom.getElementsByTagName("PluginSettings")) != 0 : try: pslist=dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") except: Error('Error parsing ExtensionsConfig.') SimpleLog(p.plugin_log,'Error parsing ExtensionsConfig.') continue for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"Found RuntimeSettings for " + name + " V " + version) config=GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo=ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Error("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"No RuntimeSettings for " + name + " V " + version) SetFileContents(root +"/config/" + seqNo +".settings", config ) # state is still enable if (self.GetHandlerState(handler) == 'NotInstalled'): # run install first if true if self.launchCommand(p.plugin_log,name,version,'installCommand') == None : self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Installed') Log('Installation completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version) if (self.GetHandlerState(handler) != 'NotInstalled'): if self.launchCommand(p.plugin_log,name,version,'enableCommand') == None : self.SetHandlerState(handler, 'Installed') Error('Enable failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Enable failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Enabled') Log('Enable completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Enable completed for '+name+'-'+version) # this plugin processing is complete Log('Processing completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Processing completed for '+name+'-'+version) #end plugin processing loop Log('Finished processing ExtensionsConfig.xml') try: SimpleLog(p.plugin_log,'Finished processing ExtensionsConfig.xml') except: pass return self def launchCommand(self,plugin_log,name,version,command,prev_version=None): 
commandToEventOperation={ "installCommand":WALAEventOperation.Install, "uninstallCommand":WALAEventOperation.UnIsntall, "updateCommand": WALAEventOperation.Upgrade, "enableCommand": WALAEventOperation.Enable, "disableCommand": WALAEventOperation.Disable, } isSuccess=True start = datetime.datetime.now() r=self.__launchCommandWithoutEventLog(plugin_log,name,version,command,prev_version) if r==None: isSuccess=False Duration = int((datetime.datetime.now() - start).seconds) if commandToEventOperation.get(command): AddExtensionEvent(name,commandToEventOperation[command],isSuccess,Duration,version) return r def __launchCommandWithoutEventLog(self,plugin_log,name,version,command,prev_version=None): # get the manifest and read the command mfile=None zip_dir=LibDir+"/" + name + '-' + version for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(plugin_log,'HandlerManifest.json not found.') return None manifest = GetFileContents(mfile) try: jsn = json.loads(manifest) except: Error('Error parsing HandlerManifest.json.') SimpleLog(plugin_log,'Error parsing HandlerManifest.json.') return None if type(jsn)==list: jsn=jsn[0] if jsn.has_key('handlerManifest') : cmd = jsn['handlerManifest'][command] else : Error('Key handlerManifest not found. Handler cannot be installed.') SimpleLog(plugin_log,'Key handlerManifest not found. Handler cannot be installed.') if len(cmd) == 0 : Error('Unable to read ' + command ) SimpleLog(plugin_log,'Unable to read ' + command ) return None # for update we send the path of the old installation arg='' if prev_version != None : arg=' ' + LibDir+'/' + name + '-' + prev_version dirpath=os.path.dirname(mfile) LogIfVerbose('Command is '+ dirpath+'/'+ cmd) # launch pid=None try: child = subprocess.Popen(dirpath+'/'+cmd+arg,shell=True,cwd=dirpath,stdout=subprocess.PIPE) except Exception as e: Error('Exception launching ' + cmd + str(e)) SimpleLog(plugin_log,'Exception launching ' + cmd + str(e)) pid = child.pid if pid == None or pid < 1 : ExtensionChildren.append((-1,root)) Error('Error launching ' + cmd + '.') SimpleLog(plugin_log,'Error launching ' + cmd + '.') else : ExtensionChildren.append((pid,root)) Log("Spawned "+ cmd + " PID " + str(pid)) SimpleLog(plugin_log,"Spawned "+ cmd + " PID " + str(pid)) # wait until install/upgrade is finished timeout = 300 # 5 minutes retry = timeout/5 while retry > 0 and child.poll() == None: LogIfVerbose(cmd + ' still running with PID ' + str(pid)) time.sleep(5) retry-=1 if retry==0: Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid)) SimpleLog(plugin_log,'Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid)) os.kill(pid,9) return None code = child.wait() if code == None or code != 0: Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') SimpleLog(plugin_log,'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') return None Log(command + ' completed.') SimpleLog(plugin_log,command + ' completed.') return 0 def ReportHandlerStatus(self): """ Collect all status reports. """ # { "version": "1.0", "timestampUTC": "2014-03-31T21:28:58Z", # "aggregateStatus": { # "guestAgentStatus": { "version": "2.0.4PRE", "status": "Ready", "formattedMessage": { "lang": "en-US", "message": "GuestAgent is running and accepting new configurations." 
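# Illustrative sketch (standalone, not waagent's own code): the
# spawn/poll/kill pattern used by __launchCommandWithoutEventLog above --
# run the handler command, poll every 5 seconds, and terminate it once a
# 300-second budget is spent. run_with_timeout is a hypothetical helper.
import os
import signal
import subprocess
import time

def run_with_timeout(cmd, cwd, timeout=300, interval=5):
    child = subprocess.Popen(cmd, shell=True, cwd=cwd, stdout=subprocess.PIPE)
    waited = 0
    while child.poll() is None and waited < timeout:
        time.sleep(interval)
        waited += interval
    if child.poll() is None:
        os.kill(child.pid, signal.SIGKILL)
        return None
    return child.wait()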
} }, # "handlerAggregateStatus": [{ # "handlerName": "ExampleHandlerLinux", "handlerVersion": "1.0", "status": "Ready", "runtimeSettingsStatus": { # "sequenceNumber": "2", "settingsStatus": { "timestampUTC": "2014-03-31T23:46:00Z", "status": { "name": "ExampleHandlerLinux", "operation": "Command Execution Finished", "configurationAppliedTime": "2014-03-31T23:46:00Z", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Finished executing command" }, # "substatus": [ # { "name": "StdOut", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Goodbye world!" } }, # { "name": "StdErr", "status": "success", "formattedMessage": { "lang": "en-US", "message": "" } } # ] # } } } } # ] # }} try: incarnation=self.Extensions[0].getAttribute("goalStateIncarnation") except: Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports') return -1 status='' statuses='' for p in self.Plugins: if p.getAttribute("state") == 'uninstall' or p.getAttribute("restricted") == 'true' : continue version=p.getAttribute("version") name=p.getAttribute("name") if p.getAttribute("isJson") != 'true': LogIfVerbose("Plugin " + name+" version: " +version+" is not a JSON Extension. Skipping.") continue reportHeartbeat = False if len(p.getAttribute("manifestdata"))<1: Error("Failed to get manifestdata.") else: reportHeartbeat = json.loads(p.getAttribute("manifestdata"))[0]['handlerManifest']['reportHeartbeat'] if len(statuses)>0: statuses+=',' statuses+=self.GenerateAggStatus(name, version, reportHeartbeat) tstamp=time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) #header #agent state if provisioned == False: if provisionError == None : agent_state='Provisioning' agent_msg='Guest Agent is starting.' else: agent_state='Provisioning Error.' agent_msg=provisionError else: agent_state='Ready' agent_msg='GuestAgent is running and accepting new configurations.' status='{"version":"1.0","timestampUTC":"'+tstamp+'","aggregateStatus":{"guestAgentStatus":{"version":"'+GuestAgentVersion+'","status":"'+agent_state+'","formattedMessage":{"lang":"en-US","message":"'+agent_msg+'"}},"handlerAggregateStatus":['+statuses+']}}' try: uri=GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&','&') except: Error('Error parsing element "StatusUploadBlob". 
Unable to send status reports') return -1 LogIfVerbose('Status report '+status+' sent to ' + uri) return UploadStatusBlob(uri, status.encode("utf-8")) def GetCurrentSequenceNumber(self, plugin_base_dir): """ Get the settings file with biggest file number in config folder """ config_dir = os.path.join(plugin_base_dir, 'config') seq_no = 0 for subdir, dirs, files in os.walk(config_dir): for file in files: try: cur_seq_no = int(os.path.basename(file).split('.')[0]) if cur_seq_no > seq_no: seq_no = cur_seq_no except ValueError: continue return str(seq_no) def GenerateAggStatus(self, name, version, reportHeartbeat = False): """ Generate the status which Azure can understand by the status and heartbeat reported by extension """ plugin_base_dir = LibDir+'/'+name+'-'+version+'/' current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir) status_file=os.path.join(plugin_base_dir, 'status/', current_seq_no +'.status') heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log') handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState') agg_state = 'NotReady' handler_state = None status_obj = None status_code = None formatted_message = None localized_message = None if os.path.exists(handler_state_file): handler_state = GetFileContents(handler_state_file).lower() if HandlerStatusToAggStatus.has_key(handler_state): agg_state = HandlerStatusToAggStatus[handler_state] if reportHeartbeat: if os.path.exists(heartbeat_file): d=int(time.time()-os.stat(heartbeat_file).st_mtime) if d > 600 : # not updated for more than 10 min agg_state = 'Unresponsive' else: try: heartbeat = json.loads(GetFileContents(heartbeat_file))[0]["heartbeat"] agg_state = heartbeat.get("status") status_code = heartbeat.get("code") formatted_message = heartbeat.get("formattedMessage") localized_message = heartbeat.get("message") except: Error("Incorrect heartbeat file. Ignore it. ") else: agg_state = 'Unresponsive' #get status file reported by extension if os.path.exists(status_file): # raw status generated by extension is an array, get the first item and remove the unnecessary element try: status_obj = json.loads(GetFileContents(status_file))[0] del status_obj["version"] except: Error("Incorrect status file. Will NOT settingsStatus in settings. 
") agg_status_obj = {"handlerName": name, "handlerVersion": version, "status": agg_state, "runtimeSettingsStatus" : {"sequenceNumber": current_seq_no}} if status_obj: agg_status_obj["runtimeSettingsStatus"]["settingsStatus"] = status_obj if status_code != None: agg_status_obj["code"] = status_code if formatted_message: agg_status_obj["formattedMessage"] = formatted_message if localized_message: agg_status_obj["message"] = localized_message agg_status_string = json.dumps(agg_status_obj) LogIfVerbose("Handler Aggregated Status:" + agg_status_string) return agg_status_string def SetHandlerState(self, handler, state=''): zip_dir=LibDir+"/" + handler mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.') return None Log("SetHandlerState: "+handler+", "+state) return SetFileContents(os.path.dirname(mfile)+'/config/HandlerState', state) def GetHandlerState(self, handler): handlerState = GetFileContents(handler+'/config/HandlerState') if (handlerState): return handlerState.rstrip('\r\n') else: return 'NotInstalled' class HostingEnvironmentConfig(object): """ Parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml """ # # # # # # # # # # # # # # # # # # # # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset Members. """ self.StoredCertificates = None self.Deployment = None self.Incarnation = None self.Role = None self.HostingEnvironmentSettings = None self.ApplicationSettings = None self.Certificates = None self.ResourceReferences = None def Parse(self, xmlText): """ Parse and create HostingEnvironmentConfig.xml. """ self.reinitialize() SetFileContents("HostingEnvironmentConfig.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in [ "HostingEnvironmentConfig", "Deployment", "Service", "ServiceInstance", "Incarnation", "Role", ]: if not dom.getElementsByTagName(a): Error("HostingEnvironmentConfig.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "HostingEnvironmentConfig": Error("HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig") return None self.ApplicationSettings = dom.getElementsByTagName("Setting") self.Certificates = dom.getElementsByTagName("StoredCertificate") return self def DecryptPassword(self, e): """ Return decrypted password. """ SetFileContents("password.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"password.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"password.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + textwrap.fill(e, 64)) return RunGetOutput(Openssl + " cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem")[1] def ActivateResourceDisk(self): return MyDistro.ActivateResourceDisk() def Process(self): """ Execute ActivateResourceDisk in separate thread. Create the user account. Launch ConfigurationConsumer if specified in the config. 
""" no_thread = False if DiskActivated == False: for m in inspect.getmembers(MyDistro): if 'ActivateResourceDiskNoThread' in m: no_thread = True break if no_thread == True : MyDistro.ActivateResourceDiskNoThread() else : diskThread = threading.Thread(target = self.ActivateResourceDisk) diskThread.start() User = None Pass = None Expiration = None Thumbprint = None for b in self.ApplicationSettings: sname = b.getAttribute("name") svalue = b.getAttribute("value") if User != None and Pass != None: if User != "root" and User != "" and Pass != "": CreateAccount(User, Pass, Expiration, Thumbprint) else: Error("Not creating user account: " + User) for c in self.Certificates: csha1 = c.getAttribute("certificateId").split(':')[1].upper() if os.path.isfile(csha1 + ".prv"): Log("Private key with thumbprint: " + csha1 + " was retrieved.") if os.path.isfile(csha1 + ".crt"): Log("Public cert with thumbprint: " + csha1 + " was retrieved.") program = Config.get("Role.ConfigurationConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/HostingEnvironmentConfig.xml"])) except OSError, e : ErrorWithPrefix('HostingEnvironmentConfig.Process','Exception: '+ str(e) +' occured launching ' + program ) class GoalState(Util): """ Primary container for all configuration except OvfXml. Encapsulates http communication with endpoint server. Initializes and populates: self.HostingEnvironmentConfig self.SharedConfig self.ExtensionsConfig self.Certificates """ # # # 2010-12-15 # 1 # # Started # # 16001 # # # # c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 # # # MachineRole_IN_0 # Started # # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=hostingEnvironmentConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=sharedConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=certificates&incarnation=1 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=extensionsConfig&incarnation=2 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=fullConfig&incarnation=2 # # # # # # # There is only one Role for VM images. # # Of primary interest is: # LBProbePorts -- an http server needs to run here # We also note Container/ContainerID and RoleInstance/InstanceId to form the health report. # And of course, Incarnation # def __init__(self, Agent): self.Agent = Agent self.Endpoint = Agent.Endpoint self.TransportCert = Agent.TransportCert self.reinitialize() def reinitialize(self): self.Incarnation = None # integer self.ExpectedState = None # "Started" self.HostingEnvironmentConfigUrl = None self.HostingEnvironmentConfigXml = None self.HostingEnvironmentConfig = None self.SharedConfigUrl = None self.SharedConfigXml = None self.SharedConfig = None self.CertificatesUrl = None self.CertificatesXml = None self.Certificates = None self.ExtensionsConfigUrl = None self.ExtensionsConfigXml = None self.ExtensionsConfig = None self.RoleInstanceId = None self.ContainerId = None self.LoadBalancerProbePort = None # integer, ?list of integers def Parse(self, xmlText): """ Request configuration data from endpoint server. Parse and populate contained configuration objects. 
Calls Certificates().Parse() Calls SharedConfig().Parse Calls ExtensionsConfig().Parse Calls HostingEnvironmentConfig().Parse """ self.reinitialize() LogIfVerbose(xmlText) node = xml.dom.minidom.parseString(xmlText).childNodes[0] if node.localName != "GoalState": Error("GoalState.Parse: root not GoalState") return None for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE: if a.localName == "Incarnation": self.Incarnation = GetNodeTextData(a) elif a.localName == "Machine": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ExpectedState": self.ExpectedState = GetNodeTextData(b) Log("ExpectedState: " + self.ExpectedState) elif b.localName == "LBProbePorts": for c in b.childNodes: if c.nodeType == node.ELEMENT_NODE and c.localName == "Port": self.LoadBalancerProbePort = int(GetNodeTextData(c)) elif a.localName == "Container": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ContainerId": self.ContainerId = GetNodeTextData(b) Log("ContainerId: " + self.ContainerId) elif b.localName == "RoleInstanceList": for c in b.childNodes: if c.localName == "RoleInstance": for d in c.childNodes: if d.nodeType == node.ELEMENT_NODE: if d.localName == "InstanceId": self.RoleInstanceId = GetNodeTextData(d) Log("RoleInstanceId: " + self.RoleInstanceId) elif d.localName == "State": pass elif d.localName == "Configuration": for e in d.childNodes: if e.nodeType == node.ELEMENT_NODE: LogIfVerbose(e.localName) if e.localName == "HostingEnvironmentConfig": self.HostingEnvironmentConfigUrl = GetNodeTextData(e) LogIfVerbose("HostingEnvironmentConfigUrl:" + self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfigXml = self.HttpGetWithHeaders(self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfig = HostingEnvironmentConfig().Parse(self.HostingEnvironmentConfigXml) elif e.localName == "SharedConfig": self.SharedConfigUrl = GetNodeTextData(e) LogIfVerbose("SharedConfigUrl:" + self.SharedConfigUrl) self.SharedConfigXml = self.HttpGetWithHeaders(self.SharedConfigUrl) self.SharedConfig = SharedConfig().Parse(self.SharedConfigXml) self.SharedConfig.Save() elif e.localName == "ExtensionsConfig": self.ExtensionsConfigUrl = GetNodeTextData(e) LogIfVerbose("ExtensionsConfigUrl:" + self.ExtensionsConfigUrl) self.ExtensionsConfigXml = self.HttpGetWithHeaders(self.ExtensionsConfigUrl) elif e.localName == "Certificates": self.CertificatesUrl = GetNodeTextData(e) LogIfVerbose("CertificatesUrl:" + self.CertificatesUrl) self.CertificatesXml = self.HttpSecureGetWithHeaders(self.CertificatesUrl, self.TransportCert) self.Certificates = Certificates().Parse(self.CertificatesXml) if self.Incarnation == None: Error("GoalState.Parse: Incarnation missing") return None if self.ExpectedState == None: Error("GoalState.Parse: ExpectedState missing") return None if self.RoleInstanceId == None: Error("GoalState.Parse: RoleInstanceId missing") return None if self.ContainerId == None: Error("GoalState.Parse: ContainerId missing") return None SetFileContents("GoalState." 
+ self.Incarnation + ".xml", xmlText) return self def Process(self): """ Calls HostingEnvironmentConfig.Process() """ LogIfVerbose("Process goalstate") self.HostingEnvironmentConfig.Process() self.SharedConfig.Process() class OvfEnv(object): """ Read, and process provisioning info from provisioning file OvfEnv.xml """ # # # # # 1.0 # # LinuxProvisioningConfiguration # HostName # UserName # UserPassword # false # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/authorized_keys # # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/id_rsa # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.WaNs = "http://schemas.microsoft.com/windowsazure" self.OvfNs = "http://schemas.dmtf.org/ovf/environment/1" self.MajorVersion = 1 self.MinorVersion = 0 self.ComputerName = None self.AdminPassword = None self.UserName = None self.UserPassword = None self.CustomData = None self.DisableSshPasswordAuthentication = True self.SshPublicKeys = [] self.SshKeyPairs = [] def Parse(self, xmlText, isDeprovision = False): """ Parse xml tree, retreiving user and ssh key information. Return self. """ self.reinitialize() LogIfVerbose(re.sub(".*?<", "*<", xmlText)) dom = xml.dom.minidom.parseString(xmlText) if len(dom.getElementsByTagNameNS(self.OvfNs, "Environment")) != 1: Error("Unable to parse OVF XML.") section = None newer = False for p in dom.getElementsByTagNameNS(self.WaNs, "ProvisioningSection"): for n in p.childNodes: if n.localName == "Version": verparts = GetNodeTextData(n).split('.') major = int(verparts[0]) minor = int(verparts[1]) if major > self.MajorVersion: newer = True if major != self.MajorVersion: break if minor > self.MinorVersion: newer = True section = p if newer == True: Warn("Newer provisioning configuration detected. 
Please consider updating waagent.") if section == None: Error("Could not find ProvisioningSection with major version=" + str(self.MajorVersion)) return None self.ComputerName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "HostName")[0]) self.UserName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserName")[0]) if isDeprovision == True: return self try: self.UserPassword = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserPassword")[0]) except: pass CDSection=None try: CDSection=section.getElementsByTagNameNS(self.WaNs, "CustomData") if len(CDSection) > 0 : self.CustomData=GetNodeTextData(CDSection[0]) if len(self.CustomData)>0: SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData), 'utf-8')) Log('Wrote ' + LibDir + '/CustomData') else : Error(' contains no data!') except Exception, e: Error( str(e)+' occured creating ' + LibDir + '/CustomData') disableSshPass = section.getElementsByTagNameNS(self.WaNs, "DisableSshPasswordAuthentication") if len(disableSshPass) != 0: self.DisableSshPasswordAuthentication = (GetNodeTextData(disableSshPass[0]).lower() == "true") for pkey in section.getElementsByTagNameNS(self.WaNs, "PublicKey"): LogIfVerbose(repr(pkey)) fp = None path = None for c in pkey.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshPublicKeys += [[fp, path]] for keyp in section.getElementsByTagNameNS(self.WaNs, "KeyPair"): fp = None path = None LogIfVerbose(repr(keyp)) for c in keyp.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshKeyPairs += [[fp, path]] return self def PrepareDir(self, filepath): """ Create home dir for self.UserName Change owner and return path. """ home = MyDistro.GetHome() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) if (path.startswith("/") == False) or (path.endswith("/") == True): return None dir = path.rsplit('/', 1)[0] if dir != "": CreateDir(dir, "root", 0700) if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(dir, self.UserName) return path def NumberToBytes(self, i): """ Pack number into bytes. Retun as string. """ result = [] while i: result.append(chr(i & 0xFF)) i >>= 8 result.reverse() return ''.join(result) def BitsToString(self, a): """ Return string representation of bits in a. """ index=7 s = "" c = 0 for bit in a: c = c | (bit << index) index = index - 1 if index == -1: s = s + struct.pack('>B', c) c = 0 index = 7 return s def OpensslToSsh(self, file): """ Return base-64 encoded key appropriate for ssh. """ from pyasn1.codec.der import decoder as der_decoder try: f = open(file).read().replace('\n','').split("KEY-----")[1].split('-')[0] k=der_decoder.decode(self.BitsToString(der_decoder.decode(base64.b64decode(f))[0][1]))[0] n=k[0] e=k[1] keydata="" keydata += struct.pack('>I',len("ssh-rsa")) keydata += "ssh-rsa" keydata += struct.pack('>I',len(self.NumberToBytes(e))) keydata += self.NumberToBytes(e) keydata += struct.pack('>I',len(self.NumberToBytes(n)) + 1) keydata += "\0" keydata += self.NumberToBytes(n) except Exception, e: print("OpensslToSsh: Exception " + str(e)) return None return "ssh-rsa " + base64.b64encode(keydata) + "\n" def Process(self): """ Process all certificate and key info. DisableSshPasswordAuthentication if configured. 
CreateAccount(user) Wait for WaAgent.EnvMonitor.IsHostnamePublished(). Restart ssh service. """ error = None if self.ComputerName == None : return "Error: Hostname missing" error=WaAgent.EnvMonitor.SetHostName(self.ComputerName) if error: return error if self.DisableSshPasswordAuthentication: filepath = "/etc/ssh/sshd_config" # Disable RFC 4252 and RFC 4256 authentication schemes. ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not (a.startswith("PasswordAuthentication") or a.startswith("ChallengeResponseAuthentication")), GetFileContents(filepath).split('\n'))) + "\nPasswordAuthentication no\nChallengeResponseAuthentication no\n") Log("Disabled SSH password-based authentication methods.") if self.AdminPassword != None: MyDistro.changePass('root',self.AdminPassword) if self.UserName != None: error = MyDistro.CreateAccount(self.UserName, self.UserPassword, None, None) sel = MyDistro.isSelinuxRunning() if sel : MyDistro.setSelinuxEnforce(0) home = MyDistro.GetHome() for pkey in self.SshPublicKeys: Log("Deploy public key:{0}".format(pkey[0])) if not os.path.isfile(pkey[0] + ".crt"): Error("PublicKey not found: " + pkey[0]) error = "Failed to deploy public key (0x09)." continue path = self.PrepareDir(pkey[1]) if path == None: Error("Invalid path: " + pkey[1] + " for PublicKey: " + pkey[0]) error = "Invalid path for public key (0x03)." continue Run(Openssl + " x509 -in " + pkey[0] + ".crt -noout -pubkey > " + pkey[0] + ".pub") MyDistro.setSelinuxContext(pkey[0] + '.pub','unconfined_u:object_r:ssh_home_t:s0') MyDistro.sshDeployPublicKey(pkey[0] + '.pub',path) MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) for keyp in self.SshKeyPairs: Log("Deploy key pair:{0}".format(keyp[0])) if not os.path.isfile(keyp[0] + ".prv"): Error("KeyPair not found: " + keyp[0]) error = "Failed to deploy key pair (0x0A)." continue path = self.PrepareDir(keyp[1]) if path == None: Error("Invalid path: " + keyp[1] + " for KeyPair: " + keyp[0]) error = "Invalid path for key pair (0x05)." 
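# Illustrative sketch (standalone, not waagent's own code): the sshd_config
# rewrite performed in Process() when DisableSshPasswordAuthentication is set --
# drop existing PasswordAuthentication / ChallengeResponseAuthentication lines
# and append explicit "no" settings. The function name is hypothetical.
def disable_ssh_password_auth(config_text):
    kept = [line for line in config_text.split("\n")
            if not (line.startswith("PasswordAuthentication")
                    or line.startswith("ChallengeResponseAuthentication"))]
    return ("\n".join(kept)
            + "\nPasswordAuthentication no\nChallengeResponseAuthentication no\n")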
continue SetFileContents(path, GetFileContents(keyp[0] + ".prv")) os.chmod(path, 0600) Run("ssh-keygen -y -f " + keyp[0] + ".prv > " + path + ".pub") MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') MyDistro.setSelinuxContext(path + '.pub','unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) ChangeOwner(path + ".pub", self.UserName) if sel : MyDistro.setSelinuxEnforce(1) while not WaAgent.EnvMonitor.IsHostnamePublished(): time.sleep(1) MyDistro.restartSshService() return error class WALAEvent(object): def __init__(self): self.providerId="" self.eventId=1 self.OpcodeName="" self.KeywordName="" self.TaskName="" self.TenantName="" self.RoleName="" self.RoleInstanceName="" self.ContainerId="" self.ExecutionMode="IAAS" self.OSVersion="" self.GAVersion="" self.RAM=0 self.Processors=0 def ToXml(self): strEventid=u''.format(self.eventId) strProviderid=u''.format(self.providerId) strRecordFormat = u'' strRecordNoQuoteFormat = u'' strMtStr=u'mt:wstr' strMtUInt64=u'mt:uint64' strMtBool=u'mt:bool' strMtFloat=u'mt:float64' strEventsData=u"" for attName in self.__dict__: if attName in ["eventId","filedCount","providerId"]: continue attValue = self.__dict__[attName] if type(attValue) is int: strEventsData+=strRecordFormat.format(attName,attValue,strMtUInt64) continue if type(attValue) is str: attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if str(type(attValue)).count("'unicode'") >0 : attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if type(attValue) is bool: strEventsData+=strRecordFormat.format(attName,attValue,strMtBool) continue if type(attValue) is float: strEventsData+=strRecordFormat.format(attName,attValue,strMtFloat) continue Log("Warning: property "+attName+":"+str(type(attValue))+":type"+str(type(attValue))+"Can't convert to events data:"+":type not supported") return u"{0}{1}{2}".format(strProviderid,strEventid,strEventsData) def Save(self): eventfolder = LibDir+"/events" if not os.path.exists(eventfolder): os.mkdir(eventfolder) os.chmod(eventfolder,0700) if len(os.listdir(eventfolder)) > 1000: raise Exception("WriteToFolder:Too many file under "+eventfolder+" exit") filename = os.path.join(eventfolder,str(int(time.time()*1000000))) with open(filename+".tmp",'wb+') as hfile: hfile.write(self.ToXml().encode("utf-8")) os.rename(filename+".tmp",filename+".tld") class WALAEventOperation: HeartBeat="HeartBeat" Provision = "Provision" Install = "Install" UnIsntall = "UnInstall" Disable = "Disable" Enable = "Enable" Download = "Download" Upgrade = "Upgrade" Update = "Update" def AddExtensionEvent(name,op,isSuccess,duration=0,version="1.0",message="",type="",isInternal=False): event = ExtensionEvent() event.Name=name event.Version=version event.IsInternal=isInternal event.Operation=op event.OperationSuccess=isSuccess event.Message=message event.Duration=duration event.ExtensionType=type try: event.Save() except: Error("Error "+traceback.format_exc()) class ExtensionEvent(WALAEvent): def __init__(self): WALAEvent.__init__(self) self.eventId=1 self.providerId="69B669B9-4AF8-4C50-BDC4-6006FA76E975" self.Name="" self.Version="" self.IsInternal=False self.Operation="" self.OperationSuccess=True self.ExtensionType="" self.Message="" self.Duration=0 class WALAEventMonitor(WALAEvent): def __init__(self,postMethod): 
WALAEvent.__init__(self) self.post = postMethod self.sysInfo={} self.eventdir = LibDir+"/events" self.issysteminfoinitilized = False def StartEventsLoop(self): eventThread = threading.Thread(target = self.EventsLoop) eventThread.setDaemon(True) eventThread.start() def EventsLoop(self): LastReportHeartBeatTime = datetime.datetime.min try: while True: if (datetime.datetime.now()-LastReportHeartBeatTime) > \ datetime.timedelta(minutes=30): LastReportHeartBeatTime = datetime.datetime.now() AddExtensionEvent(op=WALAEventOperation.HeartBeat,name="WALA",isSuccess=True) self.postNumbersInOneLoop=0 self.CollectAndSendWALAEvents() time.sleep(60) except: Error("Exception in events loop:"+traceback.format_exc()) def SendEvent(self,providerid,events): dataFormat = u'{1}'\ '' data = dataFormat.format(providerid,events) self.post("/machine/?comp=telemetrydata", data) def CollectAndSendWALAEvents(self): if not os.path.exists(self.eventdir): return #Throtting, can't send more than 3 events in 15 seconds eventSendNumber=0 eventFiles = os.listdir(self.eventdir) events = {} for file in eventFiles: if not file.endswith(".tld"): continue with open(os.path.join(self.eventdir,file),"rb") as hfile: #if fail to open or delete the file, throw exception xmlStr = hfile.read().decode("utf-8",'ignore') os.remove(os.path.join(self.eventdir,file)) params="" eventid="" providerid="" #if exception happen during process an event, catch it and continue try: xmlStr = self.AddSystemInfo(xmlStr) for node in xml.dom.minidom.parseString(xmlStr.encode("utf-8")).childNodes[0].childNodes: if node.tagName == "Param": params+=node.toxml() if node.tagName == "Event": eventid=node.getAttribute("id") if node.tagName == "Provider": providerid = node.getAttribute("id") except: Error(traceback.format_exc()) continue if len(params)==0 or len(eventid)==0 or len(providerid)==0: Error("Empty filed in params:"+params+" event id:"+eventid+" provider id:"+providerid) continue eventstr = u''.format(eventid,params) if not events.get(providerid): events[providerid]="" if len(events[providerid]) >0 and len(events.get(providerid)+eventstr)>= 63*1024: eventSendNumber+=1 self.SendEvent(providerid,events.get(providerid)) if eventSendNumber %3 ==0: time.sleep(15) events[providerid]="" if len(eventstr) >= 63*1024: Error("Signle event too large abort "+eventstr[:300]) continue events[providerid]=events.get(providerid)+eventstr for key in events.keys(): if len(events[key]) > 0: eventSendNumber+=1 self.SendEvent(key,events[key]) if eventSendNumber%3 == 0: time.sleep(15) def AddSystemInfo(self,eventData): if not self.issysteminfoinitilized: self.issysteminfoinitilized=True try: self.sysInfo["OSVersion"]=platform.system()+":"+"-".join(DistInfo(1))+":"+platform.release() self.sysInfo["GAVersion"]=GuestAgentVersion self.sysInfo["RAM"]=MyDistro.getTotalMemory() self.sysInfo["Processors"]=MyDistro.getProcessorCores() sharedConfig = xml.dom.minidom.parse("/var/lib/waagent/SharedConfig.xml").childNodes[0] hostEnvConfig= xml.dom.minidom.parse("/var/lib/waagent/HostingEnvironmentConfig.xml").childNodes[0] gfiles = RunGetOutput("ls -t /var/lib/waagent/GoalState.*.xml")[1] goalStateConfi = xml.dom.minidom.parse(gfiles.split("\n")[0]).childNodes[0] self.sysInfo["TenantName"]=hostEnvConfig.getElementsByTagName("Deployment")[0].getAttribute("name") self.sysInfo["RoleName"]=hostEnvConfig.getElementsByTagName("Role")[0].getAttribute("name") self.sysInfo["RoleInstanceName"]=sharedConfig.getElementsByTagName("Instance")[0].getAttribute("id") 
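# Illustrative sketch (standalone, not waagent's own code): the batching and
# throttling rule used by CollectAndSendWALAEvents above -- concatenate events
# for one provider until a batch would reach roughly 63 KB, post it, and sleep
# 15 seconds after every third post; a single oversized event is skipped.
# send_in_batches and post are hypothetical names.
import time

def send_in_batches(event_strings, post, limit=63 * 1024, per_window=3, window=15):
    batch = ""
    posts = 0
    for ev in event_strings:
        if len(ev) >= limit:
            continue
        if batch and len(batch) + len(ev) >= limit:
            post(batch)
            posts += 1
            if posts % per_window == 0:
                time.sleep(window)
            batch = ""
        batch += ev
    if batch:
        post(batch)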
self.sysInfo["ContainerId"]=goalStateConfi.getElementsByTagName("ContainerId")[0].childNodes[0].nodeValue except: Error(traceback.format_exc()) eventObject = xml.dom.minidom.parseString(eventData.encode("utf-8")).childNodes[0] for node in eventObject.childNodes: if node.tagName == "Param": name = node.getAttribute("Name") if self.sysInfo.get(name): node.setAttribute("Value",xml.sax.saxutils.escape(str(self.sysInfo[name]))) return eventObject.toxml() class Agent(Util): """ Primary object container for the provisioning process. """ def __init__(self): self.GoalState = None self.Endpoint = None self.LoadBalancerProbeServer = None self.HealthReportCounter = 0 self.TransportCert = "" self.EnvMonitor = None self.SendData = None self.DhcpResponse = None def CheckVersions(self): """ Query endpoint server for wire protocol version. Fail if our desired protocol version is not seen. """ # # # # 2010-12-15 # # # 2010-12-15 # 2010-28-10 # # global ProtocolVersion protocolVersionSeen = False node = xml.dom.minidom.parseString(self.HttpGetWithoutHeaders("/?comp=versions")).childNodes[0] if node.localName != "Versions": Error("CheckVersions: root not Versions") return False for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE and a.localName == "Supported": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE and b.localName == "Version": v = GetNodeTextData(b) LogIfVerbose("Fabric supported wire protocol version: " + v) if v == ProtocolVersion: protocolVersionSeen = True if a.nodeType == node.ELEMENT_NODE and a.localName == "Preferred": v = GetNodeTextData(a.getElementsByTagName("Version")[0]) Log("Fabric preferred wire protocol version: " + v) if not protocolVersionSeen: Warn("Agent supported wire protocol version: " + ProtocolVersion + " was not advertised by Fabric.") else: Log("Negotiated wire protocol version: " + ProtocolVersion) return True def Unpack(self, buffer, offset, range): """ Unpack bytes into python values. """ result = 0 for i in range: result = (result << 8) | Ord(buffer[offset + i]) return result def UnpackLittleEndian(self, buffer, offset, length): """ Unpack little endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(length - 1, -1, -1))) def UnpackBigEndian(self, buffer, offset, length): """ Unpack big endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(0, length))) def HexDump3(self, buffer, offset, length): """ Dump range of buffer in formatted hex. """ return ''.join(['%02X' % Ord(char) for char in buffer[offset:offset + length]]) def HexDump2(self, buffer): """ Dump buffer in formatted hex. """ return self.HexDump3(buffer, 0, len(buffer)) def BuildDhcpRequest(self): """ Build DHCP request string. 
""" # # typedef struct _DHCP { # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ # UINT8 HardwareAddressType; /* htype: ethernet */ # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ # UINT8 Hops; /* hops: 0 */ # UINT8 TransactionID[4]; /* xid: random */ # UINT8 Seconds[2]; /* secs: 0 */ # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast */ # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte ethernet MAC address */ # UINT8 ServerName[64]; /* sname: 0 */ # UINT8 BootFileName[128]; /* file: 0 */ # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # # UINT8 MessageTypeCode; /* 53 */ # UINT8 MessageTypeLength; /* 1 */ # UINT8 MessageType; /* 1 for DISCOVER */ # UINT8 End; /* 255 */ # } DHCP; # # tuple of 244 zeros # (struct.pack_into would be good here, but requires Python 2.5) sendData = [0] * 244 transactionID = os.urandom(4) macAddress = MyDistro.GetMacAddress() # Opcode = 1 # HardwareAddressType = 1 (ethernet/MAC) # HardwareAddressLength = 6 (ethernet/MAC/48 bits) for a in range(0, 3): sendData[a] = [1, 1, 6][a] # fill in transaction id (random number to ensure response matches request) for a in range(0, 4): sendData[4 + a] = Ord(transactionID[a]) LogIfVerbose("BuildDhcpRequest: transactionId:%s,%04X" % (self.HexDump2(transactionID), self.UnpackBigEndian(sendData, 4, 4))) # fill in ClientHardwareAddress for a in range(0, 6): sendData[0x1C + a] = Ord(macAddress[a]) # DHCP Magic Cookie: 99, 130, 83, 99 # MessageTypeCode = 53 DHCP Message Type # MessageTypeLength = 1 # MessageType = DHCPDISCOVER # End = 255 DHCP_END for a in range(0, 8): sendData[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] return array.array("B", sendData) def IntegerToIpAddressV4String(self, a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, a & 0xFF) def RouteAdd(self, net, mask, gateway): """ Add specified route using /sbin/route add -net. """ net = self.IntegerToIpAddressV4String(net) mask = self.IntegerToIpAddressV4String(mask) gateway = self.IntegerToIpAddressV4String(gateway) Log("Route add: net={0}, mask={1}, gateway={2}".format(net, mask, gateway)) MyDistro.routeAdd(net, mask, gateway) def SetDefaultGateway(self, gateway): """ Set default gateway """ gateway = self.IntegerToIpAddressV4String(gateway) Log("Set default gateway: {0}".format(gateway)) MyDistro.setDefaultGateway(gateway) def HandleDhcpResponse(self, sendData, receiveBuffer): """ Parse DHCP response: Set default gateway. Set default routes. Retrieve endpoint server. Returns endpoint server or None on error. 
""" LogIfVerbose("HandleDhcpResponse") bytesReceived = len(receiveBuffer) if bytesReceived < 0xF6: Error("HandleDhcpResponse: Too few bytes received " + str(bytesReceived)) return None LogIfVerbose("BytesReceived: " + hex(bytesReceived)) LogWithPrefixIfVerbose("DHCP response:", HexDump(receiveBuffer, bytesReceived)) # check transactionId, cookie, MAC address # cookie should never mismatch # transactionId and MAC address may mismatch if we see a response meant from another machine for offsets in [list(range(4, 4 + 4)), list(range(0x1C, 0x1C + 6)), list(range(0xEC, 0xEC + 4))]: for offset in offsets: sentByte = Ord(sendData[offset]) receivedByte = Ord(receiveBuffer[offset]) if sentByte != receivedByte: LogIfVerbose("HandleDhcpResponse: sent cookie:" + self.HexDump3(sendData, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: rcvd cookie:" + self.HexDump3(receiveBuffer, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: sent transactionID:" + self.HexDump3(sendData, 4, 4)) LogIfVerbose("HandleDhcpResponse: rcvd transactionID:" + self.HexDump3(receiveBuffer, 4, 4)) LogIfVerbose("HandleDhcpResponse: sent ClientHardwareAddress:" + self.HexDump3(sendData, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: rcvd ClientHardwareAddress:" + self.HexDump3(receiveBuffer, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: transactionId, cookie, or MAC address mismatch") return None endpoint = None # # Walk all the returned options, parsing out what we need, ignoring the others. # We need the custom option 245 to find the the endpoint we talk to, # as well as, to handle some Linux DHCP client incompatibilities, # options 3 for default gateway and 249 for routes. And 255 is end. # i = 0xF0 # offset to first option while i < bytesReceived: option = Ord(receiveBuffer[i]) length = 0 if (i + 1) < bytesReceived: length = Ord(receiveBuffer[i + 1]) LogIfVerbose("DHCP option " + hex(option) + " at offset:" + hex(i) + " with length:" + hex(length)) if option == 255: LogIfVerbose("DHCP packet ended at offset " + hex(i)) break elif option == 249: # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx LogIfVerbose("Routes at offset:" + hex(i) + " with length:" + hex(length)) if length < 5: Error("Data too small for option " + str(option)) j = i + 2 while j < (i + length + 2): maskLengthBits = Ord(receiveBuffer[j]) maskLengthBytes = (((maskLengthBits + 7) & ~7) >> 3) mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - maskLengthBits)) j += 1 net = self.UnpackBigEndian(receiveBuffer, j, maskLengthBytes) net <<= (32 - maskLengthBytes * 8) net &= mask j += maskLengthBytes gateway = self.UnpackBigEndian(receiveBuffer, j, 4) j += 4 self.RouteAdd(net, mask, gateway) if j != (i + length + 2): Error("HandleDhcpResponse: Unable to parse routes") elif option == 3 or option == 245: if i + 5 < bytesReceived: if length != 4: Error("HandleDhcpResponse: Endpoint or Default Gateway not 4 bytes") return None gateway = self.UnpackBigEndian(receiveBuffer, i + 2, 4) IpAddress = self.IntegerToIpAddressV4String(gateway) if option == 3: self.SetDefaultGateway(gateway) name = "DefaultGateway" else: endpoint = IpAddress name = "Azure wire protocol endpoint" LogIfVerbose(name + ": " + IpAddress + " at " + hex(i)) else: Error("HandleDhcpResponse: Data too small for option " + str(option)) else: LogIfVerbose("Skipping DHCP option " + hex(option) + " at " + hex(i) + " with length " + hex(length)) i += length + 2 return endpoint def DoDhcpWork(self): """ Discover the wire server via DHCP option 245. And workaround incompatibility with Azure DHCP servers. 
""" ShortSleep = False # Sleep 1 second before retrying DHCP queries. ifname=None sleepDurations = [0, 10, 30, 60, 60] maxRetry = len(sleepDurations) lastTry = (maxRetry - 1) for retry in range(0, maxRetry): try: #Open DHCP port if iptables is enabled. Run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. Run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. strRetry = str(retry) prefix = "DoDhcpWork: try=" + strRetry LogIfVerbose(prefix) sendData = self.BuildDhcpRequest() LogWithPrefixIfVerbose("DHCP request:", HexDump(sendData, len(sendData))) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) missingDefaultRoute = True try: if DistInfo()[0] == 'FreeBSD': missingDefaultRoute = True else: routes = RunGetOutput("route -n")[1] for line in routes.split('\n'): if line.startswith("0.0.0.0 ") or line.startswith("default "): missingDefaultRoute = False except: pass if missingDefaultRoute: # This is required because sending after binding to 0.0.0.0 fails with # network unreachable when the default gateway is not set up. ifname=MyDistro.GetInterfaceName() Log("DoDhcpWork: Missing default route - adding broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route add -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route add 255.255.255.255 dev " + ifname,chk_err=False) if MyDistro.isDHCPEnabled(): MyDistro.stopDHCP() sock.bind(("0.0.0.0", 68)) sock.sendto(sendData, ("", 67)) sock.settimeout(10) Log("DoDhcpWork: Setting socket.timeout=10, entering recv") receiveBuffer = sock.recv(1024) endpoint = self.HandleDhcpResponse(sendData, receiveBuffer) if endpoint == None: LogIfVerbose("DoDhcpWork: No endpoint found") if endpoint != None or retry == lastTry: if endpoint != None: self.SendData = sendData self.DhcpResponse = receiveBuffer if retry == lastTry: LogIfVerbose("DoDhcpWork: try=" + strRetry) return endpoint sleepDuration = [sleepDurations[retry % len(sleepDurations)], 1][ShortSleep] LogIfVerbose("DoDhcpWork: sleep=" + str(sleepDuration)) time.sleep(sleepDuration) except Exception, e: ErrorWithPrefix(prefix, str(e)) ErrorWithPrefix(prefix, traceback.format_exc()) finally: sock.close() if missingDefaultRoute: #We added this route - delete it Log("DoDhcpWork: Removing broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route del -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route del 255.255.255.255 dev " + ifname,chk_err=False) # We supress error logging on error. if MyDistro.isDHCPEnabled(): MyDistro.startDHCP() return None def UpdateAndPublishHostName(self, name): """ Set hostname locally and publish to iDNS """ Log("Setting host name: " + name) MyDistro.publishHostname(name) ethernetInterface = MyDistro.GetInterfaceName() MyDistro.RestartInterface(ethernetInterface) self.RestoreRoutes() def RestoreRoutes(self): """ If there is a DHCP response, then call HandleDhcpResponse. """ if self.SendData != None and self.DhcpResponse != None: self.HandleDhcpResponse(self.SendData, self.DhcpResponse) def UpdateGoalState(self): """ Retreive goal state information from endpoint server. Parse xml and initialize Agent.GoalState object. Return object or None on error. 
""" goalStateXml = None maxRetry = 9 log = NoLog for retry in range(1, maxRetry + 1): strRetry = str(retry) log("retry UpdateGoalState,retry=" + strRetry) goalStateXml = self.HttpGetWithHeaders("/machine/?comp=goalstate") if goalStateXml != None: break log = Log time.sleep(retry) if not goalStateXml: Error("UpdateGoalState failed.") return Log("Retrieved GoalState from Azure Fabric.") self.GoalState = GoalState(self).Parse(goalStateXml) return self.GoalState def ReportReady(self): """ Send health report 'Ready' to server. This signals the fabric that our provosion is completed, and the host is ready for operation. """ counter = (self.HealthReportCounter + 1) % 1000000 self.HealthReportCounter = counter healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "Ready") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportNotReady(self, status, desc): """ Send health report 'Provisioning' to server. This signals the fabric that our provosion is starting. """ healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "NotReady" + "
" + status + "" + desc + "
" + "
") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportRoleProperties(self, thumbprint): """ Send roleProperties and thumbprint to server. """ roleProperties = ("" + "" + self.GoalState.ContainerId + "" + "" + "" + self.GoalState.RoleInstanceId + "" + "" + "") a = self.HttpPostWithHeaders("/machine?comp=roleProperties", roleProperties) Log("Posted Role Properties. CertificateThumbprint=" + thumbprint) return a def LoadBalancerProbeServer_Shutdown(self): """ Shutdown the LoadBalancerProbeServer. """ if self.LoadBalancerProbeServer != None: self.LoadBalancerProbeServer.shutdown() self.LoadBalancerProbeServer = None def GenerateTransportCert(self): """ Create ssl certificate for https communication with endpoint server. """ Run(Openssl + " req -x509 -nodes -subj /CN=LinuxTransport -days 32768 -newkey rsa:2048 -keyout TransportPrivate.pem -out TransportCert.pem") cert = "" for line in GetFileContents("TransportCert.pem").split('\n'): if not "CERTIFICATE" in line: cert += line.rstrip() return cert def DoVmmStartup(self): """ Spawn the VMM startup script. """ Log("Starting Microsoft System Center VMM Initialization Process") pid = subprocess.Popen(["/bin/bash","/mnt/cdrom/secure/"+VMM_STARTUP_SCRIPT_NAME,"-p /mnt/cdrom/secure/ "]).pid time.sleep(5) sys.exit(0) def TryUnloadAtapiix(self): """ If global modloaded is True, then we loaded the ata_piix kernel module, unload it. """ if modloaded: Run("rmmod ata_piix.ko",chk_err=False) Log("Unloaded ata_piix.ko driver for ATAPI CD-ROM") def TryLoadAtapiix(self): """ Load the ata_piix kernel module if it exists. If successful, set global modloaded to True. If unable to load module leave modloaded False. """ global modloaded modloaded=False retcode,krn=RunGetOutput('uname -r') krn_pth='/lib/modules/'+krn.strip('\n')+'/kernel/drivers/ata/ata_piix.ko' if Run("lsmod | grep ata_piix",chk_err=False) == 0 : Log("Module " + krn_pth + " driver for ATAPI CD-ROM is already present.") return 0 if retcode: Error("Unable to provision: Failed to call uname -r") return "Unable to provision: Failed to call uname" if os.path.isfile(krn_pth): retcode,output=RunGetOutput("insmod " + krn_pth,chk_err=False) else: Log("Module " + krn_pth + " driver for ATAPI CD-ROM does not exist.") return 1 if retcode != 0: Error('Error calling insmod for '+ krn_pth + ' driver for ATAPI CD-ROM') return retcode time.sleep(1) # check 3 times if the mod is loaded for i in range(3): if Run('lsmod | grep ata_piix'): continue else : modloaded=True break if not modloaded: Error('Unable to load '+ krn_pth + ' driver for ATAPI CD-ROM') return 1 Log("Loaded " + krn_pth + " driver for ATAPI CD-ROM") # we have succeeded loading the ata_piix mod if it can be done. def SearchForVMMStartup(self): """ Search for a DVD/CDROM containing VMM's VMM_CONFIG_FILE_NAME. Call TryLoadAtapiix in case we must load the ata_piix module first. If VMM_CONFIG_FILE_NAME is found, call DoVmmStartup. Else, return to Azure Provisioning process. 
""" self.TryLoadAtapiix() if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) mounted=False for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None: continue dvd = '/dev/'+dvds.group(0) if Run("LC_ALL=C fdisk -l " + dvd + " | grep Disk",chk_err=False): continue # Not mountable else: for retry in range(1,6): retcode,output=RunGetOutput("mount -v " + dvd + " /mnt/cdrom/secure") Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) mounted=True break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." + str(retry) ) mounted=True break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not mounted: # unable to mount continue if not os.path.isfile("/mnt/cdrom/secure/"+VMM_CONFIG_FILE_NAME): #nope - mount the next drive if mounted: Run("umount "+dvd,chk_err=False) mounted=False continue else : # it is the vmm startup self.DoVmmStartup() Log("VMM Init script not found. Provisioning for Azure") return def Provision(self): """ Responible for: Regenerate ssh keys, Mount, read, and parse ovfenv.xml from provisioning dvd rom Process the ovfenv.xml info Call ReportRoleProperties If configured, delete root password. Return None on success, error string on error. """ enabled = Config.get("Provisioning.Enabled") if enabled != None and enabled.lower().startswith("n"): return Log("Provisioning image started.") type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") Run("ssh-keygen -N '' -t " + type + " -f /etc/ssh/ssh_host_" + type + "_key") MyDistro.restartSshService() #SetFileContents(LibDir + "/provisioned", "") dvd = None for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None : continue dvd = '/dev/'+dvds.group(0) if dvd == None: # No DVD device detected Error("No DVD device detected, unable to provision.") return "No DVD device detected, unable to provision." if MyDistro.mediaHasFilesystem(dvd) is False : out=MyDistro.load_ata_piix() if out: return out for i in range(10): # we may have to wait if os.path.exists(dvd): break Log("Waiting for DVD - sleeping 1 - "+str(i+1)+" try...") time.sleep(1) if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) #begin mount loop - 5 tries - 5 sec wait between for retry in range(1,6): location='/mnt/cdrom/secure' retcode,output=MyDistro.mountDVD(dvd,location) Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." + str(retry) ) break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not os.path.isfile("/mnt/cdrom/secure/ovf-env.xml"): Error("Unable to provision: Missing ovf-env.xml on DVD.") return "Failed to retrieve provisioning data (0x02)." ovfxml = (GetFileContents(u"/mnt/cdrom/secure/ovf-env.xml",asbin=False)) # use unicode here to ensure correct codec gets used. if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. 
First three bytes are > 128 and not unicode chars so we ignore them. ovfxml=ovfxml.strip(chr(0x00)) # we may have NULLs. ovfxml=ovfxml[ovfxml.find('.*?<", "*<", ovfxml)) Run("umount " + dvd,chk_err=False) MyDistro.unload_ata_piix() error = None if ovfxml != None: Log("Provisioning image using OVF settings in the DVD.") ovfobj = OvfEnv().Parse(ovfxml) if ovfobj != None: error = ovfobj.Process() if error : Error ("Provisioning image FAILED " + error) return ("Provisioning image FAILED " + error) Log("Ovf XML process finished") # This is done here because regenerated SSH host key pairs may be potentially overwritten when processing the ovfxml fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() Log("Provisioning image completed.") return error def Run(self): """ Called by 'waagent -daemon.' Main loop to process the goal state. State is posted every 25 seconds when provisioning has been completed. Search for VMM enviroment, start VMM script if found. Perform DHCP and endpoint server discovery by calling DoDhcpWork(). Check wire protocol versions. Set SCSI timeout on root device. Call GenerateTransportCert() to create ssl certs for server communication. Call UpdateGoalState(). If not provisioned, call ReportNotReady("Provisioning", "Starting") Call Provision(), set global provisioned = True if successful. Call goalState.Process() Start LBProbeServer if indicated in waagent.conf. Start the StateConsumer if indicated in waagent.conf. ReportReady if provisioning is complete. If provisioning failed, call ReportNotReady("ProvisioningFailed", provisionError) """ SetFileContents("/var/run/waagent.pid", str(os.getpid()) + "\n") reportHandlerStatusCount = 0 # Determine if we are in VMM. Spawn VMM_STARTUP_SCRIPT_NAME if found. 
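# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the agent): the Run() docstring above
# describes a polling loop -- fetch the goal state, provision once, report
# health, then process extensions and sleep roughly 25 seconds.  Reduced to
# its essentials, and with fetch_goal_state, provision, report and
# handle_extensions as hypothetical stand-ins for the real Agent methods,
# that loop looks like:
#
#     import time
#
#     def daemon_loop(fetch_goal_state, provision, report, handle_extensions,
#                     poll_interval=25):
#         provisioned = False
#         while True:
#             goal_state = fetch_goal_state()   # refresh goal state from the wire server
#             if not provisioned:
#                 provisioned = provision()     # provisioning runs exactly once
#             report(provisioned)               # ReportReady / ReportNotReady equivalent
#             handle_extensions(goal_state)     # process extension configuration, if any
#             time.sleep(poll_interval)         # the agent sleeps ~25 seconds per pass
# ---------------------------------------------------------------------------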
self.SearchForVMMStartup() ipv4='' while ipv4 == '' or ipv4 == '0.0.0.0' : ipv4=MyDistro.GetIpv4Address() if ipv4 == '' or ipv4 == '0.0.0.0' : Log("Waiting for network.") time.sleep(10) Log("IPv4 address: " + ipv4) mac='' mac=MyDistro.GetMacAddress() if len(mac)>0 : Log("MAC address: " + ":".join(["%02X" % Ord(a) for a in mac])) # Consume Entropy in ACPI table provided by Hyper-V try: SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0")) except: pass Log("Probing for Azure environment.") self.Endpoint = self.DoDhcpWork() while self.Endpoint == None: Log("Azure environment not detected.") Log("Retry environment detection in 60 seconds") time.sleep(60) self.Endpoint = self.DoDhcpWork() Log("Discovered Azure endpoint: " + self.Endpoint) if not self.CheckVersions(): Error("Agent.CheckVersions failed") sys.exit(1) self.EnvMonitor = EnvMonitor() # Set SCSI timeout on SCSI disks MyDistro.initScsiDiskTimeout() global provisioned global provisionError global Openssl Openssl = Config.get("OS.OpensslPath") if Openssl == None: Openssl = "openssl" self.TransportCert = self.GenerateTransportCert() eventMonitor = None incarnation = None # goalStateIncarnationFromHealthReport currentPort = None # loadBalancerProbePort goalState = None # self.GoalState, instance of GoalState provisioned = os.path.exists(LibDir + "/provisioned") program = Config.get("Role.StateConsumer") provisionError = None lbProbeResponder = True setting = Config.get("LBProbeResponder") if setting != None and setting.lower().startswith("n"): lbProbeResponder = False while True: if (goalState == None) or (incarnation == None) or (goalState.Incarnation != incarnation): try: goalState = self.UpdateGoalState() except HttpResourceGoneError as e: Warn("Incarnation is out of date:{0}".format(e)) incarnation = None continue if goalState == None : Warn("Failed to fetch goalstate") continue if provisioned == False: self.ReportNotReady("Provisioning", "Starting") goalState.Process() if provisioned == False: provisionError = self.Provision() if provisionError == None : provisioned = True SetFileContents(LibDir + "/provisioned", "") lastCtime = "NOTFIND" try: walaConfigFile = MyDistro.getConfigurationPath() lastCtime = time.ctime(os.path.getctime(walaConfigFile)) except: pass #Get Ctime of wala config, can help identify the base image of this VM AddExtensionEvent(name="WALA",op=WALAEventOperation.Provision,isSuccess=True, message="WALA Config Ctime:"+lastCtime) executeCustomData = Config.get("Provisioning.ExecuteCustomData") if executeCustomData != None and executeCustomData.lower().startswith("y"): if os.path.exists(LibDir + '/CustomData'): Run('chmod +x ' + LibDir + '/CustomData') Run(LibDir + '/CustomData') else: Error(LibDir + '/CustomData does not exist.') # # only one port supported # restart server if new port is different than old port # stop server if no longer a port # goalPort = goalState.LoadBalancerProbePort if currentPort != goalPort: try: self.LoadBalancerProbeServer_Shutdown() currentPort = goalPort if currentPort != None and lbProbeResponder == True: self.LoadBalancerProbeServer = LoadBalancerProbeServer(currentPort) if self.LoadBalancerProbeServer == None : lbProbeResponder = False Log("Unable to create LBProbeResponder.") except Exception, e: Error("Failed to launch LBProbeResponder: {0}".format(e)) currentPort = None # Report SSH key fingerprint type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" host_key_path = "/etc/ssh/ssh_host_" + type + "_key.pub" 
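# ---------------------------------------------------------------------------
# Illustrative sketch only: the fingerprint reported a few lines below is
# taken from "ssh-keygen -lf <pubkey>" output by selecting the second
# whitespace-separated field and stripping the colons.  The sample line is a
# made-up example of the older, colon-separated (MD5) output format that this
# parsing assumes; newer OpenSSH releases print "SHA256:..." instead.
#
#     def parse_fingerprint(ssh_keygen_line):
#         # e.g. "2048 a1:b2:...:d6 /etc/ssh/ssh_host_rsa_key.pub (RSA)"
#         return ssh_keygen_line.rstrip().split()[1].replace(':', '')
#
#     sample = ("2048 a1:b2:c3:d4:e5:f6:a7:b8:c9:d0:e1:f2:a3:b4:c5:d6 "
#               "/etc/ssh/ssh_host_rsa_key.pub (RSA)")
#     assert parse_fingerprint(sample) == "a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"
# ---------------------------------------------------------------------------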
if(MyDistro.waitForSshHostKey(host_key_path)): fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) if program != None and DiskActivated == True: try: Children.append(subprocess.Popen([program, "Ready"])) except OSError, e : ErrorWithPrefix('SharedConfig.Parse','Exception: '+ str(e) +' occured launching ' + program ) program = None sleepToReduceAccessDenied = 3 time.sleep(sleepToReduceAccessDenied) if provisionError != None: incarnation = self.ReportNotReady("ProvisioningFailed", provisionError) else: incarnation = self.ReportReady() # Process our extensions. if goalState.ExtensionsConfig == None and goalState.ExtensionsConfigXml != None : reportHandlerStatusCount = 0 #Reset count when new goal state comes goalState.ExtensionsConfig = ExtensionsConfig().Parse(goalState.ExtensionsConfigXml) # report the status/heartbeat results of extension processing if goalState.ExtensionsConfig != None : ret = goalState.ExtensionsConfig.ReportHandlerStatus() if ret != 0: Error("Failed to report handler status") elif reportHandlerStatusCount % 1000 == 0: #Agent report handler status every 25 seconds. Reduce the log entries by adding a count Log("Successfully reported handler status") reportHandlerStatusCount += 1 if not eventMonitor: eventMonitor = WALAEventMonitor(self.HttpPostWithHeaders) eventMonitor.StartEventsLoop() time.sleep(25 - sleepToReduceAccessDenied) WaagentLogrotate = """\ /var/log/waagent.log { monthly rotate 6 notifempty missingok } """ def GetMountPoint(mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def FindInLinuxKernelCmdline(option): """ Return match object if 'option' is present in the kernel boot options of the grub configuration. """ m=None matchs=r'^.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?'+option+r'.*$' try: m=FindStringInFile(MyDistro.grubKernelBootOptionsFile,matchs) except IOError, e: Error('FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return m def AppendToLinuxKernelCmdline(option): """ Add 'option' to the kernel boot options of the grub configuration. """ if not FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r')(.*?)("?)$' rep=r'\1\2 '+ option + r'\3' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def RemoveFromLinuxKernelCmdline(option): """ Remove 'option' to the kernel boot options of the grub configuration. 
""" if FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?)('+option+r')(.*?)("?)$' rep=r'\1\3\4' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def FindStringInFile(fname,matchs): """ Return match object if found in file. """ try: ms=re.compile(matchs) for l in (open(fname,'r')).readlines(): m=re.search(ms,l) if m: return m except: raise return None def ReplaceStringInFile(fname,src,repl): """ Replace 'src' with 'repl' in file. """ try: sr=re.compile(src) if FindStringInFile(fname,src): updated='' for l in (open(fname,'r')).readlines(): n=re.sub(sr,repl,l) updated+=n ReplaceFileContentsAtomic(fname,updated) except : raise return def ApplyVNUMAWorkaround(): """ If kernel version has NUMA bug, add 'numa=off' to kernel boot options. """ VersionParts = platform.release().replace('-', '.').split('.') if int(VersionParts[0]) > 2: return if int(VersionParts[1]) > 6: return if int(VersionParts[2]) > 37: return if AppendToLinuxKernelCmdline("numa=off") == 0 : Log("Your kernel version " + platform.release() + " has a NUMA-related bug: NUMA has been disabled.") else : "Error adding 'numa=off'. NUMA has not been disabled." def RevertVNUMAWorkaround(): """ Remove 'numa=off' from kernel boot options. """ if RemoveFromLinuxKernelCmdline("numa=off") == 0 : Log('NUMA has been re-enabled') else : Log('NUMA has not been re-enabled') def Install(): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) ) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", MyDistro.waagent_conf_file) SetFileContents("/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") ApplyVNUMAWorkaround() return 0 def GetMyDistro(dist_class_name=''): """ Return MyDistro object. NOTE: Logging is not initialized at this point. """ if dist_class_name == '': if 'Linux' in platform.system(): Distro=DistInfo()[0] else : # I know this is not Linux! if 'FreeBSD' in platform.system(): Distro=platform.system() Distro=Distro.strip('"') Distro=Distro.strip(' ') dist_class_name=Distro+'Distro' else: Distro=dist_class_name if not globals().has_key(dist_class_name): print Distro+' is not a supported distribution.' return None return globals()[dist_class_name]() # the distro class inside this module. 
def DistInfo(fullname=0): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', str(platform.release())) distinfo = ['FreeBSD', release] return distinfo if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=fullname)) distinfo[0] = distinfo[0].strip() # remove trailing whitespace in distro name if os.path.exists("/etc/euleros-release"): distinfo[0] = "euleros" return distinfo else: return platform.dist() def PackagedInstall(buildroot): """ Called from setup.py for use by RPM. Generic implementation Creates directories and files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent, /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot. Copies generated files waagent.conf, into place and exits. """ MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) MyDistro.packagedInstall(buildroot) def LibraryInstall(buildroot): pass def Uninstall(): """ Uninstall the agent service. Copy RulesFiles back to original locations. Delete agent-related files. Call RevertVNUMAWorkaround(). """ SwitchCwd() for a in RulesFiles: if os.path.isfile(GetLastPathElement(a)): try: shutil.move(GetLastPathElement(a), a) Warn("Moved " + LibDir + "/" + GetLastPathElement(a) + " -> " + a ) except: pass MyDistro.unregisterAgentService() MyDistro.uninstallDeleteFiles() RevertVNUMAWorkaround() return 0 def Deprovision(force, deluser): """ Remove user accounts created by provisioning. Disables root password if Provisioning.DeleteRootPassword = 'y' Stop agent service. Remove SSH host keys if they were generated by the provision. Set hostname to 'localhost.localdomain'. Delete cached system configuration files in /var/lib and /var/lib/waagent. """ #Append blank line at the end of file, so the ctime of this file is changed every time Run("echo ''>>"+ MyDistro.getConfigurationPath()) SwitchCwd() ovfxml = GetFileContents(LibDir+"/ovf-env.xml") ovfobj = None if ovfxml != None: ovfobj = OvfEnv().Parse(ovfxml, True) print("WARNING! The waagent service will be stopped.") print("WARNING! All SSH host key pairs will be deleted.") print("WARNING! Cached DHCP leases will be deleted.") MyDistro.deprovisionWarnUser() delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): print("WARNING! root password will be disabled. You will not be able to login as root.") if ovfobj != None and deluser == True: print("WARNING! " + ovfobj.UserName + " account and entire home directory will be deleted.") if force == False and not raw_input('Do you want to proceed (y/n)? ').startswith('y'): return 1 MyDistro.stopAgentService() # Remove SSH host keys regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") # Remove root password if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() # Remove distribution specific networking configuration MyDistro.publishHostname('localhost.localdomain') MyDistro.deprovisionDeleteFiles() if deluser == True: MyDistro.DeleteAccount(ovfobj.UserName) return 0 def SwitchCwd(): """ Switch to cwd to /var/lib/waagent. Create if not present. """ CreateDir(LibDir, "root", 0700) os.chdir(LibDir) def Usage(): """ Print the arguments to waagent. 
""" print("usage: " + sys.argv[0] + " [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]") return 0 def main(): """ Instantiate MyDistro, exit if distro class is not defined. Parse command-line arguments, exit with usage() on error. Instantiate ConfigurationProvider. Call appropriate non-daemon methods and exit. If daemon mode, enter Agent.Run() loop. """ if GuestAgentVersion == "": print("WARNING! This is a non-standard agent that does not include a valid version string.") if len(sys.argv) == 1: sys.exit(Usage()) LoggerInit('/var/log/waagent.log','/dev/console') global LinuxDistro LinuxDistro=DistInfo()[0] global MyDistro MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) args = [] conf_file = None global force force = False for a in sys.argv[1:]: if re.match("^([-/]*)(help|usage|\?)", a): sys.exit(Usage()) elif re.match("^([-/]*)version", a): print(GuestAgentVersion + " running on " + LinuxDistro) sys.exit(0) elif re.match("^([-/]*)verbose", a): myLogger.verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^(?:[-/]*)conf=.+", a): conf_file = re.match("^(?:[-/]*)conf=(.+)", a).groups()[0] elif re.match("^([-/]*)(setup|install)", a): sys.exit(MyDistro.Install()) elif re.match("^([-/]*)(uninstall)", a): sys.exit(Uninstall()) else: args.append(a) global Config Config = ConfigurationProvider(conf_file) logfile = Config.get("Logs.File") if logfile is not None: myLogger.file_path = logfile logconsole = Config.get("Logs.Console") if logconsole is not None and logconsole.lower().startswith("n"): myLogger.con_path = None verbose = Config.get("Logs.Verbose") if verbose != None and verbose.lower().startswith("y"): myLogger.verbose=True global daemon daemon = False for a in args: if re.match("^([-/]*)deprovision\+user", a): sys.exit(Deprovision(force, True)) elif re.match("^([-/]*)deprovision", a): sys.exit(Deprovision(force, False)) elif re.match("^([-/]*)daemon", a): daemon = True elif re.match("^([-/]*)serialconsole", a): AppendToLinuxKernelCmdline("console=ttyS0 earlyprintk=ttyS0") Log("Configured kernel to use ttyS0 as the boot console.") sys.exit(0) else: print("Invalid command line parameter:" + a) sys.exit(1) if daemon == False: sys.exit(Usage()) global modloaded modloaded = False while True: try: SwitchCwd() Log(GuestAgentLongName + " Version: " + GuestAgentVersion) if IsLinux(): Log("Linux Distribution Detected : " + LinuxDistro) global WaAgent WaAgent = Agent() WaAgent.Run() except Exception, e: Error(traceback.format_exc()) Error("Exception: " + str(e)) Log("Restart agent in 15 seconds") time.sleep(15) if __name__ == '__main__' : main() WALinuxAgent-2.2.20/config/000077500000000000000000000000001322477356400153655ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/66-azure-storage.rules000066400000000000000000000030241322477356400214610ustar00rootroot00000000000000ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk" GOTO="azure_end" LABEL="azure_disk" # Root has a GUID of 0000 as the second value # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" # Wellknown SCSI controllers ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", 
ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="azure_datadisk" GOTO="azure_end" # Retrieve LUN number for datadisks LABEL="azure_datadisk" ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" GOTO="azure_end" # Create the symlinks LABEL="azure_names" ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" LABEL="azure_end" WALinuxAgent-2.2.20/config/99-azure-product-uuid.rules000066400000000000000000000005271322477356400224540ustar00rootroot00000000000000SUBSYSTEM!="dmi", GOTO="product_uuid-exit" ATTR{sys_vendor}!="Microsoft Corporation", GOTO="product_uuid-exit" ATTR{product_name}!="Virtual Machine", GOTO="product_uuid-exit" TEST!="/sys/devices/virtual/dmi/id/product_uuid", GOTO="product_uuid-exit" RUN+="/bin/chmod 0444 /sys/devices/virtual/dmi/id/product_uuid" LABEL="product_uuid-exit" WALinuxAgent-2.2.20/config/alpine/000077500000000000000000000000001322477356400166355ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/alpine/waagent.conf000066400000000000000000000045341322477356400211400ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable logging to serial console (y|n) # When stdout is not enough... # 'y' if not set Logs.Console=y # Enable verbose logging (y|n) Logs.Verbose=n # Preferred network interface to communicate with Azure platform Network.Interface=eth0 # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/arch/000077500000000000000000000000001322477356400163025ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/arch/waagent.conf000066400000000000000000000054741322477356400206110ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. 
# Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/bigip/000077500000000000000000000000001322477356400164575ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/bigip/waagent.conf000066400000000000000000000050351322477356400207570ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. # waagent cannot do this on BIG-IP VE Provisioning.MonitorHostName=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Specify location of waagent lib dir on BIG-IP Lib.Dir=/shared/vadc/azure/waagent/ # Specify location of sshd config file on BIG-IP OS.SshdConfigPath=/config/ssh/sshd_config # Disable RDMA management and set up OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/clearlinux/000077500000000000000000000000001322477356400175335ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/clearlinux/waagent.conf000066400000000000000000000044751322477356400220420ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. 
Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable self-update, default is enabled AutoUpdate.Enabled=y AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/coreos/000077500000000000000000000000001322477356400166575ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/coreos/waagent.conf000066400000000000000000000054761322477356400211700ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks OS.AllowHTTP=y # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/freebsd/000077500000000000000000000000001322477356400167775ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/freebsd/waagent.conf000066400000000000000000000053541322477356400213030ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs' here. ResourceDisk.Filesystem=ufs # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd OS.SudoersDir=/usr/local/etc/sudoers.d # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/gaia/000077500000000000000000000000001322477356400162665ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/gaia/waagent.conf000066400000000000000000000053711322477356400205710ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. Provisioning.PasswordCryptId=1 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext3 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=y # Size of the swapfile. ResourceDisk.SwapSizeMB=1024 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=/var/lib/waagent/openssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=n # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/openbsd/000077500000000000000000000000001322477356400170175ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/openbsd/waagent.conf000066400000000000000000000054011322477356400213140ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. OpenBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ufs2 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=y # Max size of the swap partition in MB ResourceDisk.SwapSizeMB=65536 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=/usr/local/bin/eopenssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/suse/000077500000000000000000000000001322477356400163445ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/suse/waagent.conf000066400000000000000000000054761322477356400206550ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/ubuntu/000077500000000000000000000000001322477356400167075ustar00rootroot00000000000000WALinuxAgent-2.2.20/config/ubuntu/waagent.conf000066400000000000000000000053551322477356400212140ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=n # Rely on cloud-init to provision Provisioning.UseCloudInit=y # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable RDMA kernel update, this value is effective on Ubuntu # OS.UpdateRdmaDriver=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/waagent.conf000066400000000000000000000054761322477356400176760ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/config/waagent.logrotate000066400000000000000000000001341322477356400207330ustar00rootroot00000000000000/var/log/waagent.log { compress monthly rotate 6 notifempty missingok } WALinuxAgent-2.2.20/init/000077500000000000000000000000001322477356400150635ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/arch/000077500000000000000000000000001322477356400160005ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/arch/waagent.service000066400000000000000000000005371322477356400210150ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.20/init/clearlinux/000077500000000000000000000000001322477356400172315ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/clearlinux/waagent.service000066400000000000000000000005661322477356400222500ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/usr/share/defaults/waagent/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.20/init/coreos/000077500000000000000000000000001322477356400163555ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/coreos/cloud-config.yml000066400000000000000000000023511322477356400214520ustar00rootroot00000000000000#cloud-config coreos: units: - name: etcd.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_PEER_ELECTION_TIMEOUT=1200 - name: etcd2.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_ELECTION_TIMEOUT=1200 - name: waagent.service command: start runtime: true content: | [Unit] Description=Microsoft Azure Agent Wants=network-online.target sshd-keygen.service After=network-online.target sshd-keygen.service [Service] Type=simple Restart=always RestartSec=5s ExecStart=/usr/share/oem/python/bin/python /usr/share/oem/bin/waagent 
-daemon - name: oem-cloudinit.service command: restart runtime: yes content: | [Unit] Description=Cloudinit from Azure metadata [Service] Type=oneshot ExecStart=/usr/bin/coreos-cloudinit --oem=azure oem: id: azure name: Microsoft Azure version-id: 2.1.4 home-url: https://azure.microsoft.com/ bug-report-url: https://github.com/coreos/bugs/issues WALinuxAgent-2.2.20/init/freebsd/000077500000000000000000000000001322477356400164755ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/freebsd/waagent000077500000000000000000000005151322477356400200520ustar00rootroot00000000000000#!/bin/sh # PROVIDE: waagent # REQUIRE: sshd netif dhclient # KEYWORD: nojail . /etc/rc.subr PATH=$PATH:/usr/local/bin:/usr/local/sbin name="waagent" rcvar="waagent_enable" pidfile="/var/run/waagent.pid" command="/usr/local/sbin/${name}" command_interpreter="python" command_args="start" load_rc_config $name run_rc_command "$1" WALinuxAgent-2.2.20/init/gaia/000077500000000000000000000000001322477356400157645ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/gaia/waagent000077500000000000000000000014561322477356400173460ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent.sh start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start & success echo } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.20/init/openbsd/000077500000000000000000000000001322477356400165155ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/openbsd/waagent000066400000000000000000000002311322477356400200620ustar00rootroot00000000000000#!/bin/sh daemon="python2.7 /usr/local/sbin/waagent -start" . /etc/rc.d/rc.subr pexp="python /usr/local/sbin/waagent -daemon" rc_reload=NO rc_cmd $1 WALinuxAgent-2.2.20/init/suse/000077500000000000000000000000001322477356400160425ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/suse/waagent000077500000000000000000000062011322477356400174150ustar00rootroot00000000000000#! /bin/sh # # Microsoft Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: MicrosoftAzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the MicrosoftAzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting MicrosoftAzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -start rc_status -v ;; stop) echo -n "Shutting down MicrosoftAzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service MicrosoftAzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit WALinuxAgent-2.2.20/init/ubuntu/000077500000000000000000000000001322477356400164055ustar00rootroot00000000000000WALinuxAgent-2.2.20/init/ubuntu/walinuxagent000066400000000000000000000001321322477356400210320ustar00rootroot00000000000000# To disable the Microsoft Azure Agent, set WALINUXAGENT_ENABLED=0 WALINUXAGENT_ENABLED=1 WALinuxAgent-2.2.20/init/ubuntu/walinuxagent.conf000066400000000000000000000007321322477356400217640ustar00rootroot00000000000000description "Microsoft Azure Linux agent" author "Ben Howard " start on runlevel [2345] stop on runlevel [!2345] pre-start script [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then stop ; exit 0 fi if [ ! -x /usr/sbin/waagent ]; then stop ; exit 0 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon respawn WALinuxAgent-2.2.20/init/ubuntu/walinuxagent.service000077500000000000000000000010251322477356400224760ustar00rootroot00000000000000# # NOTE: # This file hosted on WALinuxAgent repository only for reference purposes. # Please refer to a recent image to find out the up-to-date systemd unit file. 
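#
# Illustrative sketch only (not part of the packaged unit): on a running image,
# local adjustments are normally made with a systemd drop-in rather than by
# editing the shipped file, e.g. a hypothetical
# /etc/systemd/system/walinuxagent.service.d/override.conf containing:
#
#   [Service]
#   Restart=always
#   RestartSec=5
#
# followed by `systemctl daemon-reload` and `systemctl restart walinuxagent`.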
# [Unit] Description=Azure Linux Agent After=network-online.target cloud-init.service Wants=network-online.target sshd.service sshd-keygen.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python3 -u /usr/sbin/waagent -daemon Restart=always [Install] WantedBy=multi-user.target WALinuxAgent-2.2.20/init/waagent000077500000000000000000000014761322477356400164470ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start RETVAL=$? echo return $RETVAL } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.20/init/waagent.service000066400000000000000000000005411322477356400200730ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/sbin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.20/makepkg.py000077500000000000000000000067601322477356400161250ustar00rootroot00000000000000#!/usr/bin/env python import glob import os import os.path import shutil import subprocess import sys from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_LONG_VERSION from azurelinuxagent.ga.update import AGENT_MANIFEST_FILE MANIFEST = '''[{{ "name": "{0}", "version": 1.0, "handlerManifest": {{ "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "python -u {1} -run-exthandlers", "disableCommand": "", "rebootAfterInstall": false, "reportHeartbeat": false }} }}]''' PUBLISH_MANIFEST = ''' Microsoft.OSTCLinuxAgent {1} {0} VmRole Microsoft Azure Guest Agent for Linux IaaS true https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent true Microsoft Linux ''' PUBLISH_MANIFEST_FILE = 'manifest.xml' output_path = os.path.join(os.getcwd(), "eggs") target_path = os.path.join(output_path, AGENT_LONG_VERSION) bin_path = os.path.join(target_path, "bin") egg_path = os.path.join(bin_path, AGENT_LONG_VERSION + ".egg") manifest_path = os.path.join(target_path, AGENT_MANIFEST_FILE) publish_manifest_path = os.path.join(target_path, PUBLISH_MANIFEST_FILE) pkg_name = os.path.join(output_path, AGENT_LONG_VERSION + ".zip") family = 'Test' if len(sys.argv) > 1: family = sys.argv[1] def do(*args): try: subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("ERROR: {0}".format(str(e))) print("\t{0}".format(" ".join(args))) print(e.output) sys.exit(1) if os.path.isdir(target_path): shutil.rmtree(target_path) elif os.path.isfile(target_path): os.remove(target_path) if os.path.isfile(pkg_name): os.remove(pkg_name) os.makedirs(bin_path) print("Created {0} directory".format(target_path)) args = ["python", "setup.py", "bdist_egg", 
"--dist-dir={0}".format(bin_path)] print("Creating egg {0}".format(egg_path)) do(*args) egg_name = os.path.join("bin", os.path.basename( glob.glob(os.path.join(bin_path, "*"))[0])) print("Writing {0}".format(manifest_path)) with open(manifest_path, mode='w') as manifest: manifest.write(MANIFEST.format(AGENT_NAME, egg_name)) print("Writing {0}".format(publish_manifest_path)) with open(publish_manifest_path, mode='w') as publish_manifest: publish_manifest.write(PUBLISH_MANIFEST.format(AGENT_VERSION, family)) cwd = os.getcwd() os.chdir(target_path) print("Creating package {0}".format(pkg_name)) do("zip", "-r", pkg_name, egg_name) do("zip", "-j", pkg_name, AGENT_MANIFEST_FILE) do("zip", "-j", pkg_name, PUBLISH_MANIFEST_FILE) os.chdir(cwd) print("Package {0} successfully created".format(pkg_name)) sys.exit(0) WALinuxAgent-2.2.20/setup.py000077500000000000000000000176541322477356400156520ustar00rootroot00000000000000#!/usr/bin/env python # # Microsoft Azure Linux Agent setup.py # # Copyright 2013 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_DESCRIPTION, \ DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil import get_osutil import setuptools from setuptools import find_packages from setuptools.command.install import install as _install root_dir = os.path.dirname(os.path.abspath(__file__)) os.chdir(root_dir) def set_files(data_files, dest=None, src=None): data_files.append((dest, src)) def set_bin_files(data_files, dest="/usr/sbin", src=["bin/waagent", "bin/waagent2.0"]): data_files.append((dest, src)) def set_conf_files(data_files, dest="/etc", src=["config/waagent.conf"]): data_files.append((dest, src)) def set_logrotate_files(data_files, dest="/etc/logrotate.d", src=["config/waagent.logrotate"]): data_files.append((dest, src)) def set_sysv_files(data_files, dest="/etc/rc.d/init.d", src=["init/waagent"]): data_files.append((dest, src)) def set_systemd_files(data_files, dest="/lib/systemd/system", src=["init/waagent.service"]): data_files.append((dest, src)) def set_freebsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/freebsd/waagent"]): data_files.append((dest, src)) def set_openbsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/openbsd/waagent"]): data_files.append((dest, src)) def set_udev_files(data_files, dest="/etc/udev/rules.d/", src=["config/66-azure-storage.rules", "config/99-azure-product-uuid.rules"]): data_files.append((dest, src)) def get_data_files(name, version, fullname): """ Determine data_files according to distro name, version and init system type """ data_files = [] if name == 'redhat' or name == 'centos': set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("6"): set_sysv_files(data_files) else: # redhat7.0+ use systemd set_systemd_files(data_files, dest="/usr/lib/systemd/system") if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 
7.1 set_sysv_files(data_files) elif name == 'arch': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, src=["config/arch/waagent.conf"]) set_udev_files(data_files) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/arch/waagent.service"]) elif name == 'coreos': set_bin_files(data_files, dest="/usr/share/oem/bin") set_conf_files(data_files, dest="/usr/share/oem", src=["config/coreos/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) set_files(data_files, dest="/usr/share/oem", src=["init/coreos/cloud-config.yml"]) elif name == 'clear linux os for intel architecture' \ or name == 'clear linux software for intel architecture': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, dest="/usr/share/defaults/waagent", src=["config/clearlinux/waagent.conf"]) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/clearlinux/waagent.service"]) elif name == 'ubuntu': set_bin_files(data_files) set_conf_files(data_files, src=["config/ubuntu/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("12") or version.startswith("14"): # Ubuntu12.04/14.04 - uses upstart set_files(data_files, dest="/etc/init", src=["init/ubuntu/walinuxagent.conf"]) set_files(data_files, dest='/etc/default', src=['init/ubuntu/walinuxagent']) elif fullname == 'Snappy Ubuntu Core': set_files(data_files, dest="", src=["init/ubuntu/snappy/walinuxagent.yml"]) else: # Ubuntu15.04+ uses systemd set_systemd_files(data_files, src=["init/ubuntu/walinuxagent.service"]) elif name == 'suse': set_bin_files(data_files) set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if fullname == 'SUSE Linux Enterprise Server' and \ version.startswith('11') or \ fullname == 'openSUSE' and version.startswith( '13.1'): set_sysv_files(data_files, dest='/etc/init.d', src=["init/suse/waagent"]) else: # sles 12+ and openSUSE 13.2+ use systemd set_systemd_files(data_files, dest='/usr/lib/systemd/system') elif name == 'freebsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/freebsd/waagent.conf"]) set_freebsd_rc_files(data_files) elif name == 'openbsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/openbsd/waagent.conf"]) set_openbsd_rc_files(data_files) else: # Use default setting set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) set_sysv_files(data_files) return data_files class install(_install): user_options = _install.user_options + [ ('lnx-distro=', None, 'target Linux distribution'), ('lnx-distro-version=', None, 'target Linux distribution version'), ('lnx-distro-fullname=', None, 'target Linux distribution full name'), ('register-service', None, 'register as startup service and start'), ('skip-data-files', None, 'skip data files installation'), ] def initialize_options(self): _install.initialize_options(self) self.lnx_distro = DISTRO_NAME self.lnx_distro_version = DISTRO_VERSION self.lnx_distro_fullname = DISTRO_FULL_NAME self.register_service = False self.skip_data_files = False def finalize_options(self): _install.finalize_options(self) if self.skip_data_files: return data_files = get_data_files(self.lnx_distro, self.lnx_distro_version, self.lnx_distro_fullname) self.distribution.data_files = data_files self.distribution.reinitialize_command('install_data', True) def run(self): _install.run(self) if 
self.register_service: osutil = get_osutil() osutil.register_agent_service() osutil.stop_agent_service() osutil.start_agent_service() setuptools.setup( name=AGENT_NAME, version=AGENT_VERSION, long_description=AGENT_DESCRIPTION, author='Microsoft Corporation', author_email='walinuxagent@microsoft.com', platforms='Linux', url='https://github.com/Azure/WALinuxAgent', license='Apache License Version 2.0', packages=find_packages(exclude=["tests"]), py_modules=["__main__"], cmdclass={ 'install': install } ) WALinuxAgent-2.2.20/tests/000077500000000000000000000000001322477356400152625ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/__init__.py000066400000000000000000000011651322477356400173760ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/common/000077500000000000000000000000001322477356400165525ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/common/__init__.py000066400000000000000000000011651322477356400206660ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/common/dhcp/000077500000000000000000000000001322477356400174705ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/common/dhcp/__init__.py000066400000000000000000000011651322477356400216040ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/common/dhcp/test_dhcp.py000066400000000000000000000104161322477356400220210ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import mock import azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.osutil.default as osutil from tests.tools import * class TestDHCP(AgentTestCase): def test_wireserver_route_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) # execute routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric " \ "Mask MTU Window IRTT \n\ eth0 00000000 10813FA8 0003 0 0 5 " \ "00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 " \ "00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 " \ "00FCFFFF 0 0 0 \n" with patch("os.path.exists", return_value=True): mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is not None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_wireserver_route_not_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) # execute self.assertFalse(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_dhcp_cache_exists(self): dhcp_handler = dhcp.get_dhcp_handler() dhcp_handler.osutil = osutil.DefaultOSUtil() with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value=None): self.assertFalse(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, None) with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value="foo"): self.assertTrue(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, "foo") def test_dhcp_skip_cache(self): handler = dhcp.get_dhcp_handler() handler.osutil = osutil.DefaultOSUtil() with patch('os.path.exists', return_value=False): with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint')\ as patch_dhcp_cache: with patch.object(dhcp.DhcpHandler, 'send_dhcp_req') \ as patch_dhcp_send: endpoint = 'foo' patch_dhcp_cache.return_value = endpoint # endpoint comes from cache self.assertFalse(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 0) self.assertTrue(handler.endpoint == endpoint) # reset handler.skip_cache = True handler.endpoint = None # endpoint comes from dhcp request self.assertTrue(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 1) WALinuxAgent-2.2.20/tests/common/osutil/000077500000000000000000000000001322477356400200715ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/common/osutil/__init__.py000066400000000000000000000011651322477356400222050ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 
2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/common/osutil/test_bigip.py000066400000000000000000000257541322477356400226110ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import os import socket import time import azurelinuxagent.common.osutil.bigip as osutil import azurelinuxagent.common.osutil.default as default import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from tests.tools import * class TestBigIpOSUtil_wait_until_mcpd_is_initialized(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "info", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._wait_until_mcpd_is_initialized( osutil.BigIpOSUtil() ) self.assertEqual(result, True) # There are two logger calls in the mcpd wait function. 
The second # occurs after mcpd is found to be "up" self.assertEqual(args[0].call_count, 2) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "info", return_value=None) @patch.object(time, "sleep", return_value=None) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil._wait_until_mcpd_is_initialized, osutil.BigIpOSUtil() ) class TestBigIpOSUtil_save_sys_config(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "error", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 0) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "error", return_value=None) def test_failure(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 1) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_get_dhcp_pid(AgentTestCase): @patch.object(shellutil, "run_get_output", return_value=(0, 8623)) def test_success(self, *args): result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) self.assertEqual(result, 8623) @patch.object(shellutil, "run_get_output", return_value=(1, 'foo')) def test_failure(self, *args): result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) self.assertEqual(result, None) class TestBigIpOSUtil_useradd(AgentTestCase): @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) @patch.object(shellutil, "run_get_output") def test_success(self, *args): args[0].return_value = (0, None) result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'foo', expiration=None ) self.assertEqual(result, 0) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_user_already_exists(self, *args): args[0].return_value = 'admin' result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'admin', expiration=None ) self.assertEqual(result, None) @patch.object(shellutil, "run", return_value=1) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.useradd, osutil.BigIpOSUtil(), 'foo', expiration=None ) class TestBigIpOSUtil_chpasswd(AgentTestCase): @patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=True) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) @patch.object(osutil.BigIpOSUtil, '_save_sys_config', return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil.chpasswd( osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[0].call_count, 1) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=True) def test_is_sys_user(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) @patch.object(shellutil, "run_get_output", return_value=(1, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) def test_failed_to_set_user_password(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) @patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_failed_to_get_user_entry(self, *args): 
self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) class TestBigIpOSUtil_get_dvd_device(AgentTestCase): @patch.object(os, "listdir", return_value=['tty1','cdrom0']) def test_success(self, *args): result = osutil.BigIpOSUtil.get_dvd_device( osutil.BigIpOSUtil(), '/dev' ) self.assertEqual(result, '/dev/cdrom0') @patch.object(os, "listdir", return_value=['foo', 'bar']) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.get_dvd_device, osutil.BigIpOSUtil(), '/dev' ) class TestBigIpOSUtil_restart_ssh_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.restart_ssh_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_stop_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.stop_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_start_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.start_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_register_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.register_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_unregister_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.unregister_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_set_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_set_dhcp_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_dhcp_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_get_first_if(AgentTestCase): @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'eth0') def test_success(self, *args): ifname, ipaddr = osutil.BigIpOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'loenp0s3') def test_success(self, *args): ifname, ipaddr = osutil.BigIpOSUtil().get_first_if() self.assertFalse(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") class TestBigIpOSUtil_mount_dvd(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(time, "sleep", return_value=None) @patch.object(osutil.BigIpOSUtil, '_wait_until_mcpd_is_initialized', return_value=None) @patch.object(default.DefaultOSUtil, 'mount_dvd', return_value=None) def test_success(self, *args): osutil.BigIpOSUtil.mount_dvd( osutil.BigIpOSUtil(), max_retry=6, chk_err=True ) self.assertEqual(args[0].call_count, 1) 
self.assertEqual(args[1].call_count, 1) class TestBigIpOSUtil_route_add(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): osutil.BigIpOSUtil.route_add( osutil.BigIpOSUtil(), '10.10.10.0', '255.255.255.0', '10.10.10.1' ) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_device_for_ide_port(AgentTestCase): @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=False) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_waiting(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 99) self.assertEqual(args[2].call_count, 99) @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=True) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_immediate(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 1) self.assertEqual(args[2].call_count, 0) if __name__ == '__main__': unittest.main()WALinuxAgent-2.2.20/tests/common/osutil/test_default.py000066400000000000000000000670621322477356400231410ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import socket import glob import mock import azurelinuxagent.common.osutil.default as osutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from tests.tools import * class TestOSUtil(AgentTestCase): def test_restart(self): # setup retries = 3 ifname = 'dummy' with patch.object(shellutil, "run") as run_patch: run_patch.return_value = 1 # execute osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0) # assert self.assertEqual(run_patch.call_count, retries) self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname)) def test_get_dvd_device_success(self): with patch.object(os, 'listdir', return_value=['cpu', 'cdrom0']): osutil.DefaultOSUtil().get_dvd_device() def test_get_dvd_device_failure(self): with patch.object(os, 'listdir', return_value=['cpu', 'notmatching']): try: osutil.DefaultOSUtil().get_dvd_device() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue('notmatching' in ustr(ose)) @patch('time.sleep') def test_mount_dvd_success(self, _): msg = 'message' with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(0, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() except OSUtilError: self.fail("mounting failed") @patch('time.sleep') def test_mount_dvd_failure(self, _): msg = 'message' with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(1, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue(msg in ustr(ose)) self.assertTrue(patch_run.call_count == 6) def test_get_first_if(self): ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") def test_isloopback(self): self.assertTrue(osutil.DefaultOSUtil().is_loopback(b'lo')) self.assertFalse(osutil.DefaultOSUtil().is_loopback(b'eth0')) def test_isprimary(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ eth0 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('lo')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('eth0')) def test_sriov(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n" \ "bond0 00000000 0100000A 0003 0 0 0 00000000 0 0 0 \n" \ "bond0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "eth0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "bond0 10813FA8 0100000A 0007 0 0 0 00000000 0 0 0 \n" \ "bond0 FEA9FEA9 0100000A 0007 0 0 0 00000000 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): 
self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('eth0')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('bond0')) def test_multiple_default_routes(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ high 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ low1 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('low1')) def test_multiple_interfaces(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ first 00000000 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ secnd 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('first')) def test_interface_flags(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ nflg 00000000 01345B0A 0001 0 0 1 00000000 0 0 0 \n\ flgs 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('flgs')) def test_no_interface(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ ndst 00000001 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ nflg 00000000 01345B0A 0001 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('ndst')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('nflg')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('invalid')) def test_no_primary_does_not_throw(self): with patch.object(osutil.DefaultOSUtil, 'get_primary_interface') \ as patch_primary: exception = False patch_primary.return_value = '' try: osutil.DefaultOSUtil().get_first_if()[0] except Exception as e: exception = True self.assertFalse(exception) def test_dhcp_lease_default(self): self.assertTrue(osutil.DefaultOSUtil().get_dhcp_lease_endpoint() is None) def test_dhcp_lease_ubuntu(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") def test_dhcp_lease_multi(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "second") def test_get_total_mem(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep MemTotal /proc/meminfo |awk '{print $2}'" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]) 
/ 1024, get_osutil().get_total_mem()) else: self.fail("Cannot retrieve total memory using shell command.") def test_get_processor_cores(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep 'processor.*:' /proc/cpuinfo |wc -l" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]), get_osutil().get_processor_cores()) else: self.fail("Cannot retrieve number of process cores using shell command.") def test_conf_sshd(self): new_file = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication yes\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ " expected_output = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication no\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match(self): new_file = "\ Port 22\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ ChallengeResponseAuthentication no\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_last(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_middle(self): new_file = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with 
patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple_first_last(self): new_file = "\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_correct_instance_id(self): util = osutil.DefaultOSUtil() self.assertEqual( "12345678-1234-1234-1234-123456789012", util._correct_instance_id("78563412-3412-3412-1234-123456789012")) self.assertEqual( "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8", util._correct_instance_id("544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8")) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="33C2F3B9-1399-429F-8EB3-BA656DF32502") def test_get_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="") def test_get_instance_id_empty_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="Value") def test_get_instance_id_malformed_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "Value", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, '33C2F3B9-1399-429F-8EB3-BA656DF32502']) def test_get_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[1, 'Error Value']) def test_get_instance_id_missing(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, 'Unexpected Value']) def test_get_instance_id_unexpected(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file') def test_is_current_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() mock_read.return_value = "B9F3C233-9913-9F42-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = "33C2F3B9-1399-429F-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) 
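    # Note on the UUID pairs used in the instance-id tests above and below:
    # SMBIOS stores the first three UUID fields little-endian, so the agent
    # byte-swaps them when reading the DMI value; "33C2F3B9-1399-429F-..."
    # (raw DMI form) and "B9F3C233-9913-9F42-..." (corrected form) are
    # therefore expected to identify the same instance.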
@patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_is_current_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() mock_shell.return_value = [0, 'B9F3C233-9913-9F42-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_shell.return_value = [0, '33C2F3B9-1399-429F-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) @patch('azurelinuxagent.common.conf.get_sudoers_dir') def test_conf_sudoer(self, mock_dir): tmp_dir = tempfile.mkdtemp() mock_dir.return_value = tmp_dir util = osutil.DefaultOSUtil() # Assert the sudoer line is added if missing util.conf_sudoer("FooBar") waagent_sudoers = os.path.join(tmp_dir, 'waagent') self.assertTrue(os.path.isfile(waagent_sudoers)) count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) self.assertEqual(1, count) # Assert the line does not get added a second time util.conf_sudoer("FooBar") count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) print("WRITING TO {0}".format(waagent_sudoers)) self.assertEqual(1, count) def test_get_firewall_dropped_packets_returns_zero_if_firewall_disabled(self): osutil._enable_firewall = False util = osutil.DefaultOSUtil() self.assertEqual(0, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_error(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, "not used")] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_exception(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, Exception)] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (0, ''' Chain OUTPUT (policy ACCEPT 104 packets, 43628 bytes) pkts bytes target prot opt in out source destination 0 0 ACCEPT tcp -- any any anywhere 168.63.129.16 owner UID match daemon 32 1920 DROP tcp -- any any anywhere 168.63.129.16 ''')] dst = '168.63.129.16' self.assertEqual(32, util.get_firewall_dropped_packets(dst)) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) 
mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_no_wait(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION-1) wait = "" mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_drop_exists(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 0, 0] mock_output.return_value = (0, version) self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_ignores_exceptions(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, Exception] mock_output.return_value = (0, version) self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertFalse(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_disabled(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = False util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_not_called() mock_output.assert_not_called() mock_uid.assert_not_called() self.assertFalse(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 
version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.remove_firewall()) mock_run.assert_has_calls([ call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall_does_not_repeat(self, mock_run, mock_output, _): osutil._enable_firewall = True util = osutil.DefaultOSUtil() version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, 0] mock_output.side_effect = [(0, version), (1, "Output")] self.assertFalse(util.remove_firewall()) mock_run.assert_has_calls([ call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertFalse(osutil._enable_firewall) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) self.assertFalse(util.remove_firewall()) self.assertFalse(util.remove_firewall()) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/common/test_conf.py000066400000000000000000000100401322477356400211030ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import mock import os.path from azurelinuxagent.common.conf import * from tests.tools import * class TestConf(AgentTestCase): # Note: # -- These values *MUST* match those from data/test_waagent.conf EXPECTED_CONFIGURATION = { "Provisioning.Enabled" : True, "Provisioning.UseCloudInit" : True, "Provisioning.DeleteRootPassword" : True, "Provisioning.RegenerateSshHostKeyPair" : True, "Provisioning.SshHostKeyPairType" : "rsa", "Provisioning.MonitorHostName" : True, "Provisioning.DecodeCustomData" : False, "Provisioning.ExecuteCustomData" : False, "Provisioning.PasswordCryptId" : '6', "Provisioning.PasswordCryptSaltLength" : 10, "Provisioning.AllowResetSysUser" : False, "ResourceDisk.Format" : True, "ResourceDisk.Filesystem" : "ext4", "ResourceDisk.MountPoint" : "/mnt/resource", "ResourceDisk.EnableSwap" : False, "ResourceDisk.SwapSizeMB" : 0, "ResourceDisk.MountOptions" : None, "Logs.Verbose" : False, "OS.EnableFIPS" : True, "OS.RootDeviceScsiTimeout" : '300', "OS.OpensslPath" : '/usr/bin/openssl', "OS.SshClientAliveInterval" : 42, "OS.SshDir" : "/notareal/path", "HttpProxy.Host" : None, "HttpProxy.Port" : None, "DetectScvmmEnv" : False, "Lib.Dir" : "/var/lib/waagent", "DVD.MountPoint" : "/mnt/cdrom/secure", "Pid.File" : "/var/run/waagent.pid", "Extension.LogDir" : "/var/log/azure", "OS.HomeDir" : "/home", "OS.EnableRDMA" : False, "OS.UpdateRdmaDriver" : False, "OS.CheckRdmaDriver" : False, "AutoUpdate.Enabled" : True, "AutoUpdate.GAFamily" : "Prod", "EnableOverProvisioning" : False, "OS.AllowHTTP" : False, "OS.EnableFirewall" : True } def setUp(self): AgentTestCase.setUp(self) self.conf = ConfigurationProvider() load_conf_from_file( os.path.join(data_dir, "test_waagent.conf"), self.conf) def test_key_value_handling(self): self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad")) self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", "Bad")) def test_get_ssh_dir(self): self.assertTrue(get_ssh_dir(self.conf).startswith("/notareal/path")) def test_get_sshd_conf_file_path(self): self.assertTrue(get_sshd_conf_file_path( self.conf).startswith("/notareal/path")) def test_get_ssh_key_glob(self): self.assertTrue(get_ssh_key_glob( self.conf).startswith("/notareal/path")) def test_get_ssh_key_private_path(self): self.assertTrue(get_ssh_key_private_path( self.conf).startswith("/notareal/path")) def test_get_ssh_key_public_path(self): self.assertTrue(get_ssh_key_public_path( self.conf).startswith("/notareal/path")) def test_get_fips_enabled(self): self.assertTrue(get_fips_enabled(self.conf)) def test_get_provision_cloudinit(self): self.assertTrue(get_provision_cloudinit(self.conf)) def test_get_configuration(self): configuration = conf.get_configuration(self.conf) self.assertTrue(len(configuration.keys()) > 0) for k in TestConf.EXPECTED_CONFIGURATION.keys(): self.assertEqual( TestConf.EXPECTED_CONFIGURATION[k], configuration[k]) WALinuxAgent-2.2.20/tests/common/test_event.py000066400000000000000000000217361322477356400213150ustar00rootroot00000000000000# Copyright 2017 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from __future__ import print_function

from datetime import datetime

import azurelinuxagent.common.event as event
import azurelinuxagent.common.logger as logger

from azurelinuxagent.common.event import add_event, \
    mark_event_status, should_emit_event, \
    WALAEventOperation
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.version import CURRENT_VERSION

from tests.tools import *


class TestEvent(AgentTestCase):

    def test_event_status_event_marked(self):
        es = event.__event_status__

        self.assertFalse(es.event_marked("Foo", "1.2", "FauxOperation"))
        es.mark_event_status("Foo", "1.2", "FauxOperation", True)
        self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation"))

        event.__event_status__ = event.EventStatus()
        event.init_event_status(self.tmp_dir)
        es = event.__event_status__
        self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation"))

    def test_event_status_defaults_to_success(self):
        es = event.__event_status__
        self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation"))

    def test_event_status_records_status(self):
        d = tempfile.mkdtemp()
        es = event.EventStatus(tempfile.mkdtemp())

        es.mark_event_status("Foo", "1.2", "FauxOperation", True)
        self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation"))

        es.mark_event_status("Foo", "1.2", "FauxOperation", False)
        self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation"))

    def test_event_status_preserves_state(self):
        es = event.__event_status__

        es.mark_event_status("Foo", "1.2", "FauxOperation", False)
        self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation"))

        event.__event_status__ = event.EventStatus()
        event.init_event_status(self.tmp_dir)
        es = event.__event_status__
        self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation"))

    def test_should_emit_event_ignores_unknown_operations(self):
        event.__event_status__ = event.EventStatus(tempfile.mkdtemp())

        self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True))
        self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False))

        # Marking the event has no effect
        event.mark_event_status("Foo", "1.2", "FauxOperation", True)

        self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True))
        self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False))

    def test_should_emit_event_handles_known_operations(self):
        event.__event_status__ = event.EventStatus(tempfile.mkdtemp())

        # Known operations always initially "fire"
        for op in event.__event_status_operations__:
            self.assertTrue(event.should_emit_event("Foo", "1.2", op, True))
            self.assertTrue(event.should_emit_event("Foo", "1.2", op, False))

        # Note a success event...
        for op in event.__event_status_operations__:
            event.mark_event_status("Foo", "1.2", op, True)

        # Subsequent success events should not fire, but failures will
        for op in event.__event_status_operations__:
            self.assertFalse(event.should_emit_event("Foo", "1.2", op, True))
            self.assertTrue(event.should_emit_event("Foo", "1.2", op, False))

        # Note a failure event...
        for op in event.__event_status_operations__:
            event.mark_event_status("Foo", "1.2", op, False)

        # Subsequent success events fire and failure do not
        for op in event.__event_status_operations__:
            self.assertTrue(event.should_emit_event("Foo", "1.2", op, True))
            self.assertFalse(event.should_emit_event("Foo", "1.2", op, False))

    @patch('azurelinuxagent.common.event.EventLogger.add_event')
    def test_periodic_emits_if_not_previously_sent(self, mock_event):
        event.__event_logger__.reset_periodic()

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        mock_event.assert_called_once()

    @patch('azurelinuxagent.common.event.EventLogger.add_event')
    def test_periodic_does_not_emit_if_previously_sent(self, mock_event):
        event.__event_logger__.reset_periodic()

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(1, mock_event.call_count)

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(1, mock_event.call_count)

    @patch('azurelinuxagent.common.event.EventLogger.add_event')
    def test_periodic_emits_if_forced(self, mock_event):
        event.__event_logger__.reset_periodic()

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(1, mock_event.call_count)

        event.add_periodic(logger.EVERY_DAY, "FauxEvent", force=True)
        self.assertEqual(2, mock_event.call_count)

    @patch('azurelinuxagent.common.event.EventLogger.add_event')
    def test_periodic_emits_after_elapsed_delta(self, mock_event):
        event.__event_logger__.reset_periodic()

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(1, mock_event.call_count)

        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(1, mock_event.call_count)

        h = hash("FauxEvent"+WALAEventOperation.Unknown+ustr(True))
        event.__event_logger__.periodic_events[h] = \
            datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR
        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        self.assertEqual(2, mock_event.call_count)

    @patch('azurelinuxagent.common.event.EventLogger.add_event')
    def test_periodic_forwards_args(self, mock_event):
        event.__event_logger__.reset_periodic()
        event.add_periodic(logger.EVERY_DAY, "FauxEvent")
        mock_event.assert_called_once_with(
            "FauxEvent",
            duration=0, evt_type='', is_internal=False, is_success=True,
            log_event=True, message='', op=WALAEventOperation.Unknown,
            version=str(CURRENT_VERSION))

    def test_save_event(self):
        add_event('test', message='test event')
        self.assertTrue(len(os.listdir(self.tmp_dir)) == 1)

    def test_save_event_rollover(self):
        add_event('test', message='first event')
        for i in range(0, 999):
            add_event('test', message='test event {0}'.format(i))

        events = os.listdir(self.tmp_dir)
        events.sort()
        self.assertTrue(len(events) == 1000)

        first_event = os.path.join(self.tmp_dir, events[0])
        with open(first_event) as first_fh:
            first_event_text = first_fh.read()
            self.assertTrue('first event' in first_event_text)

        add_event('test', message='last event')
        events = os.listdir(self.tmp_dir)
        events.sort()
        self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events)))

        first_event = os.path.join(self.tmp_dir, events[0])
        with open(first_event) as first_fh:
            first_event_text = first_fh.read()
            self.assertFalse('first event' in first_event_text)
            self.assertTrue('test event 0' in first_event_text)

        last_event = os.path.join(self.tmp_dir, events[-1])
        with open(last_event) as last_fh:
            last_event_text = last_fh.read()
            self.assertTrue('last event' in last_event_text)

    def test_save_event_cleanup(self):
        for i in range(0, 2000):
            evt = os.path.join(self.tmp_dir, '{0}.tld'.format(ustr(1491004920536531 + i)))
            with open(evt, 'w') as fh:
                fh.write('test event {0}'.format(i))

        events = os.listdir(self.tmp_dir)
        self.assertTrue(len(events) == 2000, "{0} events found, 2000 expected".format(len(events)))

        add_event('test', message='last event')
        events = os.listdir(self.tmp_dir)
        events.sort()
        self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events)))

        first_event = os.path.join(self.tmp_dir, events[0])
        with open(first_event) as first_fh:
            first_event_text = first_fh.read()
            self.assertTrue('test event 1001' in first_event_text)

        last_event = os.path.join(self.tmp_dir, events[-1])
        with open(last_event) as last_fh:
            last_event_text = last_fh.read()
            self.assertTrue('last event' in last_event_text)
WALinuxAgent-2.2.20/tests/common/test_logger.py000066400000000000000000000044551322477356400214520ustar00rootroot00000000000000
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from datetime import datetime

import azurelinuxagent.common.logger as logger

from tests.tools import *

_MSG = "This is our test logging message {0} {1}"
_DATA = ["arg1", "arg2"]


class TestLogger(AgentTestCase):

    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_emits_if_not_previously_sent(self, mock_info):
        logger.reset_periodic()

        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        mock_info.assert_called_once()

    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_does_not_emit_if_previously_sent(self, mock_info):
        logger.reset_periodic()

        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        self.assertEqual(1, mock_info.call_count)

        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        self.assertEqual(1, mock_info.call_count)

    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_emits_after_elapsed_delta(self, mock_info):
        logger.reset_periodic()

        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        self.assertEqual(1, mock_info.call_count)

        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        self.assertEqual(1, mock_info.call_count)

        logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG)] = \
            datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR
        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        self.assertEqual(2, mock_info.call_count)

    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_forwards_message_and_args(self, mock_info):
        logger.reset_periodic()
        logger.periodic(logger.EVERY_DAY, _MSG, *_DATA)
        mock_info.assert_called_once_with(_MSG, *_DATA)
WALinuxAgent-2.2.20/tests/common/test_version.py000066400000000000000000000140341322477356400216520ustar00rootroot00000000000000
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from __future__ import print_function

import textwrap

import mock

from azurelinuxagent.common.version import set_current_agent, \
    AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \
    get_f5_platform

from tests.tools import *


class TestCurrentAgentName(AgentTestCase):

    def setUp(self):
        AgentTestCase.setUp(self)
        return

    @patch("os.getcwd", return_value="/default/install/directory")
    def test_extract_name_finds_installed(self, mock_cwd):
        current_agent, current_version = set_current_agent()
        self.assertEqual(AGENT_LONG_VERSION, current_agent)
        self.assertEqual(AGENT_VERSION, str(current_version))
        return

    @patch("os.getcwd", return_value="/")
    def test_extract_name_root_finds_installed(self, mock_cwd):
        current_agent, current_version = set_current_agent()
        self.assertEqual(AGENT_LONG_VERSION, current_agent)
        self.assertEqual(AGENT_VERSION, str(current_version))
        return

    @patch("os.getcwd")
    def test_extract_name_in_path_finds_installed(self, mock_cwd):
        path = os.path.join(conf.get_lib_dir(), "events")
        mock_cwd.return_value = path
        current_agent, current_version = set_current_agent()
        self.assertEqual(AGENT_LONG_VERSION, current_agent)
        self.assertEqual(AGENT_VERSION, str(current_version))
        return

    @patch("os.getcwd")
    def test_extract_name_finds_latest_agent(self, mock_cwd):
        path = os.path.join(conf.get_lib_dir(), "{0}-{1}".format(
            AGENT_NAME, "1.2.3"))
        mock_cwd.return_value = path
        agent = os.path.basename(path)
        version = AGENT_NAME_PATTERN.match(agent).group(1)
        current_agent, current_version = set_current_agent()
        self.assertEqual(agent, current_agent)
        self.assertEqual(version, str(current_version))
        return


class TestGetF5Platforms(AgentTestCase):

    def test_get_f5_platform_bigip_12_1_1(self):
        version_file = textwrap.dedent("""
        Product: BIG-IP
        Version: 12.1.1
        Build: 0.0.184
        Sequence: 12.1.1.0.0.184.0
        BaseBuild: 0.0.184
        Edition: Final
        Date: Thu Aug 11 17:09:01 PDT 2016
        Built: 160811170901
        Changelist: 1874858
        JobID: 705993""")

        mo = mock.mock_open(read_data=version_file)
        with patch(open_patch(), mo):
            platform = get_f5_platform()
            self.assertTrue(platform[0] == 'bigip')
            self.assertTrue(platform[1] == '12.1.1')
            self.assertTrue(platform[2] == 'bigip')
            self.assertTrue(platform[3] == 'BIG-IP')

    def test_get_f5_platform_bigip_12_1_0_hf1(self):
        version_file = textwrap.dedent("""
        Product: BIG-IP
        Version: 12.1.0
        Build: 1.0.1447
        Sequence: 12.1.0.1.0.1447.0
        BaseBuild: 0.0.1434
        Edition: Hotfix HF1
        Date: Wed Jun 8 13:41:59 PDT 2016
        Built: 160608134159
        Changelist: 1773831
        JobID: 673467""")

        mo = mock.mock_open(read_data=version_file)
        with patch(open_patch(), mo):
            platform = get_f5_platform()
            self.assertTrue(platform[0] == 'bigip')
            self.assertTrue(platform[1] == '12.1.0')
            self.assertTrue(platform[2] == 'bigip')
            self.assertTrue(platform[3] == 'BIG-IP')

    def test_get_f5_platform_bigip_12_0_0(self):
        version_file = textwrap.dedent("""
        Product: BIG-IP
        Version: 12.0.0
        Build: 0.0.606
        Sequence: 12.0.0.0.0.606.0
        BaseBuild: 0.0.606
        Edition: Final
        Date: Fri Aug 21 13:29:22 PDT 2015
        Built: 150821132922
        Changelist: 1486072
        JobID: 536212""")

        mo = mock.mock_open(read_data=version_file)
        with patch(open_patch(), mo):
            platform = get_f5_platform()
            self.assertTrue(platform[0] == 'bigip')
            self.assertTrue(platform[1] == '12.0.0')
            self.assertTrue(platform[2] == 'bigip')
            self.assertTrue(platform[3] == 'BIG-IP')

    def test_get_f5_platform_iworkflow_2_0_1(self):
        version_file = textwrap.dedent("""
        Product: iWorkflow
        Version: 2.0.1
        Build: 0.0.9842
        Sequence: 2.0.1.0.0.9842.0
        BaseBuild: 0.0.9842
        Edition: Final
        Date: Sat Oct 1 22:52:08 PDT 2016
        Built: 161001225208
        Changelist: 1924048
        JobID: 734712""")

        mo = mock.mock_open(read_data=version_file)
        with patch(open_patch(), mo):
            platform = get_f5_platform()
            self.assertTrue(platform[0] == 'iworkflow')
            self.assertTrue(platform[1] == '2.0.1')
            self.assertTrue(platform[2] == 'iworkflow')
            self.assertTrue(platform[3] == 'iWorkflow')

    def test_get_f5_platform_bigiq_5_1_0(self):
        version_file = textwrap.dedent("""
        Product: BIG-IQ
        Version: 5.1.0
        Build: 0.0.631
        Sequence: 5.1.0.0.0.631.0
        BaseBuild: 0.0.631
        Edition: Final
        Date: Thu Sep 15 19:55:43 PDT 2016
        Built: 160915195543
        Changelist: 1907534
        JobID: 726344""")

        mo = mock.mock_open(read_data=version_file)
        with patch(open_patch(), mo):
            platform = get_f5_platform()
            self.assertTrue(platform[0] == 'bigiq')
            self.assertTrue(platform[1] == '5.1.0')
            self.assertTrue(platform[2] == 'bigiq')
            self.assertTrue(platform[3] == 'BIG-IQ')
WALinuxAgent-2.2.20/tests/daemon/000077500000000000000000000000001322477356400165255ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/daemon/__init__.py000066400000000000000000000011651322477356400206410ustar00rootroot00000000000000
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
WALinuxAgent-2.2.20/tests/daemon/test_daemon.py000066400000000000000000000061461322477356400214100ustar00rootroot00000000000000
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from azurelinuxagent.daemon import *
from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT

from tests.tools import *


class MockDaemonCall(object):
    def __init__(self, daemon_handler, count):
        self.daemon_handler = daemon_handler
        self.count = count

    def __call__(self, *args, **kw):
        self.count = self.count - 1
        # Stop daemon after restarting for n times
        if self.count <= 0:
            self.daemon_handler.running = False
        raise Exception("Mock unhandled exception")


class TestDaemon(AgentTestCase):

    @patch("time.sleep")
    def test_daemon_restart(self, mock_sleep):
        # Mock daemon function
        daemon_handler = get_daemon_handler()
        mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2))
        daemon_handler.daemon = mock_daemon

        daemon_handler.check_pid = Mock()

        daemon_handler.run()

        mock_sleep.assert_any_call(15)
        self.assertEquals(2, daemon_handler.daemon.call_count)

    @patch("time.sleep")
    @patch("azurelinuxagent.daemon.main.conf")
    @patch("azurelinuxagent.daemon.main.sys.exit")
    def test_check_pid(self, mock_exit, mock_conf, mock_sleep):
        daemon_handler = get_daemon_handler()

        mock_pid_file = os.path.join(self.tmp_dir, "pid")
        mock_conf.get_agent_pid_file_path = Mock(return_value=mock_pid_file)

        daemon_handler.check_pid()
        self.assertTrue(os.path.isfile(mock_pid_file))

        daemon_handler.check_pid()
        mock_exit.assert_any_call(0)

    @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid")
    @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True)
    def test_set_openssl_fips(self, mock_conf, mock_daemon):
        daemon_handler = get_daemon_handler()
        daemon_handler.running = False
        with patch.dict("os.environ"):
            daemon_handler.run()
            self.assertTrue(OPENSSL_FIPS_ENVIRONMENT in os.environ)
            self.assertEqual('1', os.environ[OPENSSL_FIPS_ENVIRONMENT])

    @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid")
    @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False)
    def test_does_not_set_openssl_fips(self, mock_conf, mock_daemon):
        daemon_handler = get_daemon_handler()
        daemon_handler.running = False
        with patch.dict("os.environ"):
            daemon_handler.run()
            self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ)


if __name__ == '__main__':
    unittest.main()
WALinuxAgent-2.2.20/tests/daemon/test_resourcedisk.py000066400000000000000000000031341322477356400226410ustar00rootroot00000000000000
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from tests.tools import *
from azurelinuxagent.common.exception import *
from azurelinuxagent.daemon import *
from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler


class TestResourceDisk(AgentTestCase):

    def test_mount_flags_empty(self):
        partition = '/dev/sdb1'
        mountpoint = '/mnt/resource'
        options = None
        expected = 'mount /dev/sdb1 /mnt/resource'

        rdh = ResourceDiskHandler()
        mount_string = rdh.get_mount_string(options, partition, mountpoint)

        self.assertEqual(expected, mount_string)

    def test_mount_flags_many(self):
        partition = '/dev/sdb1'
        mountpoint = '/mnt/resource'
        options = 'noexec,noguid,nodev'
        expected = 'mount -o noexec,noguid,nodev /dev/sdb1 /mnt/resource'

        rdh = ResourceDiskHandler()
        mount_string = rdh.get_mount_string(options, partition, mountpoint)

        self.assertEqual(expected, mount_string)


if __name__ == '__main__':
    unittest.main()
WALinuxAgent-2.2.20/tests/data/000077500000000000000000000000001322477356400161735ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/dhcp000066400000000000000000000005101322477356400170300ustar00rootroot00000000000000
[binary DHCP response packet - content omitted]
WALinuxAgent-2.2.20/tests/data/dhcp.leases000066400000000000000000000035721322477356400203160ustar00rootroot00000000000000
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers invalid;
  option dhcp-renewal-time 4294967295;
  option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 0 2152/07/23 23:27:10;
  rebind 0 2152/07/23 23:27:10;
  expire 0 never;
}
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers expired;
  option dhcp-renewal-time 4294967295;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 4 2015/06/16 16:58:54;
  rebind 4 2015/06/16 16:58:54;
  expire 4 2015/06/16 16:58:54;
}
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers 168.63.129.16;
  option dhcp-renewal-time 4294967295;
  option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 0 2152/07/23 23:27:10;
  rebind 0 2152/07/23 23:27:10;
  expire 0 2152/07/23 23:27:10;
}
WALinuxAgent-2.2.20/tests/data/dhcp.leases.multi000066400000000000000000000037161322477356400214470ustar00rootroot00000000000000
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers first;
  option dhcp-renewal-time 4294967295;
  option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 0 2152/07/23 23:27:10;
  rebind 0 2152/07/23 23:27:10;
  expire 0 2152/07/23 23:27:10;
}
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers second;
  option dhcp-renewal-time 4294967295;
  option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 0 2152/07/23 23:27:10;
  rebind 0 2152/07/23 23:27:10;
  expire 0 2152/07/23 23:27:10;
}
lease {
  interface "eth0";
  fixed-address 10.0.1.4;
  server-name "RDE41D2D9BB18C";
  option subnet-mask 255.255.255.0;
  option dhcp-lease-time 4294967295;
  option routers 10.0.1.1;
  option dhcp-message-type 5;
  option dhcp-server-identifier 168.63.129.16;
  option domain-name-servers expired;
  option dhcp-renewal-time 4294967295;
  option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1;
  option unknown-245 a8:3f:81:10;
  option dhcp-rebinding-time 4294967295;
  option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net";
  renew 0 2152/07/23 23:27:10;
  rebind 0 2152/07/23 23:27:10;
  expire 0 2012/07/23 23:27:10;
}
WALinuxAgent-2.2.20/tests/data/events/000077500000000000000000000000001322477356400174775ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/events/1478123456789000.tld000066400000000000000000000006271322477356400217710ustar00rootroot00000000000000
{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Test Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Some Operation"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": ""}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}
WALinuxAgent-2.2.20/tests/data/events/1478123456789001.tld000066400000000000000000000006701322477356400217700ustar00rootroot00000000000000
{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Linux Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Linux Operation"}, {"name": "OperationSuccess", "value": false}, {"name": "Message", "value": "Linux Message"}, {"name": "Duration", "value": 42}, {"name": "ExtensionType", "value": "Linux Event Type"}]}
WALinuxAgent-2.2.20/tests/data/events/1479766858966718.tld000066400000000000000000000007571322477356400220360ustar00rootroot00000000000000
{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "WALinuxAgent"}, {"name": "Version", "value": "2.3.0.1"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Enable"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": "Agent WALinuxAgent-2.3.0.1 launched with command 'python install.py' is successfully running"}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}
WALinuxAgent-2.2.20/tests/data/ext/000077500000000000000000000000001322477356400167735ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/ext/event.xml000077500000000000000000000022201322477356400206350ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/ext/sample_ext-1.3.0.zip000066400000000000000000000015561322477356400223240ustar00rootroot00000000000000
[binary zip archive containing HandlerManifest.json and a sample extension handler script - content omitted]
WALinuxAgent-2.2.20/tests/data/ga/000077500000000000000000000000001322477356400165625ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/ga/WALinuxAgent-2.2.19.zip000066400000000000000000017541161322477356400223540ustar00rootroot00000000000000
[binary zip archive containing bin/WALinuxAgent-2.2.19-py2.7.egg - content omitted]
R1Fۯu-a%orgށ®k {@Lh8 Hiex߱7tKXp(},eE`{JΡ=QOyt4'A6ЛZ ?QZ6%& 7kkhZxMԥR"ȔHTkJm dn&6}0T w`mRptsCC=Iɋu^u_V/f B]6$k@329D`4.n=>Sةɦs_1nAo[7bt1(7I[U>؇\5HOe)]wp{Q מwwN7{A6q7KPO;Z^%驊?:`9@1.'-T<㔖JXYN.V'櫤S(ԛMeI%`Ȗ q~)cUClGc~+3ɴ+?qu@:pд ϚZ^JZ?!`+5]QA4;.iXHBEǨ'N#%׭I#=^v!*d{ r~p"/d@E΀?n<7=HWU Sl PB"H,e--1S̶ߜb%]Y*&@`{/(肀9c; ƥ}QSBUz 0m8%x`i^Y,1zF!Q!FMΌDXRG}IjfpAVK.8"&#*lw`v"/`xY:;;lĜ&R2%՗Lh]v "rirP`T6gv(91OR#: '!@@ƈb3J75WQ1QMڷy* %ҰzI5?l M`eV~ł~*|y@RRLkE։jаdYϓ T^|NUPBsh:]`x{C=i fzF.~gVш_^ggIt-ʢȞ P:.wIHt{})30r5~M/Vo94D͎>Nv4a uuWG@ܞbH$W”]#0d[4Cs(,ЦS}KEGS0G^|-h }vw".G=/i?QoL)i\r-mQZG{6>\,ix[c&y h[O|Hbcz/΀-|M;`j=SĝG ń)cJΩMHJD dГY/Eޖ~M& {;RRe]0G[)6hGE}Z>čhbԓj-) 6K%Pc@J&8-ԅ@!K6Hl~$u(Ya٭=r)JrHI/G!͈R+ʙ]B_L<7{ԦM8i*O;_`J}'Iqa>ȪArHgfQ!z3vHvN+EfYFP 'c2@x'S21Rib睥j -pLj=G]i@˪9kNWa I$pyE+|7qσa> N7lYaEsnU 'ZO-o:N`gH*v|m'P>ܭ\\ a"d!+V 4{;նu}>g 6: ՚eS%@K'ywRwI ۯ%~qw去~ ⃳~>y\ϡ9BEQ{Fv<<^saZ: K'iO"'dɫ2ꮁ[QT[޺"`x]l1N j&R1yV{V"?40(qR#_  \݇ >(͟`VAh}yKJr5\S+w^W1k4|t+C{ SV[oh)OȔZƾ%:MX\.~j7T}iЭdd@OL@7C#km$'~g^f v.z mI\~ߌOl6'U?&'ZdHNpkkbksRФ{u}/pg._X%3<@OI[UgpDn7A2QF eL pPyfix^'=gyTgp5<'WVV^^T[\$Af7@$caɭ䄨kyӁ2.8@(`3iF[ȝZBa{H9|$%NG+M5;T;I\S6?|_fڻ$9'd n(1z~$}%%Bpzj02%ӫX,˽EDP=GjV JXn*#rF}F΀R,<'I~֠ x&3Cd]?0A  nkYXۋAِ6u|E%Cq 7H6DJU3]29s*)q$ƺtyUZoQQD*NRji4JG3;؊_RW5]pGKwo ?i:Qz[i%Hb$ \ķ4_y'iSiuB]NA'Ew8#Q:ajL+YSGIX^WN%$ g :4Έp3$oT7\DטwM jpK;r5[˴ed#3z=R:V̨W|ɒ'[EJFKi63l(sP|q˭Lrw+}bF`9l&flO&#:8BFOZq%B^< im*À : 9TZ #s/Ht5K9F=Qz#L:}hbOȎ~h5fs/cH}?p7P.k46 TX g{W=,@1wbGd̈aֆ'3D?q:, }%ۚ]O~Sw^Jˊ@6e^tG1Q/xʰ` `p (IHNT@"';36a`|'%>%t.Kv1Z J i?MWt,KY,'J=:Yyz@S3*ianmF[c9T`zՂ)(t | @>;ptFf#=w0a'AD?5~K0A \axqFA GG9I2a2 }~ac;]b uU8-k^Ʉ9,j4*LOZUC0AyF ,y|,}{;{ow]tT(ʒ`C~sFt:(.ծ?PxEIOg`,Hi'( YG9]X07'd “whRꔕX܏Wj~,ߢC_yG(]N]qCgg$MV^PHOTCֻkrؽ/&#Tˋzl`ח1SPQ-SP9kT Bcյ>Fľ-ȕGہu} -jpy9Fu>34ٷozҿڵhD4k[R]q'@4rԻTٿēA:\o=ppz|>vm}i~ԬF'4_o:KYB餹m9ܺhkLm}YplwJ? ?`ٴ{mp4Vݻpo9e3@ko8йls2%ˮuFowtW*ܮm tǁ:! 
=A%B<"+8>~r/(|c"~t?ABEjEgR% a,$_* lLBSQ9 O4<I†cAUjR ` ["xDr) E4ȇQYһxt]jU`܄=#уxR>vگ $^jlz՟۱\ãf BNtrsX`v.}v_oŽަz,.Y{櫽b7达j Y -<'UAj7XޓaSYˆBErJB(I!*b@<*F`2:4v0$PB4zMȳ8#r?ir0H 0eS)k7ϊRmA~@DeJN[JGޠD= ˝`MXkpuvXi 6u7\gm؅Nv b*LaZRm emL_h0l|H @HB{@Ѵ4fy~^=ƑU@ #B!HGed31%#n Ӑ.C9b)~pG{+4u({;n.>pl 0X{gmAo2o9p #""ɡ$ o@„<Ԅ 'nbrJ\>NTjtyZ:y>pGOx]Nax{ݹr|yn풖n{89SKi2*eD h[4tqe1;XW$VA*)D"C0ngeo,9T%;d4؃UB6 |K]';nq /req _𑧞J{ 쾾6MzJY(7xcbBm?_U%`OJ E&↪bRǂZ^4]dShݮvv@È4!b '(iDY kd1LjeNQxH$Nu}^=˰p"Q} Z^`0`AGtTjϼaxAPŭXX6|Se" $~_]`@Z~ .}wJ}bx9*5pP؆%'eK:q?d*{r ~`(srZ/]nn%1H}PD\OZJ[6A'kpSdӌyx8u||)XS^T,tl]Db|<~X*6ag[ܞt1o {ݍ#F`]d]$ݡ"Ì/ܪ=k%~ ڭ}DA`kްrEMNNt"JRU!0Aianf|Lk-y|κ^h3]&a֌>&oXy^wvvzf@_N?}(7ji^&fe{ڑϧp聡#sXox,loWߏ./۫3|]˷a(k뀌jnc^5 gk9ށ6J;G<"qmع3m ߔO (ްpE׎%y8)sBcԖy"92]"+ۀ emG,JbgP ZTI@ߠ{i@!={dzN mGkJvG*{phӟvQi$`sC"]t@pMN`XوEe@L=V }jJLaxeuWgQ$M\hX9" "b2 ,K!qY>#H, L8aãiM _h8^MrO cJ .)~"$G.+" _d&hSAiʸ*Ñ˗"MdLlEǝJEcHV+e ?Ơ2pW\'F}05t!YMv2,*L.l] ?L|03 $ mh0<1^zK|b.v4w>˱wlܥu:qSMksHrC?RHɾB%Q8!I04ܦ 997%M pcezmWG}}:>QBgJJs,$E*I+MVc,?h.in7RDA<5e` g1Mwߞ~ h0 ߡtwB,CoR6Ib0h,񭛱@N \Gyn yN 8g;ꑄK+ħ/E:먞lIH8M/`%DwulS+^yg\SyEFJJV`bc59,ŒEɇ@S]*tJpya[n#ʂOKDĉǘ u62U)\*2l/خf]7U43.V;&1)Si9 Om!Co" -KBˎuUB>"3.j*uj64_|R=jH~ǛKC~<y`RW>%tu6C9ȵ}-.p]&>O`Hk./XB g=j롿`pKٴ9,l6H@m,s2 CJ$:.йp,j38p`6@x8nE=A& vmH<\xCӸDk*AI]qմczȃpN_1ěӱi]6N]r""4^f׺Y|UkS[V#.io rŦڭ\ kFى~ 5oxuNCݍȬ߃[mg r> s{A3@` W~+ŊSnW;F!~-u3.⊻#QjX:'֕YsԆWJ@Ece,/iacGckً'ƒjӴ 6NeR`DZ2ROx2>j%N&y=3<=s`m$4mބW {N<&5ƕeںAJʋ !8, wB4z/z\3;U²qz#9`Uia_CO`-3;Th9<ڊe"AD?װm$~ i9WCPESE \Kr!$9#1^W0V%ʡA 52hbM : Yڨ|}D,v/dT՘J`Noķ +[tOJ@>2QKp3֎뢶3^j)$ř *v/Y,+~SGc1fK!p&D,L3-4q }E?:_sx}g&Ef%j^{޿U!R.8ͷFdMSDi_a luϽ?nUyIpzУdݾu,r :uRD-"C9{Q6lX]$[&Mz*(Ma< UGw %*tֵ!W!R%S֏1.TU Zx z]W{m;m*7ZKT0  :@u,K:p!HSp3HE, nS-GI:7uuUbx]/R+m*v{/oqc+ 7jý{[&Rc%5<638NSmCqdTP+,|ke#]E4_?_+HTVKʉs=tr=:nap+/W]/=7u}m Y6հuF^Cube! +lCnnsl% Õ՚V>ɊaY|36ޜ˜?s Nw9l$$<03(Q< QF¥|p' y?u^r)\a-V})T N;]t^(\TtR{ 6,RqMq ; S>JPARZ #Α2b *N6oQ[`%,KA| R*~2ӏWB;Ma_z.QSdLC,gv":ubWE-a_b[u o* Tbi)XBL[;W gUpBYJj]kQX]maܹ"iL\ F_zrȬ6d19y:-Z(Y(JtLBk )Nʖ\*_"Z.z%$d<6AIZm"G3nx>Wn\ K_AsQSR8D1F+ё](o|S-4?7ntYU{JcV!T[-i ?z#kG8Vc;6H˱HnZ)(>_`0kyQwLk&f8~]֚LI8z̰u/;JCm ӥ`^o }[;3ո&A!um)-Om˗8r Ds~e1/X֕|Y[5ݚǐӪRPh ,TIA&;HB[R1TՎQ .)QNP[fb]I<]BPbjAH8C4IEO\E-bC6Pt^ۯeyCd?G (ÚԛܱQ2iD '!3^';icXme^!X; }W\~gN3Zުؤ7fi-2B i^+:ŊXFG73'<8xøn3׬50Vy9Y զ+6s h`x{cH%'g`xh ew0B㓩UWL'A:`&.X,Q͘^=+kviT3!Dv}xNŪIagz@lN$NM-'/TH=&ۢfRu\XREyyBb9bM?B+"`L~lk,gl {de-*^Y15x; lҮhCI:85-kr*[Ko$R#3Vݳ%X!iJhlB:Q3+OY!B.psqmBrdkҊRx݋똗ZOo%>Oh3P_ n[)Ly u:H ʤ9H3S5#uFR /V]=OINDo:t뽔'U})4}Q@Un2_6 Lw?>%+E:iJ[$!IBF EmU]:V$CM^f-CĈLUEU9H5({Hv{{s=9;~-uZ? 8vo?U(Jԝp@ӵs(q/e.=J[&1'P鈫mG^lZ#\BC=GF0v#oNgLy}Dz;F8\mg3E3.cAj >.BVd F-L=kN]$ Æ4|g rrro~K84Y@ O5iɯU %PG^h)wLhhZkt \"|e*\"A)}e?Djӊ+=faqduEo/l!P ZTь)Tb-bhD,ix9w7\˷>PںBtm,БAT.z&JĖ :t$ 8G 빬/lٲTMl[wfLXGo2RZ`RVE|S26qn>bI*lꑥ꤬acY[FS=yhS~6뎫Nf'ʫ3#YimGm:"n:s8Cە4Io{ .6v=[ʌ{OdI%)ze\m"Ђ N Im5]5*m"'XGdh@I-ZjRD$ DΓnT BWǞ,gM\ENi"l"XrUW)ʵr);JKVkel  *ew ՉJuſK*t*Kp_T1Sl2)c6m3Efpc%rK)NĽ0mMŮE; 7S33GvLF.:Oi1pE*Vb"UWVԍѻڷ4Ad糽3\+KpC.'hLw5Ӊ4{n=6^cp-Y [p&;NO]8.EgSvf+ ,TZEfK۞}ZHAlUQGʎ-E;4K}1#nYILYAeL0%b X,⠟VPMlSm*.6X$^WYsAN9~> v^.pwwFQ[A|w[%{JʎjMy:- Jxu4;bݛ=׍b2KQD*'*A~3%h)gr,@i.ZI"eI8RO+i$4ag7Ӡ;S4#:Y$׊V탊ZEFtϚ3'ZSVYXRj2ۏa;ժ0]WBZ`æp1'dQ-ʰ+"f䕬!65lgΚ&=*%6C,tу>p .!ίSf6, H.kH66G6-i+. 
yR mX Jۙ:5+j~izȸ[cGbzF!'N/u%,R0]OɉvT7j.βbeaeʯX:֞kPE~$9vLj"d|KüZ 1$C<('<5ePD%", ʊT巑\-EKQ3w%fm/>.O\<!7Y4xmlвcQLZsa+LGDLmY:/8~l W ߺm)Zfc+5?"Gl'ݺZ0ŖM:R WN f(K &+WHjUK7 |_,74» `ߌ왽.U'QKC?eXg-K.EN+?vd6%'p,,Vѓ2킼KW6A˻ҁ⢍ühMo@A˨`wb|$~0te{\z4vQJ}4NWMOxV0OO[vM^& U%̥5qٍIFqi`?4/1IΕGlSC^Ӽ̉P.g,;Mw}_6;CO'9Ə>ߢV*Y_]?CSP6j͊p45ΰtMSKi?Kyn D$b᩿)&u+E[o9drB{(Z6f4IIlSWi[m(* 'Nv-zq6 yiBdrY܁.(.{a!o<;wHrwZ5s4Pӄɿ^x(=5gtBtz۝oT zЇcwxN:￷@/&<]C~ou&\Uy+78 CSDo6Ghmw6OhmXY1v;W]8-)B U90ʧ'ޯ\\OXqc0 ⚟pfEcJ~%/,~oE 4}6oc7h<e"d~8lx-!N -zn v1~E/xl<.Ʀ0<<= zdS Hg# 1ʃ w_sO6LYз];`K!@àۈr%(CD,/>%X`>[p7TUEakx߱k@Rr (fcӝƟ k;4ɢ$S DiW.IO-C >{RqBcn4-Eyy8Nxl $yMB[w;n~F q^'IpAo447yb;x)Ȣɰ9x,k1vݷ F܏S D~SH\g!=H18yxCPxu|~k9C?xUۮIhLˠ d8)x}٭}NHHF) T2MJpqɕ:7I0 {laa&8fR=8׈'T!F!il|a EX u4'qd g&=u|.yBA` q߲4(D3}_y`>^)yj.]ⵑ[A֑*0OzlF?v{ -uĹ4O ,E,G꬈D@4|ĖuC[ nWScׄoQo]- ǒkEV6눾DZjmXzjKcp1j)(.#6:qMu-;-[BH>@)Kap(c5n,)/N>cn34 r} !+ v3-@޻SeqާS s1 s*@bpֻ=u I O2@-$0anS8eHpY0Inm҂?S!-5*]@_-$GMa[ TZq| RA%nz. p6Yϔt+ohVR- r=E{I2$k(:]^CV—L`5ܣɄq5Gg7C"2kM#; %j@rl3ua 4+928qS 7/#7sFA7|ZK"fTujXn?71fa8 ܈/f+x%h xkʺb&~i|P7x (Ke?ᅭl:`)6xVWkiv(|!G|r;A03O $:mG{fnn3|jo0}Z A5==n*k{g̈͋|?$-cbAdڣMǛ0Y{Huc_mOvwZs|٣<ӿcxOno~*/oF8ׅ,?'6b -fY; ά:zZzCȍKmG ꈻdpu5C(҆0>+a,L![4ORݲ;BcP6p*|(9+&^PŖm,NMדM,pӶ=3m[۫w? 兀"2ȑJ/ɺ^"Oq?2P_oPlᴟxLckƵz82w]"յ\=N~yjpkW 6)-F}=|zjMVu$i޺˖QQ+i,:k~zu/!e =%.ik)Rjj M<9j[mz:؛}3E1RtyήS߯V/%Jԡ;@& 3g:sykCӵla=zfؖԁg<ؙ=Oo2ŕ׿(&i2`Guؐ~dR-qgr-i:mzq@JP& Lȑ} *%]ȑ uG"˵ov嚾9@=uzZc..+AQWY$WPkQR!a(vԑ o,|{h1Pks+0 C* Opw-X VL;ӡɇ<]89Sw-8&1ɶ.߉sFAY$&nap}pb|"t!21=!:oSN0҂Zky68dnp16^KST~+xT'70|wD3ZUEօuW*f[DqKl8Wfb=Wq9Br<e+KKGXE ujgtѰ)Y>"CvbU`X'?;6V.Җsu H+5{UG;7q˝<9]BGۣpRxkz=y9jZ-Dy :&r?n8 d f83:?=K6Fּ6+͜Pdvw1vf'$_Dt`D^dpAl(̓XX4mJ rR _BAHʤPOBb>b(Stwp&S1{ka.1P|d 8Cڮ]Jy]ss 6E1/$5\}#hQC7C2RbRIBj&sjWeĔ0k"6T.걜!5Y-G |1t$.aGGhIFIdYy! *F#XB5*}uj =8JVsū5m-iu"_ =\Yt`Bc- 4g9%r6s*8x]1RjW⨡m/_̊}ɹڡ.0+6~/pۆ B`m!~FQ!``Z<Š1k+Uk!rsǥ9¨Y%Z+ͻ(y1~..yQ< *q糙 }6S\`j_'ak+!sj?zݳmmu puWY{ kҟ\ ţ7Gc|~+ds:5>]jPl㝚"A~C/$,Z{ f]3!s*g6U"RCfrWApl2MۚTOEq|KCUE!|dgB+#d#qBWmAd3TQL-1v%sy"KZ=¬a|&cҕzhTOX{@D]c)X ˷?'ɣCozoIRݜ6”JQAܢI_ aW7B_3-3*iRkVlPgRǧ$C^AcQd[zVt{.][ާI6 B:ŒzUPØ žYgk"O C`u~:8ݥf ױYnǓم90=JO |.:VmAJy8 'oEKʚ)/wI*;ٞg+ :<!*%ZlضE}aE|mжȜO^ -HR*irݥXR_"p_TMJV^ V|~,粕xeٝq3\>(?3b -|Tf\R8o5~\U7eSS* -m є5[ZƩ*cz&\aփıOW<42e,UWJ,6D;٫͟/pή|OJI(*bfw&YcoӘ8( gNb|^WY̭synijjw/*TpMP?+yZ"g B6Q0 VqJG${ Aպa/[/ O:+Uo$}u"v9'}(hG;ʜΑ8{kVWz)WQB nN4Q@d$B/6 B,K|׊Lq,Վa"M#j-P~'Z: ϩCiKھwɼ1[L㲹3ϝCr6 5YܣoLZۮ[ Vۡ2wS <nhQC)T2%EI&/@V 8u.VR ;CIz'CjȥNR`o A$ )?/em ~P ]b!M+f]4yWj\TU)yrXϦ'Ǯn Q7n=H.q$ a)0X)z _G}1f>NT} G4œ,OK7zky>3ݦXssq#97c빲a89eՍ!#)/HIputeq+xX g4(7ꊇ:JOH`)Λٍx2TUvpʼq/?8 *y頙:-;Sm&M& W#I'Vi4bPѪ#2:I;N?!D.mOGH16,`mKn`arP +j.9OsKev7AE_I_G_8TaGM$k7i ȫ7B:LJtNWSxQ2:kֺM քc'1=ܲ d+yX|ܾ5q3O5qq\$pټX$ܾh Bw`v}O /.AA7iDcw3 N= p-`~"qrY=֊glj*fLջY1 lԓ:AmIs>Z[F[㾡D,r4{r7'IWN5Ǵ4QAw ?p߀? ;ye[˴4].8䧚 *ZwO&ib6Y7N=LpkE}%o1#ᧀι2a7Qlٕu9<4ev}|=&,s񩺐? q橽EPAJZLR/:wX0B7dkm[-e~].i;;]K_[=2#&,2[֦#J]O@Rح5f[D@aE&YDL%O8v(My= <T]B4QۀVgjCoN|{[}85bɶ&[c >g聕C'0{"9"G-W1 '楛3TS _ܷ킣?3#>gqNNb/q$Go .·'R^gb\BZ 8dPA$nd)2XpK'82`Q3wrB ) ?RK'r/dĆs ڝTY+'Yǰ;d:ǠJn-.g.B,|-CہZ<7 QāEdQ^ϴܔQpo:Wۮv[9khfzf?>GzwUOId! 
,@=='D?vUQUmW:B#A@Ѷgu2O6NӸj V@];1[qR2`׽׸!f<2 ] h w xif -4ћ*a6;tbR[ ׄ>ux²RHňXV5J@rje7r3̥G?QޜƿZ[Si P3Jk#^RE75F)TudTP^ЗUVaO7/d(i@t<_X,3$Ǣ2hToêBD #e !"yb0:X.w?_'̈́%#_|ݕQtbNmdZ I(YW @q3-K aCT3;kF*̵ϰ]H/X/:Qz3E` IP34كz +P("G\ .-6=f|TXFq k^|ChhgNE[!oD"!Yj .wήyJ<q:l"ݺ/*ƈ(h~v&H,*ʌ jC _>l{?*  G3-)8a԰@ !VV Se A` gHr s[.dsb"Bq=[B;N5QE/  |]/ꂼ"=zY!%g/diwH%7iJS@y׶L,Y6)GfWgYELMr{*~ K9eI7Yu2S=*K7Xe 3l)BnFAq%Dx ;HII)1Nmh1>iOPk" I9W8je(dωHVqT1NdFvO@sOhmTĒwS66օdjĊmK+O_=EC3-Tl%hhyEڎR%-(C$ h$PǓ1e!>;jvҟ[gp 5vDŽBĪyФZ)ͿJZᴸ%@M;/E(I{*~8UƄJb-3) , Fh %vGKD[w~Gb5k]PKm@r{u]eI_3.evf̲U5|IhCCi3Ш݅yi h/jT4lJM-j  m\68FR?'~c-sx'ce@M_wW:}{l|)|k3u h%zrc&/? F}B_>+b!v}BJUbnIDv;j#^ ~EHfUV;x}*Y}&⇥tAi[=>tfdH9Bk{^*Cںh/zgL];uokz<Ɗ5'xfw'~ fl}dtfڿBۭVl P*eS|AFY   F@1P6@PQ ʧ )I.a7Za* b-AhNS}aK|7ĝ>@ڤk>o00p/=w Yq!'I-DxrPT$m GgJ ׁ4Z6gjDa]}Q-H'!7,͞ 9!&^U,l̥i+(wuq Z=K:8bEyCTbS&Fz?۵Zs JF,Dy7u48'CXؾ:s<)cX@LjD^\0oXthxJA,[ꆌNЗ,mT(\c\<[ /De\1a'1,xPS!pз9hO :XI\ ~}"%MvG0tkGΑQBҼt,.Xe>`dq ]fNTwDN6i|lϼqY] J%f!7@ǃZKgDs;`MW}l{ȤxJsρ >y2 TѱYHx_ʾȚIY͚b`ЫQŎZ?.3!k%`Lrew$ nɝu{W t qMxU~́Ë$$Gu>y\t@q+JA.]k@:Z1(a2BQ,m5k%hy$`, }U9[ێnM+~.(BNЄu'O,ex-_/N /5ph q_U;T焇 4g 4 |#Kfn`NoWn^`M\KYrkX2}E/*4s'!`o4"|TAjWwmD*P J@]Jv ٤vg7uX=܊X gMmI8)1Od{Fr@X*㑰й$*~w7 ΣZ GБdRƐO219bex%̇}J!-Zk#z1cϐ| B[seoPaÀ%%\R1TGup7X&!/ *9ю("p"CIT€] _p((=Ɗ]+L,t˵yR!r\t*W=MQGfG/ hQTLKH(Ʈ>.ObAZY |yuݬi_78j#>aǛ=Q0}nj]Rzy<4[̎KPfJl:gZDq$ q w9 VOs[Hê1Ru SV'0PaTċb`#eV"3x%[r\&ZU m.q.dijX\æH~) ky[wG|'A48VEnaVJ(8$^*X>j.j2v5SG9"$ź`'hrf+HL,lg<#;!])h=tg0=`, ެFe{ oOzҌ<:PWj&\"{ bG!N|d}+[hRڹkطE;3u(|d)<q!Qcr&,E)1֋jlG=Ka9bNWō:F*\z"xhZuiQě)NQl5˒b!IHTkT T 4%)I##ƳЕk*;oJG`o'٣2şbȭ~M@?0+Āu4 O@B(AJ^!0PWoa lA4$n-W,OkeD$Ȭ^Y]h3u.-[jseюZs o~j4)p>]T ;r}f0=;M`|;j0=;N|;O|Ch݂ז'&c x$N:J9F)Y5e R1=B["̦ Dav*; @ŵN V鉯u^Qm -%,(֔ a2IMFI -d!WvjQ~HW_ bFϋ̔.WμC& _?m"%&ņJN~13w/O?s]Ϳ_$/W98 5pQ6MTDUfsꢚs[n4߫GX'r+=A+`ϋd-QCڎQ$ӨDǼXMh,g0)L=$ck1V#V`>ge.ρV2n^*\4qCd7q׃yŜ 0Ò>QU{Ď{=MNƘIdjOkn/t% -{2V(U6޼CAGݨ)(f\/6;ٔED+;1T XHOv%MN]OYku>GYʎTT D*GnjSvcw!]ұXpZow}*^R\Z^1]Ȥ<|ͩ_UkP/DdNAuJ)akK)­Gn",'F']Z%2,;xUGq80 tF8ؿ}Jaɲ~Y· fQ5C+)JPB8h-5j*/*Fz/7R?5/F/6frQ,f4DM>Y3^D]ѬjAIF|sHmNRSKLŝ͝0g^#w}~L1̗ 8f{ uBgL67Id :Ry j+ ĐoEQuSY!DexkDK*f*cP}iLlVQ0?N?Ude:C|\Hzlj"t\tp;"Yt³gob][Uvzs}|쓛N\QkݶSjqy|BXKdINk|G;QS}ҪRvmneqgf{]6Ҷa"OXkPar:[;y:mr{ y{vtf]o78d]|>p}]Kc#0pL7my"3R{EFLoUSPU˖ٻ:X:[CEQ~ܲ+H6~ KVysL{NQ(ri$Rp+wI- jXҗgo|췻$pM`h`LYv~gg  *`@T sV!(aYߋ+J]R6JtY'6v%ůV}f\7' .LCa",<~U6D_dzap*k$A^?x1d1uCyN5Ѫ1~>(JbB]LڊR;>9Ta_`Bd;p|\W@bpk'A(KUYjڃ@,\ELaeA7m>btx%{NzJ^Xj&t5n;\lyy0 眬:QTEYC(WR)hۻ?%9}c[ayه3N mtq]ǺBlBD |6,s`ّS(+hj3|AbRTh._.ou{4O$3nȆ4V+,ai|a*@ -FQލ9AX 48!qp\X]噃iK(wiH9ښ/]y>^|}Dwl3rR:ʛ6FVVxcQi4oWPW؅fu袞5Ç}oA`~#w:6 0~jRL'Slۧz14_o)_<ƎG`]qg:`JMGT#8I&ҡ@${P3o[VRQ-5wSa%%iQ׼MiuSt;U25\D*E@ RBDHn)!S8]x[]q (FRHpB\&?AaT.P*A i-U!2H1h1>phWkQ:\( [=,kb>>AiުIit0IhFAN-kmI%wf騸cy8 xYKx3'hK<0& \h .s 9<NN*_0v1%OFx.2&d/RG?j _\iL҄Ғ,^dl{m-UcC yLM@IVo*FQByJ HCʁABj #2*ݛkvbm+RPO%?n$8/u~/ul]J(1k ~`oͷǢz+J_%sk<oYy6]FO/ݽƒyiڶuܢ&剏4*D99ּW*s- (rՙc1o}R~9}3qp|GqU-SH~ M P׊Goܪk-؛̆_bK"Z ]Pq! HiHTr]\H%bӝ͛o'SҟWکŶ6[~Q ss r$;W\"a<8)ݗOi^LӃ4U2ef,˘=8J3tS"! ZX*19mΪa,7JMEM:w2O^2=O2PZZB|lvkp .0JQy'P%IjQǵ7JZKb.$FͶ}<%A~O`ȫl3P**ģjwn ~ϢiV (=`*8J4N{uKwNHGN NQCqRP:KKjY s8ZmͿ=W3tlf)t 5z^22gL#NuxԷ"s*  t \ҋ6a'҃6\6Hdͅ)'b5UBzBPWiA+rJhHi],Q2K'Pk_@FXcreTrQTMԔ@ Xd*| '*zK5Ukv kGm QM*k깩i։,BgCOuB}tU{8|>#}AWLRE^,kT8QQڗQvI@eN*pFqU ;) t,jqO 4{>sQ#uY(*w8~LR {FT#H䉰jt0mE^C:4'#'N=Z6>UJb9{#UHo=W .G9q_{`}&:Fqس((y56iP" D~9y49㳮jcI>jB`5]3A0N#s/{F1wGo2v*L)bx9Oj_Ѹ:^/W O&*.N+)'&OogBe@lhfz_"?.{}S7S:9S*ZiZ *IZ:ʓ(Sz?8!CB5~JnA'=(VhmiYfRo#8<7n"~N,n_>vnϵڴmdknrR>֋\nPtb">p5< Jr! 
Ѩ ѥK{}1n\i3K퓤n`(C,P;料 pߋ|.կs13H(NUEcRӵP>uxW,YY[isfX r8*0j,،4S:9p~~?.]G=dw!꺞bnڧd/ϳ`O}^,=J}.*  0OU3s=b=Iȅh(5Xh4zCtôUapɂYmDf"J,B\FTrҿ2MHC)\rqGALٺoׄCKt5 sNmX< QCi[,E ^ Ye$r,VyO*)U&K.r=P5,/qܹD ~Dık7+)J\ ̠V 'P Fm Ï@7U}HT&/Cѓn(à3^/C#a.W  NJaic W]OmF4ۿܖp_CaD y̺p:& wa]Ty*PjZ[*z*`Uuݏ 4o4P3'W÷1ܶZlCİ}M¨aCxBg^s4{z~ @hpF;;2/B4sʽTX&7]f6O("9~Zb>Sr" W m5l2д .7ⳢZܚIMlLiNY/cptŚpAhR0Tpa/wQ_sMzL@t L#ؼGu\,v'k @2RaG/q ,dY( ?uՁݷ#!X'"V+S?>bO zڹہXiEg?L ?UE* F^Vz~>>fzf>=z Ͳv~RrUQF 05dlA!0>G8. 2KcE8+9=ŹKSjp)M-RZi2BH(X>݀ h髃b\ُϠ޹HU1Mz 7=<<̲^Oo;%: ,ǍE=P?hh4qiC>&ne$owEd8{Á$$h$W !4H%}3V"i0ѾJd[e+VaXd†nHkF0,8{̢'@ԤH5)V !A  )nB,1?@lʃ R*!nZLjt728A3p.=uҎ.>xȡ7ϝę|f خ)!005شԐ fZ :|wM\H Х\@BT.:'6m|8KQ{GVLcƹ_hp}NJ |ysK©DB: NEkK>>^PEBN%=|+H>A8K#|~W2Mceͤ~xγO߯_?qύ$07~`1- gf٨$G"(-pՓbY8:8+grFMC.I Ln^:ܸ24PJ,g%I|v토P:hق"v=i7TdTW(161&,Rq3$E|J[s5ٝŵF1`) XN7|5i:n ז\TZ]bYT'bDG[,Cއ h͌L5\h^uTEV~k*PkDz 1{D\/AI`jeU/J&3xgkwZ9{;A<[덳Le;@]$hp4vhu dISSS0 {sSP =gA-';DsMʸ|lX /(46} 'w.`+PZJUp0uBav`Vo۵wv=axv-Y+:{J6g sםf] 6=)UuČ٧S\ݍzXq?f|[DU/k /jFjd,°=O_l\N K m$PmuyF900A B**f+>(,gi3.µnb/#Ԧ>:bsg,nݫ_͐GP gXHoUK<ǖƚJ +3y7$b/&9墛7C}o<@<8×Bі ;.'vMSCՆYV/(vU.OI$ (j0!ٞ fy 0*1fr0^vQ~$'"!(%"1RO:XL˨0ѕ;6FIƳi>*\= f)ŤRn/`q{+Xx gopڜ"瀺=$A3pT|t!ʇP>!v~Zܵ[W;хffmohS%%|cA#+ۭa:&["V͗)ێC*D|{˵h`aj+˯uh aO֚O_z8='5C ݎ&Uz-&G}d#̵b+{&V5}McqZzq%dڎ]w0鷩qC%v ' (“8<ZM`Z~Umjve5bI; "uHz}x~kpZn –+;bIƷjT1eHLgZ>;x!^ѿgm,cE4KVz.[>P',뭿 ҭ Rd+7.g)OX]| iiX~>3Ut 5BJeДMdNSzD1і!Ћ'[zMRf,pw "戌aӰ*e(c QC '#_吖I/ػ+g{qs[3)Xiлa6sJͩHSX-%w=X=9D0'5AWa*>$Q(mhE^`V$GJvg.5EI q5Q$Zةjpl+_ܤmd:^pwm)9`w)8Tz~5_Y$V]&P̕laLjndL3YN%WJe̗1/JvW$D5q1;9xd=K~#Ϟ3.lGA<.'6_)Z0p#e(&J ײ_ۘ`FE.VKAU.Nsǿt^YTR&9эA2!q uCuYpN VLj"p,⽋E9ɧHXU6hך8j/J,iۯH(`e"|g>!? D`y$$Hyp'=Op6yIVvRCԽ.z yi+=VaVlu(Öe~6T+*胀/EMKʼFVOWI:%"KQT !"[1a0V RTqT4}PVg]d,p0ƈwJz wt@(ѱ]r#87j"E>|PweSLj?RQRu /Rh5~-Gv]7G'#6۾a|0I?VW Q%rS8ZWe/cvFHuKOPCGy%:;ʒ.@qz}(4aA4 U׉/~i YΚ/7@C}a2B*S*/ Grb,rt0JRj*OT$/CV9;vA}!PiNS704G$n EBqqJޘ_(_WJbBl32'/9#rRm5"8[P>s.pV^slN  D1  1OxB\Sε$*1*^xY>-hfZ_Lչo3EvQCU! +$?4z~;Mc\;6,TK1!--(FD&FJ96UDz#rO)VR<Zϡ?Pm1t#kD^L?[&䃭Λb/&]%9,NXzi 4W%1Jv'H֋[[w_ ^ѧQ^v̺'FxB;?z8x8~#4gJφ%[8-i]4YT]GjsjE"Q||0?P4FAβu A+(Q Ӹ?{d]>f$# iYppmڰ k:9=u <x$5dʁw8vH@/¸/A!vS8] 2JBq9nm>!1|_L0LV2"-HzͦOePPaJ4G:{hr4@& Ė6Fe k>:# E}7RC+]#wd1V̑%8Rz@Y@Y`X/`oH082(YNKNrp>}=&X ˻)K2q|wa%Ci+u?fDQ]qF/l@v#Wʠ+,g{P#ُ2/s "@(H$שIytٶyy;?|fH|K!$XJ}aVM?$ˤ(ƻI`ꥉA&_c79tG$i-.퓡 (+y2(le!g؏CK 5:-Q([MlhL \U2Xniə[TmPlgݠC*8jiyekyWz$!Zy 4zn@qbH1e;RvaBܕj/%y@6z"JJFu-))(5ZsGToil5X`sH#3~Vs+QYhTJbU;]?dFyûӀP51_/O5zDO`=pChݤPu-rL)L2;'28 JS;j6{7Z=,`|m]$gsr>y=J|g5=2z : Y7R 4/\*|dZr 0R۲ek?+tDDΉٞ]nVTSΞiL9ixag6 (pfY#۔M0Ӭsֳ|[8(zkN~mA9&X)ҒZ. 9h N Dܕ 2OD1Y0))_A6O5Q@"Z#r uB <#`cY) iya\srmX0NL;<ІC[sX{8L_2bdć%QGǪ srڨC6<2oA"UxV0Sxy2H2rt 9se KB­TS;L/աh$r!b/ftk‹Q*LLM|v.C5k& u202lmRFM 6)}zt]8>b~ YzG?#}UF_G#y{j_h#AHX \h>K31Pr~S_#pI[{$mֹXFe_B烥x zܵ5q on^\i ⊁@p]sT=;{Us)9RX\/ZS|.;ڽ ʗ)79q.(G$&'b|H ZO=ѡgrdM =C$dvv+bBYp* nsBo'm/bxg5^16}h|vS܂F|s3#8hIG;ρ>jZe%^K?]#4bI(ųkh(P7|s]ž=O$"^ q*ukswDsM~!ݻӽC$O<q&ۆ.̑}bPv_W#9[P#4=klTSsFZ3`yڈ }:u!BU &E-=b~ r`qO[0ψw#h WlُDjugߺq)IwR~u_M6FR!zd>}] hX"/]3{˽ǢM>brVHReaMSM86Ɩ6¦AYP?02BSa+[.wh&s`#p֚5{)GNw *DKO݊A ycƏH8VYkKU$*~{aU?٥&oKu\Yo ^" _Q-lcaGqOloa2/tk;./wk%;NB3$N=D8tv^;mevh/a;z[N~O &.Q"yK#q7Op'8/p Oq8O#q@98)9u"Gv@"'s9hO#i` +b5[vì17u/gjœG(Pء& e!tmmc6s_tYѾr MԏGR BhߏMu9~Jٲ{ə`ÒhEd#@b-0e>(</3Ut}\6X{I󴕴4E"ln&[@ {l;SHmؼx0lJj,[·Hi#Y_k0Ĺ?&w nd3}Ƙ}#@_D?y\]̺]HG#Լ*XCG_K)Eb77+;SeHc=e[jT]bĥUχ[u$79Zz4DH]htL,}+i$Ii̩wBܖ|p7n|MWr E' #^r(*b٢sRcPz$y`D`b4-8*uZCY!!5+Vxْ.Uodp`D(WG N=Rũ뜢 PsND1:$!t#c4]xPbU@r8'Sh`51cM:ilep?It%# i2|N%Qh|j_[ UN'EIw,='&Р-4Xϕ\|ס ~T@ @\cBu(5m5&QnVyqB* 7$bRNGE)jNV=8!D&HF=!o\,??z#*tXT:fcHȊ(!ql !u0aBj45=}9Q=&-B©և2F! 
ނK4[*AbejlhuŠ K6(FL'/q3r2%;\r2+EE7r21Ӽl$ECI*Q!U#&[{f(T O-m*=L:ۡ}7`j8$9``Tu7vC;+EJH4G,Ykٛue1Xv5XtZX_Cm;#K`;ghk8w>>8j{+@+ m4"~gƾf]xGW@,zޛjjswߕǪR*)5V/_#!˶m۶m'}w;Lew׭~͘&N73Sn2z3oun:CFLf׾E8.W`H34L<۝eSRo]^9CNmy#|Vv֎d-whWb~'u9^$b[`ݶ6nYVw!Zbҽ$N.Fۨyt%2 d62fǦFS>f{Vo~ΤȱkyYL1}ğ7گfš(abkl:s5?U8[8v5p.*ۥ ɔSh{.5bV^X̮ltZXrl ѧdM"ڮCV,V=^b(j}L2n`rZ6n2]3^:2v(O9E۴2ʵjέ1aFx>A8Y58[\~mxNλ@*5BrҒ`K~%2r'Ҵs65Oi㗹9" V䊋凌ݐdoWoK"?d N糶7tH!cGFAǻo)gHM#,7xIoldIFFQx!Ơ9ll*8! wB''le^,ovie{.'Tż@u"RZWfh`/(v3ur߼!g1=`?گ0 M5>Q}ހx]nP5؝ESqvD"JKI]nH,8wooow﮾罽^ gw>D6?T?]s/*Fxa+u,uiAґa u~Ter\pz/r#xSkuIucL0Йܰ}@f9Q~&sOfGS#P\P9جWiċGSO { d}s'X2G5W59܁ͪa;x*SםCΝDAz@j_pydD=rN|R pM/p"bzHKsj:+%'\ֿiz̛n%#avpN_] 6mWVۢ2H.BEK5v+돗ϛWa7Mk18 x,nCcA` Ė3F%m4qѡUQa {ii[n׿837/3I eAo uٝ_Auu *ܥ#v 2edMbK95j:dێ;wھvM: 4ZkT'칓B@Hv8l0 x]_tkY@9:d_e[XKĠN:tl#uHpN4Ҋsnvi4()ȼ;X\[A RtFu˜Q:9̊^^tD6_d$$3Z""nTAdd\L1 `. 7G!B#,ƣqfا@G&[:`G5j&A7&BJEt6oEі[L#I[;QdK^:֚9h(]uB!]RdD%eMDDׅ6*$N=#=KUtrW? @KWǥɝ} Aۼ{nzcYZd!ImsGo$Dy3gЭ`* .+̬4YF7.B܌!G2|uŘiCTdڈq0Z%慗~A}ĨpA-*CZ4~55yO::CSu7 {BJ-M! z(nY5\܃#XRtʩ5Kp2OwXsia<)n!4~G=SLtp_&*Jو MrF~H!ut Lh'E-ݒ RnؿY:bb?Ew격WD[jW49Je׾B;?K~E\j/hTݵ&FxUpOSC+e[bKI`9ZiOD_kB8l-N4 eE #yUp׌2:_Fxk p[EOJ8hC|3kL4+.cj"VQ2|'7>{ߠ+UҽE* OAG9m&"X bz`H J",OLL/yv(jmYWڦRyˮM "`iraGdFIc wEX;rhmSk5=DM  u6vjЫkW{D,3H7Ol.fQ*$S+7N-^, Lo~8$o}·i VFJ2dg="x<@"4cZi*DefʎզJU}^RScS?F2DP⏇#qVO7/`nL2p dK],X`V ۠v`߷]8at=B ŭV+ʼn\+ڬ5Ѫ|xȫ|ʪp+ԫPEv˙puW?$t/9G$zV ׈_x,gAEx*>@dY08Pc׮.k E [-p%H$,hWMq a쒁yɬdetM.Z} <4cpv9c):N\ cs3U  d:˒7*qo#KPə/FBkn/?d+WHQR#۷(^KSu0fimYGǒ<(J9_I@c7h_?ݓgqGXiCJnxE,Ktr={TP3mbb7MgږN<LP8gz$5Xg;6eKe,l(n^S,XY3Dl{oiBo ׇ2;#b]wȤc|;}ڜb'~p'Qn?P!߬/ {ZE'='2r[vc X3qW Gfi7l/$08==:b4K3̒6҉MnisnfގBd4,df:v6q. GyW=ZU|5=:YOE?Ys؍JMlN2-~nEZ8G^a8?gВ\ǻd*1^z9 8m:ͽD^=@8Ch.M7(xw>@^B/ "# P4~ ވ3*V\Z&mбӠ%ӅkL(!<*Rɉ|QwnvwGE=]O>Xyٓ5z~9kwLCs'3MB9s~alW>^d, x_0yG (!w?ERZ(R"SdU4dJ[a_ DS^Gn:kWUߧʁyf~:Gg|В޸qeJ}Vq2NVidO!:Z'}p ,FB{o(.r;i6 vU%n+7yDVh`Jtƕ'JǛ7[䔥?)H>)[ө71Q |/?a 6+h)?0)%qtS>ݦqhaKSG9bqCaU~gt[--} (=N١jpSk;9&H(rĦ@'/q_r:*ФFJ]q?wk1vz3y *ܒMMjU@=VA̗]k*X>!k2d6po̩ܖƪ~,) Qx dg,qgq{YS .B%@eo|? LMlf swi !n59/ہn[,9Kq4)p`{`_Xv!Ҏow -G-u1N1p6ż<91[)BϥQ7vh Яo(1RSe_Ak-\;ߵ!olR;s-?HxI U4-D{eZ;M%!9Vo}q;骄m\:H@_v]Tѳ|;93~/{ԟWEn-J'+-BRr4LvL0P,-KsM_);#_c.="Td~%*Kt(07>`⒇ J73ʐ"3\v{`Ԯ5Rs[%482V¤fZXoS&<2߿7OײPC-h}ۏ3[ykKrUO&wc<ﰮᗡs`'P{oWQ^]6mhFp7cG>AOk0TB{d{[.4VnC!W#>yIJeWhfu G";'ilJ+ &{F,όX;Fm}"%9_QGWG!:6}>qNC+< t/p^{wYql[f4/8[ so4ʉ ǂXGeI<5wB&'5QTC$|Eh#xl8ӦyETk3<;Eauz0YskTTn.qU90S5*©:>ޢG_@pㄞيZ]Cxh  F)t2n.E h%zebUwrcX[}$YI|R-lZ'j]Pp9rwM[?6}a47γT hnUxTX2 L LT8 DKIPm/2H0Ra;Tx_AqkT|7:M$0i*FwcZڸyN{ׯA _hStqN/%֥FK[w㷁퐦v{<Җ3sf҉1O]ŕ,)Ql)у䳯/5Noixh4;ŗȿCJ=7"-8YT9Odb<0뿯^.F?@稟ut9si׿˽nM{c&lWS:o8: ,OxKO #%BXߡ4aAx$RcENbS=c=b Dh*Y@ YcкELjpTBImMGU!#g ;VSoC0 _3PqC sNܷ7>s3b_Vk [Iᄒ0s>AV3+\("‘;A@ T Y$r=rN>!KvΚQe, )&z_vJ/(d:I߉Zy]4VG q^T:to~3r(Re }@ .+{6]l:+sAd lY枸=̖ܽ.˻PbrwxOUY~eL Q!r*IbK`"bmG=xBԇ7GiF.,Bu} FVl/0($c QM̸Xh- AsZbOA5Ό21v}<0-+>/s,;pX^z{KxXНWV#3&n8o=䎷w(V 2 UY[MRVXݺ1"FuFncy뾆^k-C׾o߸~3>z*tx{y1Zzaa&Pٜ|wd5Qc#SR_. 
Eçqk%"ag3Bb<#B4\Pԣ-~s݁/Jݧ\1yѳn/ x8{rpoV yx7hLc q`9c{|m Ŗd !s@uȷ\(b<@އgþ,pXw!4ۛǏnl# a cջK ɎMCQ_BQIGx3xo˫G-Qi&-1κ4}+Sː < 0Rd![ IηtT=>ٔNh_-A%bw )8-<~LhKtsɽ>*ButMdzhkU2A%Zb?-%uI]56Sfn5~e^P8Snj.FzZ#Hjh~#w n̲s1Z~ޚI \oG?e({Gvvc||HӪw#Ixkx}[_mUG֨𝂱])ۣlͭU%u-ތg ѥ4}=)\#_j`fhn7U WB% 7= 8[/3 F8z),RΨ1唑xb-Lď佤8@/`wsX*)xU$])"[gF>IAC3\B_R R9>i^vs[iP0Ww) 0QzC!-n8B.T&ŦC#Lld$~|ibĨñ;t23W跅XBIĵդ?:Pd%wGyod'MJV)0R.uK]1qqC-X-s 8GvJ K`A Fzr{Nal uqK`nvHXTڇZ_E9MOtjFjZt[w#`@-^@IL 7^8iGyd55Q(y+ZcXԊ\>&>Bҝ5`w6|]qj=q"%2gQ7s(K~vNi/+~ōݣ ݶ쌣: z>?򤛈s"}`yJ}7p/~ YN&%l#;3 n`.ITL2fÑ0/0370ޤ)0bSkV F@W^86C*jpzVe*THTgUy)F4:M,TVYfs>b0, h5{܂M\zKZ$2.БptLHwNB s| Yr|'- sߕIp^goE[)13Jzč:PЩd0<۷7$-n4b ˻YiH"5Vjr7n2NY`ܗdBJ2wѐ3SA(qܞz}}ٶW*d6sg$a!Ҟ|tzja6 QկytFBwT[pyŊe窭g%A.E+ْ=J9щ7`+GpR~Q\lKeV1Тfq$^o58D~<*JΆd"G!,V݁ M,X]mh{|ZM{8% ώ g $n_Ȑ qғ+S&뱵Jk R:fBz4\=^kU0+sAa '+4&ubX{_: +BTvsdUdb$Tv%;Pۍ*L~~7_#'tn=[a'}i$pX;M,;f1CaQhg2ɐw<.)e晷)>W _C]ef'a%M!"`i3B-&[tmQrx閭|"JG-mPr>"%]-q,R\XNUPRNkwTl!kR!KvO7㑉.=+pGp&9$pbe,H6!gL <~4-ء||iy׉\2% ;B{c[PjګƐrJ>xTw#{5{AjSrշ-ʁ,yE*_`*sY|ě87K0s5D@Rc/v`Pk&=PH5DH ?9 #` 􆀱_jȡ΃ Ţk@*{bۊ5ǁw˟ +D=3j\Y_єM+,FUK_irF,VMSko̬Z7!qW^6/Q7cJSo*zk[3~s?xzЎpW¹C";{Q_O@~} SghF6N׶Nm>a< ^yw"=Pd\_s}bsgNДnhNn )g_[Rncn+o[ZEg'Ƴ&_ AA[leAibl<2}穳TѶ$6gcuN2ΕZ6-| UEqu%_ LgA]BxDEn)Z_yoAJ^YrSp_`6TtPU\hr)xtdbkx--W]@S2b+bgAmw#C\B !YDCۯ;o8$ 2)Rٟ0g̏Ɛ;̟6nS͇4)9q=Vcn$zwAQfdB4GT&Y:d(c d|@dwl}PMqx#NJP$PiB} )QMjQH903Dq>46fGfX.]J3`"Ʌ'b ڏ)s(2H~y)MD:f).d ' . $Lh6z'qMRw&X(Xq]-;$XOY=\]޾>KǥԄB}vp2D&l)\04fv3h`ƈ(aBlUi)5݂@L=yg=0Y@{B-i՞JZ,Q|6:LYw1Έu Қ\zXT?H]nx,gɵ$S sJFuz- uXhт/tjUXd.} O1 sL†RfB W7 *DE] {#'Z 4M|@,MȜere/Ba/fFДxg1Ia:_lC T AEԙBrNn7^PKa`E11(hZ3-^$[&Fi>OpGH7 sG K^nI,Ÿ|eprd́b]u=u.y@| (̆pZƇbygwB~@"==^{8K.n V{džR)S02K@c]%< ZNU{50L$,DEJVƠNG[( G  l.[acQE& xc}#eCx:;IcL*mYy4*opEiy\=2%<Ա#hsd'*(X>4ïa8̩"X 7/bi(~s i&r?v-s3RU|-WNC2&!`T,:pd%4GjؙvXIh68"B0ܧL%&c@cD*oOC'^h󰃅ụá+Ĩ23)) Vf p 2ڨtS~Y v0Qd2|'NyWrItԆXcU )0AL去`J1ä\$<f>NY D4JrE?")/DH"h⡕wU5=.jeBOs 7NjCAD]ˋ@UgF.o <$[jf Qܦyh~d6vNPjbϷn!oɰbԪTu|; :9TvCSʿd`(S-+2'M*EEnGr,R/\[tJE9{#` -qo\im <@˒ȐG*`}?’8V@ A B!/֋ԟ7(=>W+do^Â2,Mh_ @)Bm3z#prԨg7=6#%p"1WrwjvoOʀ'- {gTj0`X81y^q+Mu؏8-LnvcO9ݝc;ϑFc3\#͘`Al{m='Ku |taЭ&)zU>BZ~{ZD`+5Xx& e[9[^7c%\;;X'?d7w\ӬCI ?kXo6w<{9 iEjg1"\WdoO-}OZa*tŠqipJltY@ g<GrP=b#]0 qN"bxH&'])~.gcGn.eG+O*HDKC`jœdj@Û~b!4)Bm/@!c_xl?zĜX&-uxoW=osJ-uZ9T'F'++wR5K-(pX$슮q9SMrqGՉSmUz/s>"7pϛ1Xu )Iel;Nۼ▘Iv|0>; q| c▹T J:?<\5D.}SK|a|37NSl*ްme0vNSn]7˝۶R:M@-l|=K*4ԋN"vX:6h4 "\x[ɫ,eClV:KԦ,|^o{L`I|vC[ YXTKnnW>Oo\zrS(8n9 MW!_>G# 4FYHh~&7$h#P4f5Ql帚Wn}d#f HгX~;NSun;(b0CA`Y0~V4_T9sjtxxkqĽn=ߓDVnZzAe/MkҨI[Q(J(rdg];Z$@i&8%fI3%r!D=98I*eH4Uu_VzJf>>IȿHE,i2iWm"Z(G$mG6UKZjFc[-DEc!_,E(W1STFζk`B6r%*F0[(6Er6ɭH%Z2ͺdDTET( 6 fS&" BALVe1P9AyG{mcbG,[fSRkS_ b[$[ђ,*+l#YbzHbI@Vk9Oğb&H;GqC(,m_+-d|v1ÍY5}fy 6upm5G/"foa7zs ԩwnn/ r cʓ+Di\ĩ֛`[2}@L3oZؔW4Q+*A  EF 8mj@6,SNJs93<9~dil=7c_pе?$)3i%l?r~뾄<`PYBLT #CD#zPU$p Y$]Ssi{BoZ=/,ie܊k55 fワP@8s`C{Α\TrNH(tTlrkB _(.ph5gx$b͐ya{FG f53n.hl8%k98ea~xKB\0W!6Khufhcg38o;Vh$Skׇv?sYD?֛mw帅6`.*/H7Dy)Fivp|kjOc{ēz_&`]^%K2Oq>"*aP{75R␱7H锌-=DQ=eF\v ve@N4&0FƆ@3]TjrtE]Y~ Mvy_[-!r mIDhj1` s+pN.kDrlDO߰<Wd#7"iΊ},>0Ō Hۉf 3 _!RؾfT= Z kk]'QCD;=2LFlj=$,h3=L=ABo"xWZORiZ/r+ȵ7odkAaef7Cj§(i:P6۞sX^+F60ۂNYZZ$t""Pp;N3%bP`vH]3^əutp"`P@=9I$n| > ct, a(0C|IM1#]wX2d昣tCpV)>"SvI+E[OpJK5$mz* S{C5RD*HԎ--U[Οy* `u=U3.wbU6aoGm ƾ,:  9ʨ3usD|ئxRLKJ7@$g)8U7F 0^-=|XeڞbĆEMj$+ep 1 RzXOQԡC}|}}}|/!_}ގ*C>GX 2W_o/kT'.iAT$+BKb&0ƽ<QIcyn" T˽UĄ O}SvvC pڳ"JcvReป c@PL rLa-K;f%FD=GYuo5m:Cmpx4e#XiPZt,˚G:Zty !3_ r%qǶbmaW2@\f|G!Ams?g|vl*n4uN ^8hw!hp8Y\ۯ+FN|ZR1XB +NB=9VGT&I֡a&s;Phr0 WH'0ۭa&45#|ee 2:uցUB3\hC'Xi6`}}'Ć!FuƦDLL9im3g҉XhVN5I~5Db(a=ռO(3q7EԵ| A'tz Gn#gG#[*:Dj|bU{ux:'2:p|{/tOsU,# 
oh#Lq5ʬ'y94}pd#r(NƦVP|0W;t-' >ys dV@1LCjYyS z/+Av#xE7{87X%?}d.9C2k^B:ީ Q)QXPk78A*DMtrvYiPe+@;#TKB#8'CL4O2/@HgC뿒tN%f$c_,K'*K7I=A(=߿=oUK sZ&8tL/jU? u+W8ϭ%o@1SA.IH`P 5HKȵ$ C͓@VC2H/^. 2j׸]3~c1u<|V>w*yH}y&նcԷrn7@o&b B;#Y)HÔA}_@M3]cTJ)㲋:V?> ?Kp9pf;#tY!ADz) Hj`O|F:^ÌzMH?wm ):(OHGJSnya7Njgr^F!ajEGuL=K;%{`$qV#}}kf2*LSM=\݅N<"a0e'rBFa ]OgLPH~3*޾sQ]7u\:_BzGdt&6؁2Kqg_b {hŜS}*>,[hKX#t'$";=x}2_2s.iDu2Ak ؇O_/1&w9b# -6p7sh81>/lnlޝ)G$.^Yjn(ڭA=Ҕu)A}[ "%%z2|pTJX3QLezoPD*j5`=(k#w ǻ1tMʛ?`| R[ʇ!>_":n8b~SB,k $"u9iJ";S]h2Xn\6\doo"Ϯ?d`#k\F-ZLR$w.Dx u2z ynJ,˿S?;ໃEeux|kZ]adۣy$`In;|/ `lۮ;Z$vjmqǂRNfF8bg]oQTpPc*YĐN.0G!9 wj\ KN>łҕ1T c64MG=*@)9SPi=՟aWhGQǷl#W]*@u7no eԋ9*@\ Kt'֌Ũa@Nc_u#μ: ߷DC,r31?PƦ>M/h2ؿTu1(iNGk jQ9BI]g#Fɳ 8 )R {1',P =~1M1wM5kSƋz(h0;^&c茦ě~YPY<yR$OG)|js 2Kq$β#CO 8D|<ω(\9uഓrt0 ˆl53x~rh~k') }l5.[?,k WgVgW B67ѺVwJjHN\K(Zhjkĉd\[ Tj0EYomzvkB9u^ ێMp/3p>9>{E7>P~O{w>~lomn:6m~:>hd!4g("u2!%lfFu:d廘P2ؐQ,aOp?pLJ>QrʡKf>w ^C#5hG|윓E>)8Cũ+t6~tOZ'"(NBk[SK{R`[X䘶lH8:=>E 'iY7g#7~G%+( |MOL~a>Gbe.}<#qd'})^숹;Ju?fYGA16nj"~wfys`zY]u8hB G^ﺕt}rE ff j:lj*lbDF|BS57xMbU?+f̌g ѱѱ z@8 W /jWVlְm'i_ #r8>h'jǣv0nQ<^](a&m X٤;cBVMv}aO$[VwƮ4;6m]z:z-jvXmkȾ*W,K~ h[KJpo2i ꂺꂧr>1/P_2kf;Ѭ*[Zns$sf \O<32Pˣkd1RKbq@$`u`Ŋ9胥뭶0Bm%¬[x 6Z${5&I+o\aݭtD LQ3XH*0z]L-p3?f'iaԴb8IHkvП6as? _=uP;SaԗL>{"xMXh0тSZʳ '6١f,L8A?EU(jAn o.5`SL{|^:znp\͒/tV|ؘ'-r`C{A70&;n,]/]Gǿ@MϒWS~Gba4f#NCACw,;D5xX؀L}>/wFkV7/Y V]沅0:5\뀠h!3O/6vKR)y*pW<}kfDBE O ]?4# br2ZW zcȲf13]ِj0hi[ToF |ޥ7Tcw+[yrƮN1,o؜M10+$qkZ\kp֎lf>/X)л )|V0Q̥rjMZ&P9/SLp:d+ZDѼYX][ob$f<. 6`c\CJ#&10gYA>@<NtkLN4Hb'AL@!zŌiGab2_oôp2o4_9 I^e4{Ihi 6kx)xQbeZ } A7<ܭ-ˋ gJ0GOu*njSD>?7A3zX}K5{s7 ٙG^d{agIrt2IZ#K"/?r9z(Üˣg}H [*ƍz*NRA|O * L#+. Pp8Tzլ^-;²&k^4/ˈA]SOD쾟! ]60ܧM*7L+t]IE_CjڡxkH6杯5m,cw%%vUܙb=Vn[L\\4/pbRk' _?aƍ[mP/Ny;2)Uf$W0 4D-Ȓqބpi$$nfBˬ6 7J4 F?b0BW'hBvxr|svVm(@CU)bz V]S@jY)ڬ$_R.e4#@$Zmaqޗ9[ğՕc Ħ;S=*x>2D}玐VkL{`WN^qJ&#bI.<ݣ!O $!CN:p "z/DŜTc +a[(4FG^YAʴt"@cwL-ʐ[f$DR,YijAZO8;B}1`H:QwnZfXTYڴ\1q<0ci۴-IcX}6F(:|+-.v CLib j*76usT]}h;%sv)vKlv{I"2Aw: aB|q5Q֊vo| =B\hI#_W:(aaR6[f^.Mv@ EoSD0YFlJ5YOHc6;4`qg8@5ԣ;A+FdMA.!9HQ!X[Os2 y:F| pO~gd/ӊ#A[ctr]yFj8S@:ς.p("y{%(.gGR]b%鏅l~!./- j_bG붽*dp.~KspʠT7*{x:ZIY=!܇g96?M1>YXѨ ݯ쵯ly9| e |Lz\be\?&\f9>9{fikEqAͱ3G\}Qoo*e2@>J/o2 oL,Ƙawə'HkV wZ9h\$IFMc$tx:AfhġpPS'Y&Z8i:Tl]H``C%yյo;l^#*!5%E%b:L^z3C|\`h"wS{>E" HB= <:RH.^uCbYg݂>>n H4Iu?1л?v8A5V̥D. *\F<{4 Te_,=wwb6=5h+V̮۵a3iTT^;WYXIuWlbvRD2K/{W'1Zgf\js>!F:x]I,& ޼&i?i_C;-ǃZT`P?:pS{&k:,ݲ`őg"\ٸϾbWn|=UK+` oETJlb⒀2{0;^젇aRSE@ۜ#)|h+2Qes֓8tR *c*\Xf?Xy p!4-{+*y 22-qΣe "ɼ#؜T|Z¿}[5R8U~9>+ ma?)>Xe|$Ol r"WIM^R]uB><䠉j[OcsDUsKH_/gRs M75Ch'(^Id Y p9*:< /r-w'_RLJL$A. ߄9r2* P_H ߳wQ/] .YQʦ:>tߗ?Ӥj:7=#wl,Jt,CL뙔.T4J^2ti20#5D$IP#sP~Szrv<]~+[yYE9It Bdo,p'Ȏ$\o32=H%``Z+q 3;T2!o3AfdϞ;iM1h{ot׭$h]Je?gnՂvrrN1K'AO,ɘM /gv-v#c~lʼnO?٭pp ayU L9TeL !Ƅ?Zd/:AY 3FH9Sa۟Tjf}f- p 0l !KtOypI1T+OU>BIb5sxq`tzrNaT}oln{be!2yϙ#Ҡ02bayK0j"s5ê%$YtsPa 0?=YX1F޳ d2t.wmOqA&lm*0?2az \=/E$/f_y;#)ioq_oo]W5;^jCmXed6V+e^$M+76(?rKwygtll_h6kv,Ũ#ّ ўz|f-W@k#p46嗤+D5T8B V2JdpVBY[,hw>!ωn)+7} #6V1%ޙc K-G'_zKt(m薴kǬTe7m40vKijg-~&HE@WR=q8$ϲc܄#"5RB>}"@Z 9=\@Y +qM ,"2.j_;*e8f#F{4?N>޼xQG$H!؉.&Ƅٮt مiJ5cѽ\;rLaL;Ki{Wg}u!ՖPU7dN1;g^2`Gi<+$P{ȟÕ kk?! ]{h ѻ5xl~4sU`/ FRD!Lt#O_ò7"JnJf}a~[`w<=UgfVcT61p($z2>3%=p5< ۼ ip"is\aoc0= v힮zD48)']fv÷f<+TdB 9Mm1@n;Ug _B|3}o\ HXކ&.HѴo-N Rn f,fZO/P@uş7pG78N9cvmnlz P?v=(#%TSQt/νzjR\dRFd"l6'AhʋkƣD4o46SR4p2>  nOi TQ/yyC;Dl7C=Mv86+'濴޷g<?<4z"-!Kk${?9!c@-aRbV!6;0MOE;31MpH]Bf/ twbZ>Q`8-xDzB4˒yJftM2/molX!# uQMuIQd lK["d!dم>) /S>V$Jnŀ\着R4Azkx׿]>&Vt4iJSZlZܴUcabMFUV%Va0$z󦦾HFw@Rڰ,u.S؎&rӁG`ҫ/J\/'9Xg"Vqw繒"EYIpܔ9cM{͂Ui盐~P^.@DG̸ yT8}.Xcě>:? t-f"28HDj62T^G 錩j/ovq|-Fuڴu*ޖSB"@l? 
5݂m۶m۶m۶m۶m˶'wռxJffjԨXDž Wx0Wʞ{ ׀K ުg$^ iЮocў<͙~=3!/b/]gl:E#tw9 X|X/5tU;t3;ɮ"7y%ՂˆC:!v:XWGMR-՚ !0@IDCQMZelt q y驱溁iF?7Z.nr.&⽫(`*ͬ_"AׇR  1":GNn%)8:aV4I?^zڰq_}l6fs; gG ɺ˻bcN}1@ ŷ+6&n+`/jYG]O׵ՆSF 'o_;E,i{ PhS<5a}$,I0Va̠xO$/ɵ0phԔ:9h+>!PDOO@Gk{Y3<@H?SF<&{]Y__w]s^>"Zb;9U~AC-=ox8'$kOjϚR~O嬥JIpLP3ixjy>2ڈ$9cn4 #:cjdT9j>fV}C8?W4K`QD~ S0Yc5DI,OZ>wz #;)rcڳ9ٷA. Eo#Uf+D-2:|>( yIn.1Ckc+Rh\hj_ Me CŃ#ꀃ>aX6NTAxIǑ~;nr>Ʉ)ᖁ{ :h56s#]{}87A] y5>lMueY ߋaf ]|tՖ h"YfB@Ɣ0+Q>OYpDȶ[#D#Dqۏ)z19>ai v|Ʋ_!mB ]s :vnO.̯9ɏ"rv/1G\`"/Ql2@NTWfjm*;W_d{Hlj)t"d*_B/ĥaJkzX:s4(0]ODx }:uvAOkI% Q=GKE=KEŗ*Q.Q!0E:o $D3q.(L1v6k⹲oAp bJpdАU%,K'udʳDv'>/^a#po?bA@Tq)Ozv9T(c r8kb?_}QuLA{^ץ:#t|p/N d^:|rOUBA3JI @N\7e+@A]1{ᬵ]&lpͻ>.ƷW-t`2DхjpZ8Y;~B3 : VgqzUf&O4ӥF7_@4;%GBaLbytGkqc* $T8Z-%ae4YmOŭ98c0I$ףF#KgT1qiQdho \:.f^ڶoQy<#s36q#W\Ld\ "}/Qx#nاQE⇪?SdȬ9kƣM (Rދ-^ #l&^g~TZFӶol( U R~GJ^6Q@#I3PD=Mc*eìMP[עN9Y\c>=b¸"ŭ@B'@T|'%sAߢ% $iFG7?*Ί>)nD䧈4.w'qZmDnN:Z *`f(;R_jCKr3_r8j4Zzb5*TGWE> D^.ӯ-}#SnXSkՠ^2Me/҃1Q60A$- 1VJ]V \*'L/fl:J6V `Ԕf'2￷8+ҰKhO6W5 pT#My)쌵OS6q`9pkGBgF_=xfQOs;QA֙3pSCxx~qS#HSl*gWM~81 Cf}0}sL=9a8 _4Fu c+ai6gdU!/hGQc*Rݚrre͵^W֝G! 4t m 0'oH^ޜIAL`Uvg{?ʗC7g:~W'Y,ע \L;;d1Ļ&{ʝ&޻CII~zմVjVkY__l xLbRc{;3:V-im+uL:S󲲶`6wմJ  Mɓ~{v)ɲ:P? (p%oGo#Ri;;ɫ#}cdBeS"ƐtH#ɤ"eä7*^b& r}?0H ; "I`DDx x@iHH2@b:u1Y`D2"~eE 5d'(B,]9rf]$3l!"k"BIYw%RfTɳ:8:d s_9gwىY:rw.rcetT xʑ8B11QІJAM3ILS&3dʁ3`.S̋͆ne0~?P"TD㻋8C0FX&D%*Bz3>̤FVoҕz/QQx4UhxBoBQٵR-n#nd z >JmojQpX{4 5"-Ecꥠ6Ob',F\bMm3h#oCw(h.\XGCpՂ~3e3nGJp`Yϭ̒4r/eY!fnجyAH%5U3^TCz zȜg#z#QӶ9\ H&Ǯ(~ 0k!#hÿ\,!6d6sJb]cQ79gZ$34A8S2gfE%4y66 L# Z sTtuo΃Eр~ODa?l$%ZҜ!Kt9A/ױ\ǂӲUAxi:O~Lͬڣ)Ռ};޼{㸷_ 5eѴka uX% W J !lZCq gW孲)q{* %\a2$ fZe=Rcg8Mg(*"ȪE`\'׏6E+r0&Cb* ٙq;]'ݻ~m[ =ܽX4}Tʓ?- 3k4=GK5tjk3u)(ۉEllIPE)E(̱Au`Va 6ejBfiOs"MBׄLad}X}Pi?~Ԅ-A.smr,mi5 $E]2wy٢>v[5_Ux$\})GkG"B Y4Z&}y+M@لUq2omޔ9@"Iŝ`Bv+:D ԬTx [Vl jH[FBKtms3 <|1#b^>^Pk^ 3oV$@uݚv&wJ>kzkxP0.}bv2<(1Ƭ]a ,T{~lad1pߠqAК܍vNJazMaz5c|)Cn|=M͡\ ӌixī3mI) \׷LO*tmW+lô ^]Z2A@.|Mܝ 3pOS\dcDr(@^[ ~uq .to&7z 9n;vq(AUjV0ta\!:JA[ƤĪys_auX᭳cvIs>/@;1gQM\[:,D:bٴ7Kkߛ,% 7sY?K촮t4vXm2cn*Wˬ Hcoz ڗ'5|5sGG@ j c܀(*5M?b@ A-~mB ER&.9<'LVޘ !B:z`Go`tsa\>´"6Uͣ5qRK?FQ%lw;[{g:=[wĐ /*8(>LXYlqjB>r*B>zR:b-b>jfZPfKOc)<2묃gZoZǬ'wZQ5->knuh8n򝏝ީs(=]? "R! Ї]6 o!̇n/6qSH>TM<.Lex2N`+B@K9Y a>5@hFndUXb*9X h<,WmڥOAC ^ jΠAV{ EKեܒ/R'lC8c9U0Yxm!+HF$DCǨd-r(;U /׬Qͮ}f|Cg9Ra$u{GlZ__ÂNo7>}4>˧KE\L5XlZ_p[voh *V) p>BPQ~ă+B K"@Ndx30)deBNn8:n ?JGgjpu?! DdYŀ"2fgym;GuSdڒZ<D $&QH|r>D!g#B)Tg9Mمvelɠ|l  ʍu"haX&}:i_kBE~5:ėDg02)':Iw9ֳE rPTm$WF7Hӫy=_g>*z9Ư?RR, siξ&9f,$Ar' 4yr3Vk7}*>؁dC|r 2O4혴Ub$ $;0Ui>L\PeC6bccDPr$>O{/XqDw AJ~0h>oYN*R=sY5AA79Dpûk\|QB]6&@% )vɭilOҎ,d%DHDċpdT" B5_4pY6 J  #K%VpZS^`RDtPր`toşg*fw, 3*5=Vu`KqWR<[yNkUK4i '|z׮z}Ͻ!u+mٛ QnaZsur!=6;ZÇ2-iE{$ϳ.xx%OjꈙTݔz'/^wBRFRB׺BTe N/E$^A L50[>. {;zD[w:o@E_ߔuOa Da$.HoC xEa69:zE@<6 DYgEKKqhwҰ V>o.^렱lIT݅d_c(RgLM<qMiն^Q&|+n?ihSld15"q9,*P%"JpMByxo*x%Ќ&KErrmsڐD7AYW!Ul,xL$*kF(Aaa3("u?42z"5*{%jq$CIrV b~s/X1p.\$'2S<ՈF9 p'C4)l0p4'F }/8C²];YծjtXe IxmƺA_Y (. o!UƎ{ ¤#JM %_e')ڗ!k06^l &*eEMѦ@pRXcZ-!z|U/*D3!DT=6nj0CFDd9qGËȧʑ b@2rkfA嘡l/J:A;$eLN< $.tPjiz5((^[}|_BA) rXb&/T*/p轙TpM!oG%^0:7`Xi3=4"R1VJdREZ1; ny7ܝ}氹q|"kGkv[-G(?ÿ$4W~@TDU(^q?/D͋x f~nt8["0D9 \Hx?^IJeYe8;d/JrPx?:,qx'~]恲³m?sJp3 *ЀӆiG'G6Z=΢*}늘3ێ7C;BvQ`|FdEzzӴY_qxH'rͲ#2.@I6Z;{kֶiMԲi@d\H{+Jv&fL iB,"p-MBCt,ƂJ}pxI14#'2lg>R"xrlP1>!F.gCRH/ HI8,|tL4!N}'馞-kQO__WVP,inORƜ`>xF`:.=.0cn^lcnьAS*6άF{Cat3ۺ&_\wΔk87x)clspo? 4ܬ|D*/GNЇP!BY%Fx#75PWP}LTuB ܤY@ --ߪi AyMUPݣ`RTI8B^A%c]3m #Z'v_nhz0E}܀v,kD2Ua ! 
n=;&\Hhy}+zJn1 v,7}GfûVIwT&Xd~92bZؘ`4jH}JS8V(Tht3U\35DO^,F)W(WwoJ03`UBѰYklJmB"CUyu7%n%G\t9‚yZ^f,φuf_5\ [A"^:*th,GUG>iƝe]wUYQS\f17GDrKe YzmʘoJ)hW)3܇ozjw06<| cb rπϤUbk >y}l!%$[Lv$q 3o@kD*5J*Y/7'Fq0 2BaE21"[eߣxLRdPWM(wYo]RA MmAC{ҌZzr?F⻕ ar͸kj"1=Xu7.WrR/uEkeQlDx4Ϭ4Av] Ɓ016ž}܇$i&]$eL0B:u\+;CraIɘN+ٛ;GADE_>d^Tq$L++Z ܾj0:# 2) YKaRjz04#ߤ-(|zrД9W 0p ?{-zqP)/|R6<#g%q~ 3U8t/.{8jeFB+hگXhbM6F6s˪6[~ `vh60)B%9ns2ejIrf׭ ) |;ϮC)|zԽl]J妉 h}tTo;H%̎{[Gr凯k諌EU`Ɗ<}դ=]ComD]d;{CBF24}5}N,tl|kp_D~z:Alzl\Y ۱8tR'fs3Fp5Ic-!:1Ym.]_F#;QIM!6Y Ͼz Iec(icAB~"tE,\y2ɘ?$U5Z?ԪaY{M3Jr{Vx 苷{u2rhsk촗 PUG^>}@'s 3S. ѪX+?5hagmrk65K#0}M2SЛucm-a?< }<%pz(5Y\mwln8S& @.ر&~E!*@Paj5kр!+M.WAO/}R|hPpKYP-^("840tϫޞ${kN[4+4 (D;d4U%V6X`Wa˜cy~U򪋔9|EbzIh:$"\1}_lqut}e\6լUvl\E)eXf V6uEOs˭ kZʴr&[ k?;0r;5x3u?)ɡUrدUMIW;yz\ߙP, qN.-RAנc@!Bȗȏ^R|"0k!eokmߘ!RqdO'l=eDR*(eTP.QFlvH7 __ s n72~5^([C@ !.(D̅9ݝ^C. ]πF݊.^2* V̊~A ?0T}:D"2DyyFr{O< wøaF}-|wKO X-wy\بdIKg-rW" /% P+PEH"-Al&{+Mq1_G&i)VyN&swkXf!tZ*~ӻF|Hnim*T>NfiԮ1^Gp"Jԫ)FID=Y%wid3Sc> HnǝRe] hVkg#^$r`.d挟VvX#=cb#+xߧL꛴u4UjWրUApY)ps=ʢ@Q A >`rTWU2XLYc7dbҥ&BP#0\q m{, ޅf}ֱ%MW D:d=/EZS3s~SRC'jmRtCz^~PAFĖss8y'"O4QiXflD;膣Q,^|g|C,M `3 6khدU1E[Vh-%s]8bBHQ ؟`v> h4<Q8?Rs: w.={QQnŐ(?sÙ4tɰăOl33ch#r`#v;I 1M:B[ 9D<lx>K$D2`j"B+S$՛|)/"~(o+.G23REQ %A%[|:]$DĮ8RXa9;ˑqtK%D5aLYG;Y/g'9?[&|Nf&I'2w7dq nݐՉfLcR@5!HyᮡW)uh 8MƋi T{"Z:*VVѓMfFp8z&!ȊB"6I#H>M_6?Dp [1fB!c #^o Yt"osTHl<$,?or([QX:!2pRd^,2^$AHgʝǾ.03 +/\&K< \9Ŝ:L`]pO?sf$7>%#=7,g;{jT·ڐߝ|VwAT鞛~69]ȍ;Zhs񺔱b&p rY8]-s>0ݹ_Ɵ˷ruÉv7.+v8<<7gMd{򧑋FtЋH;+Kp帑3QZ~([_1(iT=*LuRYfi7ctâdW**qO4qZy$y$5d'fJ5Ep -#G؛>(:3[XnTw%,{I-xc`xHnyW?<*^+؂Um W_ruQ yѓWB󊲥魫6<$$Zei73 }]=Y!t@=!tPw]OM }J$'7賢frx%nC-OR4s.- J,ꋺ%}:7^K4J nƉ('4 .C7VGשJQR|zwN.n)SFuFlHiݻVI4kH=x #Mr7N'N+<>9}1B) aB)LbK$$# 7#1q(əfMTb&6ΞUԩɻsۼ*H ];R y< @9s&fEܸΖ rBik=[ q)O6 'pT .}Z=_ "d^htBi`p[ZH#/Fk (5r}bhV:Xϴ"?tR[w^b=d_= b>^(|zҩ7Ɔ7VM|IS Njwx&ɳybt<]F|AHY shh6\/)K߯j"NB  ظ.TIbH xyeeR#!t9Қmz !q#4#%iɶ,p^(KWGubF!}ĝK[e!"JB0k.vK]AwYG{E5_,THH;:4L9vTP(~jH<* :F$PHCL2 4P0(eI=Y}W+tQNsR  ZB2k\YG {{A&x|z;B+4Sj 29a2byU33IH7`NoUgpuZUFS&#9M@}nb:Mk]]t0 Pٮ>:o,{{'=8#GG {⪡ځ^_jOz2j/AJ.O~&7*ehL 5asI\im'a. AR8$5l=F" vSr`va?HU(!YQ0QPpJ*eDG $<ؖ㻄6O.9TAW蛵QKSSaG*`XzG]rԐ[ `Kd[bjfИ ͹ѭA$*,0CZw~fC6򛥄%?se5Nt|a>ŢJt bl* 1CFr6ogN.6>fY-QcilÎWv{54/0e% ~Zs}ksB:  ȂsCTv=_@ONJJ5ěۭKlI@E! x+KwH*nKYEq{GNrCj̖ͥdViNB̡Aixz%; M#E#+x;lg`(c07tA"m۬@cMvBuݷ 6ߓÇm( ڊ0ˬ$?C{ *˯cǮSfKXBpPWk>xˆ : MFZ,xɨMygA&Ch ڜK Q'!D5voSUKh/԰6ckg@3*w:0(#`jC?S;jz9y #\rpաދPT\-i=J8bH\F)>>Mʃ~{f.]47{z:]L]I%;?TAFLhh(jf A٨j("^;spHsB4 mOߕ) jjap|je# n`(FX39^!:exv!l}ӌ2<1^xNFs< 1| 0Ռ7J Q{N'HN3*w8e Al 6Yx[j{y&3:om=M^jz9v#ZdzeFz=Ł[:5sTrXtg*tm|05[z8!>`aFDl/G,=SKI eJyY"Q`&ZN9.G\) yIyۉ( KJqS/br~_#- +ǭOM֔%ي pM! .֓woKK+xm8m7\k54h9:7|@gj{ qU\Ce'FR[c3uԩMb5n ]W51FJ|Di-|e|y,wMy oR<yQP@X@`-{ @|B̰֬X饎gԥyi@D֕F& `L kZ;ȴETQ8mkA.qu6Y'{`'oM9*B\RٜhZ^J#/aź-:2 矓4d<(4x"n풴糭 II3!l5k_FӦ S> .[dSgk3|`QaMٴ 7c$WTXa1kД6ρS$(f~"(bcϣtEe[!䴽%C"LZ a}Wm(,.BSu8nRf.R0:Pw 6bP qܧEJJǰ$ErRqͦ h1=ĸ49C:|:H-i^԰k'd MϣCYlՔWm//Cr <]65jQJ+"VO "q[If. n!qN V`*nIEC .rgm 4@4$\$^6#$r7\Gèj.)lÊI20 3d؇ rE]IR\Ӷ \O_s۴8'_,f]^ ?'O|C.w.덏2UQ,Q YT)fxx; byEj",۲Q"vA|>] z+o6F-b?R 5Dv}ZBqe3(~qZLmGDf)6E.gBj|gOݲ =j釥==a}LU]ܷC@T|pvsN)`9r~V{xե[~6>+'zinvtam[s:6\s-5b٫ CEq{ B3kmY.e7^H(\VApI,W> 5%[}]@J^`n;ʩEMyoēHn1{,1OV-:v ARv 5࿎h^*?';y:gOC {)d3*\6} e$]"* "mE*E\ǭFOBvM}}o3%cj k4\̑P]1C ͓Il?J%! g ]ӹߡ"C0E;JJhI"pehv*9#2avU 8ҭ$¶StZ ؜ȼɑ1k. K\\|.ҵ5M*֙P4L4Or0H n\QٓZaj>KGgJvc 4 c u ||RM)Eڷ*^JCl?ʎ{X*R/0B&OÝy+Ee1_o;X ) D`T)pJ0U-cGǹAgTRh/EDE'ܢ#C6fy8("f ~\oF'n : u5NyJgq-ZP{

4*ކy,(DTP9C0%Ђ煍2ZMsQtuLp'H2gFVdE ZOng;NEBPCR-ŠB1 ?Y_.5Evb0ÈE|à[w9{ZwFCں9#1* |vΉRgGZ yHL/NA;_@%;Kẉ{7Q( b$ Ӆ$cokz8_x_e@0 n{ڱLdg B.!]4v XpzdC._jm.40r$r]bI4ʙ1Wh%jahJLT9k<-Ɓ9f$cUjR,z5wR~o ;Wec֨"%"I5'{t6qgQf́6Du'~N¶txCw8A;5#EQLZ]޵=3K-Ll}*$mH9nU@@k^8sW%髨X+feݼXQ쐫mM`aGˑ61_!l6Gj(:{ʈ*]hʨԟM5L4Bv" Xm~EP5Fuwg`o4C~܂>o;Q L\_L`jnUCRW5(}.vI___K*m\:_G͈J>ɝ{V&e|KrKF R ; #@;XaE;TLRkbr5k&% 1]V_`w[1:Ѓ rO^v h $  97 r*  %`%<,rgͶ?RD4WSLBDU_!"D]Ɋ5jW5l3*fڧbiޕ'ݿ5қpƗxCEu{|;X/An!Ԅim-^N~)So}Vhs*(6̏USR#;fW!P)gpi̊cI0kNJ{3$lEZ3@{hdoB*%<1Q&0O/bYI&pcQrkd\@ΆRV76˳ORgTCrw{gvybb Ty' ON8LG*>UZCR׏b46 DžY1f<ҷ|g=ȎH]\/Ͻ$lB0m;F}L?R=4Z* 3uB<@EH/qSX)W*t.6ZaJ>m_iWX\ qmx"rCa5U1Tt^5$hkdhr"$&S #T};qB3/c(C] U$ǐx2'BFYR%U3E<(IJҽfÜ*rFg P%aR*r%[g%]'ɹFvO~##M4W=P-3bP:NGJC}n,"\fUeG8f7ۻݬRp[R0*V(nj˨h O[wfF3hB==Ad2lozl$+Wz K|%r}s}(i5!7'ET&LOY-t;ct7.zv-C 8!7I}:1Ͷ>V'pڜuvpl۞^Lgm@fs"MЮ%^}*""`n2;-&whq:Fz:AL &ηкshXqhGɽc@'#>bf- C1aL>'X*U'%BWzw.֫[ vڥBT,yrVvPiyr mN5Fi^cH>xd| 9HLKa5p.弥mϔumieɗ¥R#~s<]()I垒sp#:g(w?]QAAPjf)fVr>v5U13ڧѾ(rb3ȚU?s{GB#Rrz&MZ_Zވxs3-RJ;gy7,Vlf\X?]K,.  WHcNbp_/>ߔ LMH;1yc !ۅ=o+Oxc}X&^N6?k:/yfW/Nǰ`-sSN*7硾$ζJثe@w1;o#MD.. _E澪z*. ˚ȿʹ? vb] 3gESҕR}I.|(&]Giw:BRfQ݊+tNNG 'n9~9UUPڬ?)M49pyF=a6oԱ@(~M?վ7EQֳo kꌙ{=sot!W@ -%7aͰ(˺ k87#Sʴdq=o~:%X dS>HhH(-à5)%G>V(Hq99:O<ٻ_ -n///`-#| ǣ5wd/6Zfz ڠRE!e*G6UGFmTFRk1uJ䫣/;Hv[QvuEtVM=t*_ O#:I." 4kth݇4{ptsuۻW޾qN(vnr;w1Y}w+s`ksY/^/\z\nNYM9 6bi  r{flr2aNj@ !J uT⯎zo!].U6 C%ڃ9$1_6H uk: fIbE{27ڀ*!Vp;z[xB9=iI 9| 9{&(!=-Ԛ2eEv Y=_7pM?謜~bw1HLLz 4-IQ~ բI3FEe P|› α MUML$ bיA崀f ѻ&xG(41e]% p6 «3.JE ih&&7=ᬀ2+|Njó=@I CM'ܾ,&RC!(ב'B񪁾euXlL8U&eẔr-A0"ՓswVEC^%xO,%,TBP; P*L0˧LDjD\ʍN㤇C'M,h%\U~1hL3F+ls\IUizrSIygX!^|3F̬%.n@ggKV0d?3-lу"6iC"Rz%ta4Lg.뽨;ν$5zӊ-X;tc}Y57$ڞXmTJvov([:wCQ=zT6$Qx|nn2Mh9ڍ뾷se0 ce:펚l"E8pĖeTď9/g+pٹE}h/3;g M7g~ИEIP5,HMJ0{(x~X 7ٟZ~?"hY,8b/$>*e~ǎreɿX?q jГA uDZ INCli鮻i7lfO(c`~]@2tnawV́-p9 db)٧ºär̃P#Cobfr[h6_lG3L~ac2xŏ++vR~QjHJ~lj["'iy;t8”z&>|AuQh\P Ja ߹6 s,S}_sw}GSXw-![ft1Okbmi^}wUgQ 5H;SOtjYQŝYYK[Dcf#b"\@]?;HֵCL-y,sm*$XIn dDpcIysZG:7)4f%ݏ1UʺX-L tWq= +n&oY1 Jd zHns4@:2O0LLB+{Qf !G 4oC@kSksN11-dw;EYʹf^:qgޜf3&ze[@TG#U[l=d2 /&P’7~ƲY2HBsF@y)}\>UMs֮!2Rzzr_O%5u}[?ʤ \?#^`dKt 4T$fmѹ(#܀?:;]sEʨ<>#pidԉ k];hdhAGYt1FH5h_)l? JX/K  +% F#i'%a W)9HcFʵC\1lu9Vj;!inhA9hպdO޴6㽳~O;&m%^lq>5ND2/@ hV~їUqrަEIXR K9S#j!o| Crp)фVJM~2Y?D@&u5)ʐ)0 I!)g$̈ܪ]/R\a c5C~)_7ʍmp%f|+qAܑ{ʳ3؊NA78ׇh%{TUSqO{` pAI N PCMf76*t 6pԇ_,]v<⋺ _ edT@lKE^YOr1AJ(|A5tIHnz{EB7k%j\W1Ylʥq 2Rު8wKRO7+%mmxT H!Z1i<99 4nºl6p%H忓Y//E;b{X`&/Hg  eW*M@GUrlMj !.,#L/9:hij}clresigKlB.#V3/G1Qܬ$[Fvg~{" m` 6) 9>覌MzNY~17 deCr*dqM1-xSNBv7x\#Us58/Ӻ-ީ3MLHvul_ J8m~r=-El vWh웪ԝm@!5b=ps@i/a.ږq7((~(Lȏy&Ѻ%3D")]f$WZoɾ+Y AZ' "޹Y:c@/OP? 
ZyEpIA'u߭por ,&2Շ-"ʸ;WZ;Z"UK**mŨGoP&66gn{̧azR:9#2;;";'sO\4)sPs(1+)8l&_5G VRhp&u [\AhRP֝])f ?|T$c8suJx}Xish )P޺JYOfkq3V| 2OΤ$QӠ"4&QזQ Dyn_Z>޳ ~~N^8 ;ȷ`.ml<:0v i 8RnN?d3 ֓LxP>]f|[ a:#nFI|X v-cuFa,#&+7"5m+U6'B3g7Wl+6{n=CoKA}jw "0p GZ]quμ;&B5"ߜ{74ceBR3N0혀A5<|*pSW44.rar(N=8!@>-xfVOEVi-p=]Kq"4?"iBm:m8u ;jFS 1jkoRo4)*% [,TS)okuDjx <;?%R=wsG ]ƽB`;:nm:bXsyo%(4OģR 6.\ier ~X\.JV o|?!)'y^ywOu.݆/8aE7 i`Sb=` LY 0K莪H=uzjc[W} ٚ-d5J5Z _NkVԊ#l-m> S0:[U8~C ԯO^sr8[#C{?FUq|3qKw;OIcTjimPbgnÏ,!L%Hmw!nIAXr۝fX].K'c1H)Rn'yQz0=I2Ŵkq:2 "[-^vUx~B?Jl2vyL*ߨջS;K4t@\&!hډK ,ifg0ܞ ȘpwhRJ84P@gIބA^d詍F9w4v*=łBbi3ч^2jb?5)do}t4.Z8!w:-j*^et5Mi /WbVR[d,j0*B ?yQ cF+NO9D Ж]z#!J_'q6cW%_I"86 tefˋEx<_"ˆYrDQ]=ux\XZ<<]y]݅c~emc9}=6mw棍ٸ'/K>+[X?on-W%!HKl۶m۶m۶m۶mݿm݈f1u|:UTf˗޶{S {h6Vrx^ep/[~]0MVxQv h,D1 |ɥcȥ˺ $MKϠxCx9+98S33}J2%ΦC.j]0سȌ^x4,s>w鐞׺v_*i3o~V4wg .;Wh.; .>"4I;Jaw(,m ;BF0n`S򎜣GffʟH;/#;{ +8Q<'o$AhyY}yR_̺_ 4jzALq`WEDhڂ#zX"2yxa^RRZ|2KRCA|+^fڣ}pFBkAkd8X9+`OF5c4"<.ԻH镲_E_XZ["D>eMv\#ItHY?J{ pnubOi7~ж/]E2ڋ-_L2J7 jY*YmbI?h qj)df"[sE::F|3=1 k׳PZ. UKet"_ŋ:1_tJ /JfقxcYݥw}gcҽ{E痡82DݦZ.3ZY@X EmetʽID؃OA9(%mcC("wW:Gawk@4lKg;2{Uc]&ycŬkpP^>"[<2%3Y!RYQ*LVނ e&Y5@@uV $V87{Dk܍ 0zR5!Z0"z 4g(L+ptwhJ$JbX𥗚`>50 =#k\.B0JE_+.p+[/tw"aXk<`a{m qބu(j;L,]0̤Pc`?O_J+ᒶgz_s,O@L44O֩9M{{Nj(^FD=_PЪF? սɉ!+Pz[Jw!fWqC 뮪@ZrcC߲X# [a|ߛw\)#"]' VN ULČsP`=,1~,`5_ru-r'A#p v\\xoSxZx ivWI3"mfۖ(:#1YK?&mcB*tӁQ8aU'-%I&dM"S_R"|'%5 S OFgkDZ]s}{ep_TjZ#a K:0d_oB=)JnPo!^gz.4Mn['\A1~kЏiʀ:ɩ/|z+Ox䩔@ׅyH~9 π4 HdG b+FD(;aL?ڇs }y,ťŖ#Y\FT+n$q$fB` ޵Nͅw1\_,**yBg N€J@:%VNs#NiD%E~e p 8Btνz^]mcgI7~@R?$~.S5eY7d[zjM]+ mKRbR3 8c;f XjIJjCH|<qBn5tɉ6T٨+nuY84cm&NFYP,g?gAse[h )S;@U@m6`8Z6q$b_Z?Һ%(IjEչ5k0p Ȁ;LƷWȔ=D ҇ Y=^uU)18HXT4#} s@5HAtaXš3xJ4l0HrqWC Hΐ_bsڂTIkI܆(RF\g6.YQhIx^2ہW޴1L$R3푉C;S٧Iǯ)jLsnS3pO7`IuNR)ZIKқIDvc|4WܨDm8#`yxqa( =Ss衪yHdSS:T}Z{3B4IP'd&,( )?i.'4-#Q-cr&=IoBe`q j?0qՅMw/xj0I-%_xv.AE$ NJU6; X#DKQBO's CƓ+6Xg.의ސZd㽁\+fУ|sul_TaEBУq~]oH핺8x4S d{v!{p8)$fw/>D>_PqMsXÛavFХTA.9$D(1ӡz:: 79moe8i-Uݎ֋@8 "G0&I?ZlϺXEVcQK9)Xbn&M*D5؃#^EBѽvybڜd͖f˺#s6TZKܔt寭$o$I#m*Cp.NS|Iه{[5xEHi߫IܒXcOcL;/m<fХGE7tF8s^ʂ( _7~d%٘;Y^Ds_tme_H~Qn(z9;6k>BˋFgSNX穷w^^ND@S0NрLX54QV>R"b`2-3A&Xww- UUL$IQ(Iu01)Q 9'M3cvX7 !c<|qMLRz:2y0ի~?y|7cd@ |$cnغi0L*QSsJL$Ix('$=sa)5V2fثY?"L P!Sm$RV+uʌ6֦SzKums 6o+$?c7=*Ja_[/g6aRV&dYBDT7+޸a @"Jgě;~x̜!1Ѥa45-Ж׮Ꭴ}Qnkq-Y$H!sh4ɒgTޏh-3ӺVŜ)gR@#>s$gA~ǙH 2B?}۲ܒ]mlƀVf2R:>wA=S=ZT;`V'y1+f$Γ}#_y{|qA9NJ<\h#W[3Bּz'Sknecۛ\L{F[FIH&Ys]8I8k @{*zR4`By=Q[D(GRaVK5LQ_e,b9ȇ%n>?PU fϪcЁENe 9]/(C= hmUxl&mD;Kq+!U!7 O3s_\)3 #.r&h7܁'6E-rXjgT6yue]!~`"~Ped&ҏ`3(80H߂DXD/SHGXe9&b㾞 B{ _`^Y`Ɯ:;,x!tfEy]*/"0r{zk&Cc '!]p{6%0e$̀x?HC fɢӠdꉾgLjƍ#,lPSG,WU}X~R2>p_ %Ilz}fj8ddи olAp|(ѱ7::Qm}d-g?i?^G: %ָF7Kߢ9,gi,Z)O%v `Ǧ܆m|Zf?ԠKcsg$P$3r6}\fujGy`h7L}AV }بȭP.֫ 7 tZ\?=%'[u+czpۡ)P\tҩS!sF;=??#%W]}*#oM9.v}_Y%v+~\ԍjWC9qͱ7WMjWB/CTj=嗉evSuM?l'KM<Íi1q.(6+ُ2JtH,yX͐Wק'\]Eݺ~#EF:GEKe.diJB&DbJ1Ycv_+XhEUT6B(! _]tN`Qr>lVf~_9ߙ02;2Pbe72"#S~lDrۃ,(.B1jܜ:76@zl6e;l8wl,w 2oĸ͋0|q۶Qa))d:=N:pԨG|xZ"_f eᏭt.%c#jЃɳa2Uײ rkE3Ui`Aı+{;~R`,go\1mɑ}ՔgW|I;#K z2"Q,rj 7T]D)ff-\ 0wl:HER?HJ:%9x$(CiĚCYHH*i*IYѡ /$fLpĪEDu˼YO@MVCMUr?(vggPBun $Q?O4-.b9[pYyF=*V尾j/~bfuv2=7OX"wZT c=~^N&Y_X`nS3X7[~~?_|3}OV:uo6"f]#U9NuǬRiKTLXə$F쐑~tvYeK=C>l*ě+K vyB(bt1 "F:^M [ Y4)xW%gS nAuKwzeu|5« ~=S:pt?qz6rq+[sZk>"<=asŪ˶'nr XK:VM{5,L-!{ 4'5:WYi0b HT9M=y,pUO5ąʐFq &qS8l_~dboloCWQcX'4p'1☥` 5V*q6"8Q@P\A(6EME~oi\ ۢ鶞rh&&xS9Ur/ZDz7G*ӗu qeaQ]Vɱ8VTGTpSl5VhM(24B;\KE]=˸븘 YX(."u[b!3usC4\21.Rvb&Vc:\t\27tQe#4r=KUꭸN.r`\ͦf k(BUⰋbM2$ƒz<ӣ)'gʣZHgNMU63/o}dQ&bʮȀ{zRfIdKclYк|Fgpp jxSTSa^\[hig9U1KO[I;)h܆)emϹZ)TzdnQZ|Œ-š‚xW{ %W bTj 5|yob E'Oadۮ'=ZP@A;Im7ۺPS:VV7uu;k< .&[/<2F|)jO1!u_nZPlP +MƜOX\[ B\GSHe}9:<}ibrO]ѧP>e[5JIY4֑w\^2{Efk`=~1)`l"! 
J~0H2Bѐ1HwHMmhEߒk],,lI9Qe6;XqQ ԉy((LAaOI?ymz*тɭhQc;GI6\[=PdzW4 'EJB$n=@Ex7U$"5#ї4\g..=0F8$$9<2dlT Gm(_pY\ZCiUsBA 엟0!9qH ' ~_#'s)ȎH[ #Q^`Ʒ@hmzH4g3YNnn-Q4y\E*Ճ0!I_Tz򖱩UWX)O t[2XTgDJ (&Y hxķvUHbI&Qg( NMKB@<,@snDQ-dܒSRD=Ð6Uz $m,iZ* ?qVL&ha -&AkW2 =VvXgQLEf)ȿm!Gl[/أAiL a|$[Hn9BȱSfX}nA n6WDSUj\C|dW#c9 Kz_~2u,mj,eO퍣Ԥjle[-5$N燏=T"#4 ߂{na4 VgB5|3P'p3E =':gP@d(̎\u֒_3Ƭ[q!Xt3-rL؀|‰lo}@!3jf7HYI G=IA> k!*P5}TaQ&>H 1&XiH]_v.krEpnqK0+~AWdFSOܛ?{$^ڔ0T"-b9l+VIs|A3ۀMz5+hV("\>5jQr1EsI)-k%zg"D_u/S%SSrhLղָܯlJ]٨> c;?FPd]i^eܕ]MMI_F"\p )=&uԡ[rd@rN]=)[Rٰ#^]m:S0F4zfӸ7c (OD0D~bFsn%M4ӽ{ q֭LV$ ƍV c1y Ce=9q'A{5~I 9)z8fR13CvB6F<^ԯ'4@ r!Oʉ?=ˬy<̞@MO9@aEygIR_Y%2&U4<ҋbYoh?\ ω;d18D$8n=;#~tL187ğg9Zتױrws݃S zgi17e:ek"~A}~q{KVR7a0(_;s7_ġ1JbO a@M_7{!!Ka3coϱVh kc?Asb\Nwq߀f(!pCN@ AnG\@Mp:r\y =Q=x1vUN~d>{\cycwDb`4nP7С֍b=l뱍h;>h=T_{E/Z|9~2QG?^J&OZәMa80=GDl7!Z 2SQH~V / zGD4L()Vw@lYqoGNHUA]KAvjs1>\8L#)^ ShuMo4YjPDlX:>K9+"2r Q?|`>t^F>Ϗ,L1 KGZ ,7ֵ_PblfٵI,Z3 m{NBVr_! \{c0C.\y@6Ƅ'l^QLSB0/@ &m ;أ 4a VJB {ĭx͡\3ԉ(+WQz$+ޮUsJ *IDզY 2l*AjDO+SI(i^µosoXr'6Dj-Jh6)y 6X/<^U//7[lb׋^ eUAeMJ[AOm?Rgm4v#q"3r$y22x I< dx$J5iOZl$sLR;ֽQq YGS YaӴoζOmp Ⱦx3덭Ww] 0r|74#V`$jB(ĹwMId 4vafIUjDNp}} 7fxߟ=|f=X"a4s=wׅ9!p*>S~/5 jxM鲐 00ZJ'LZ:1 hπtK(z&la#3zbUi+'n$$N/CoS2… SvgƞcE$) tkEvnK 7dHtmAK|6.8/w fOsSejC0Ӻ#L;f.\#c 7ҫRodNBpCy:xR2Y~'~/q&@W ,XS!/{4j|q |x7pKh$-]R7p tDKFY2~_v-6ij܎dWۭ<)\T3^pG|خ깒TjwTRE3iV0l2q%VjPC_N7ReXl+. wڥ؎M6oqg[]i,m.k:~QY,uM> נ#E)!-vH=w蹜43/zejjJFֳ k%J (욉W2$h, )X&SwY Tj:dڟP[>tߎ k-蠡/ܯ6SFg2!uTm93MaꟾF|gJ͕k9v>,6ss)L|%@>` \%A-+T%kVڅq*rA0Zz$uUwJn =#0a8ş=aTeF9rŌ5"7|$jMOB`w@.abwC. U ( Z`K Yl%. a$xHfRRjr(`] ˗Y3bcg#˫]&Fyt:aݎI.o!nFLTn"$א!r_ +'u3KJ SPЈsP,c]X7tQ /f9i 'vdJ0WAb;sQ#aTNM&i*.Yl i|oy,hؚ|[;o=p 0N:U K 7`D*]* $t;0!oml%:ߖ;;11Ƨś0+zz>~a;fPz@o+`jJ=|B=OJ3o / :u&SM2'G;fGآ +SJf&1훹,ǍM_] z??ڰ ԯ8<@+]3J&$cl gФB>}| /q?y.ABJ  4w.̥Pӎ%V!X?r֑Cr:k h圚1K\3j8сWLnBAFFE0ևni~[[9*۷YikZtAq^C*&y^B'-\J>/YD= v})⳸nwV=Ջ*}^? l+;՗s~<6/[Տ{}z;ׅ (U /u>%q!-~+c-= V?cCJLA"pFۮxq{ԋ%eMkvz9bޠUK|8 ͐8:z&5nP69kt{vgх_seӮ]M/W}xt|muޡG|a{9 f2GO?1Y) w J M8G✾H9}W{~„6u8Աg+Dz௼pi?f "h6t(yLh@(Z"sC f ri @h\BgR+HS{B&y:ra7V|&]/_*]j $FHPFS^'S(Tir[z{9oafx59Ue91 ^&LI);$ R [5 1znTVGSN8% @lƝљD0*:RoTIxfe0 QePlcjQllXdžx8NYKZ݉]%}4#Kxtq줎WF-%3PȑDV',YNu뚉;H!([yg |@љl1Ajڅ`|-%CVN"[ Pv` $˸58rr.h ko `'c=Ӄ?:Ȍe8sNRW:PݍyCjũ-O=>g/lѣGZsWl $;DZhPGdq.TaHŦfsI_k a2UU|ՔߔrNoXC:Prܴ_ $zb@T+nQt`>9j)h4QX$X۸@txw?0 (eS@83Q}#FfPR*FkQa;,VC4K6\Dzk ςk"O:zrƤiX]1 +bi e]RmnJmfEljcz_ߺnNlPTdA11'ᬋdB K `Y 21:LߖHn7z/Ζ߲VBqu'i#WO!뼩u~BmJ'wv7?-ĉmwov2])s]WdmE_\*!LWh' 5HB&i]kP"B#hЫ!3^%҉vH2?[>z<{}&\;PKGǺ`Xsٶ$NsPuD!XEP" U "pj_?Q7rPAkQ٘OP:me5+ߙN꛹*Vm6\zfjKoiiJ۔94Ѹ 03^P{%&P} j Mj})NTj7t= Z^)\%kl,V!on:6HKKz-ڃ? 
:>^;G'ɍ%Huf' g5vwg맚fnj r7\$zf̘nO'KGN 7Lh']i.G2.F}utZ-t*=B9ɠEF'1.ܲwN:cX@(v57"CH&8C%XyptLBKrFб'0ԌUY;#PT‘_1/S}d+--{-VcլެA_0H +lNKA( FN/<՞ xM 6]VV#5 4CDb?ƿIr4?5ГwFO#sI oɞ_Kn,c`F'UȺ,dz?R Zr?.'*Uvz1G.1zCf|sn%ĬAr~EZw8[WVj|?{Gs˯#8W$.kڵ逞 ;gNF:$9֯s`3_{iZějc8 SzǏrMk2GuhK VeJnjƥ'Z׽!}R4?"!*Necok%swr^V%Ki,[sLӄy \bgi(oyU/̋fZ vtdaa3_UX򭬛=qyZzfi o 5I6+c,_ɢ*ꈽg*׼0Hm#~縫;Շ<֜׮{(2n]R#)XήT G@73Ƶ c}[qA:wN.՞uG߸K}q뉳t!k;FaĜ$12L @{ăǃws@sƾVP^an*yqw^c'F>;H @)3d,31dO$++H $V2A)*+\~e5/X~˸y DG{EX[q̡e %"ϱ)6x jN½]X5L(n.[\5W]¾6ݩ-fu_Fqm//cp]R!oyl_[bey^4tV Q_*_Ey*Wei}.)/>iQ_b@j++-.UV^AE}%4WKVM{bࣱ鿫q@$N^Mc+E=~02OO/sqxТ6V@1Fvl< eQB[U;E>^ W9gG6Ӥ4xɷwCÔ>ftt<(z*sT}e2^HE%\z4UQbw=tU5~)G7T!úe]Q٩ٜ\.xk/ˑQuV# iA l3́ ᲻-F8vc| zC$PBA!̗rn`RCjX)` }Ye\ -aUrrDM^FŠMיQ~.|@Bp|/rZ1~WpHdčwyz6٦_=6/ǗqKW 5ŀq^@h߄J9RKfrDj送^ ĕb;l+ohM@9k V 1%.r *elڵi a:Nj"Be$@?VEc r#9fivhMH\OG|I"?8E1BNŚp͎+<]2V:sk=@[-$\}yVKo^7FPiQIB֘׆mi xѣC/y3cIWLS>~p ZGfH)L~Z?no&,}kx\~itMX[y6.jP ȇ'9(!gDT|2S7hcg;55^jTyʑU`5*um |T ߊ%Wpc:I@&5].{vk˱ϷPT$d`НSFљ#*_ I$C"FCN71.ɕʞJ.HSf &U1elZfn_ߙ֒'%p$?э3 򪪏si/&} Chz%a\]hNT5vB=̛ON(J5g  axF .C"nf(ɥSE8Eǜ(D*JDP?C>'NcBy y`RNM~Rt5Ʋ"@jʙUS`.d:D\)/Z8ƺB3̳i,)~tOu E 1IhOb^B#7j{P|5bLgaᜰKfyeǰJlmX`;Κi^AsH1h28 Ǹ©aDqCFaNd#փVf5%51v1>5Ŋ{ @JƉQ:> 8szN;Y17y1B& T r;N uFUxQu}1sk=+kF ^xZJsq`UA3<\(92ŜquPJ`Heqj`0+l􊎜$aJz<ݧ .2FDtJDtNS>OSN88ĺ>*Qy)ղkPnZK[8\Rw/KWZP//m)Ytݼ-tg|S2}}RŐ3Ӧ{=Ӟ" +8Unez ~ѠI"9EkMK%x&F^DŽ{o-Vnc;瀤.BɎRФe|]ҤuiN8M}<ȓ ֲ2–moģL^Ī^9oIH2%lx!& Jq'Kiשd`Nw,Ef$\x:Ӄצ~Z)l%K}ĠIІ?PwAcP6*4A83B7fl?aè3Z1;Vé[+pSahh5eaֳٳj Q4JwJ50_;099t99bGQ KyvnASb#=&|*;H A;R)ndK%<JEVe1m ~gR.".|Fun1vLF ة+ῗ*Sl5PV%Ĺw5ʽ}rߍ -ˑ(93mi25c&/T6DZ%W3MU%JS2Oқj})LE) ŬyI,yǔ; reӀSK\Zp} (.IwC08GSmT6!_Sm7K+)E2NJ+ك1PylJm!EUGq6TEIdsXƖZel1l +RAU|+b))Ķc>l+\rf2ȗnhMh[j.#\j-x^QuOJ"+]-µ|mLkФLty9$#1VjLl%As.cTŇT=T}VtTT~rN> 3SA2[SmZڋ#6F$\;(n ٰ-ä &K!R) xi!FXC.OF{>^O.Xu3fsBᛦGx˘]}W1=O`}{őo{a;Ai;fz#HPLF`+z2YF Gm^YW"ÏD`T"[!<[4!^ꁓfL}#rEhyPKGYWN!{-(g88<<.l1_Aس& "/a62o.`jOщ[MWa0-Ȅy#E5+h qg!0v`Il<~Isg6eAkgC\ۜbslTa`pbΰDNfHcPS?#NX? : 7\Qbgiu *9W1u"^4L0r{N( Iŷ{X ~1[/,?Q [v1cOj!FWȭPAth4k2ȱ'F|BOp['ϡv_bA=XY:xIPٌq k aӨ5nE. [bmGvYrmA= fV^*?J<:-᧳j+:z{:0O+q=V1o+UQ̵`sa3`Vy\!ƣS.2A Q JZ(:B̬N*Ξ fR%} Y -o lOch)o+8}(75WN4'r L^f n| byt?z.gfE=9۞0(Yiuo_wG{Պ)0ZH! 1R8t"J)*QWJj R`'kכxy /% zB<M=L׆7(84X]%3冊 -הi Pw 2ؼ>ָm-0S.Uw&ĵrj%VeE4|.%7Vmທ1qYEl׃?,rv2s{*u`CEǸHwEjqRZUAFmyBBٙ ^_kkz@DĔEbd'Z؁{`-2,{s-RILYizmdQI eF*-,GŹ#L',m΄}GsɌb]j7B9ޖ&ć>)IAڗG7#6d[hƲLjTԴ ۀ|"埭/iQF~#k)Q 5\+%FZ֝f6Q(O^3W_",-,t}gjWWvZa{emS}et!@6pB5 ZTZ@Y[הlP%fv=a/ɿ4*ހ90ʤH= o@aȠQʷZdwScCOƑƫvqOrZE.j\*yDv5hak.}cȓd~Dg<䜄}w6-п?d"RB A#I_#R\އ` l̵UyHŗch*) 11yo)8 2 JhSCqj摴?úUg7xo(z&AE+9^B[8䋽1CO8&ڦx;%?껓n1J4d Ez{/TI@D$:>AhgjG;뵾sT`# M9:~F8SE;R\df,ǵ{()8YA%ɇ5ʷmǼ*{+u.U3|C17~+)3c%^ķPyE4)ݐյl wre}~5w_R&t?5f$F-kjvz̄d?˩J;1s4,1R>9uU6Xͤ@JCKzj݋HHyX VNᒳPxQ-f&W`@ _kwK'_k#}($6QI7S&[^9#+H)dp ;/3^ VsCCvP3Z`>Zh|i񃃶Iك}TK[Tno3Ѣz#Y0\rML 6I( >?g%UV? 
=;yN_?N@aS6i) $e*/}t >$S{DKm3xDT-5QfCIۂsAD!| .æӃ'9yӋ/d󋓊* 2 Ld)I02s c,½M[> J& (O-#c4 O-F ]X ΍ʮ+#ps*r@sZ{*S 4NFU*?AT/t~rL#h]xk"+nTD=A)`G#Hۅjƥt%Ϛ{EƜR>0h A<3[Jj.+O\X5 =5iͦTTp{8MF7JE&}v"FCt>.}4bkd$nL=؎MG.!]I5eS1 s ŋßC"~,[޶ƶGOOy"f0%b݇6.>Lyƻ5]rͫ|@.osp;hlbTUW,什2gƏ2J[s[O^ꊖUlAUvxƛk<쨭psSɌB`8龫1O986]hl^~*XWˈi*JwUta]؜4: =.0$o:9s7~Ru$ };aM3Kհj&)$sLcX͟on#4B]hԟ@kU2JHȶ|h=%!=TB\AB*X5dGy$LkaB_74Xf(bh=rRrW9=$gvI0L-u4ӊ*=е lH`(pΧU˿jwo};_!3!bͬr.{gR@ŠtwA"9z”xS#ƂuOX.9s\UikZJMG<7)o>!rFf+,%_[uBf_u_3,?I `- DdapRS}uA4 N{A7ڞePvUM!QGAͤ41Oͻ%Q"?9ٚÏN#7L-f qWv r*Vwf4Q$Ŵz:9&ބjK~:unu PYiy,$Q3$<-[%{-2 ǹ%`(8c(u53:܄ZH[x_k3u*ڪӞ:A+;ALT1Ik7H%ɹ~3TT!_w6sVDNG@ch>sߓT LH>O~?Hh64dinF-YyF$5-89jGv>5~2Y#r-Ni&i75F'(q.& LB!Gd|UڷfMƔ@+!491^S?GV" SGW dCnViPB/+nhEexQ't{_.LfN-b$o\ԟ*bh@5T6֓͂m7Pd3 1T40'!Z;b:P-w/d1A T, ̙KDGs#U^`=2u!F@mK|anQg`׾KnFϻ1翝 i"S`!4K?).3 -Td쪦l~\/ x[ǕꨲLqc@BWɓ^pH6%偔8|x*JS 6bC|n/e7F>e.u|9ρ  !},4:e?>B״zəP7>$S编\Ӵڄ"}cyV4W(W)и+dл[k/$Wfu\*+ܵ?7WOP-_ 3 9?%L(Db^EBļ}8c.vG)qZg!T_fx4q7˭zmSVlK4t 6IoGm!}7`mi wt t#.\m9ٌұq#kdxS(z?E0.n \1GWy!1ZXϧ FJInA$'Zp@ẎX.KlF#O̪+0<0g%yֵ;,uf yuJ% a&n`4N2LdZ`lӽ_!QI: ߲μvsuo+h^M*mvںXВF%~5/A]}RV-rnQ9:joVAYe`%y.Ry%-rx\AݑWa=d]zi$ڋaIź !f\$Ĵ@jI&\%4ȋ]ȶޝl 6m^S7=r/C[5 bShYڵj_룄\ʒDFytƫQK5RRde)_.n)kp߃<S\Uք\k]D(n#L5h螡:駓hn)NG M.z9ьTjI2!fz9TT@DaK׻|uC&Pp#:AQKS?].W.Go8 ޔ/SENi˼J{9\HoК^i[Q†zj_U fN֢|D5]1r2O'N5+c8잏zu7؞5 )J eגGoܒk/p8*m>2:ֹ.sƹi U!^1}@ =);7p%ӢafRIY=&և|7%smha:tf6#Q0Alfzw, lΪee ûl2;PО=uh[휾;IަFq֮Q fh+nmv'QCN>nTnUOL8t}IMaZŊ2od {[!w3 ziRUzŠlxL(djQoa*πQMѮ%(@'8Gwz'U^D7=FWaNzz̹"%#-~Qz^oE)rźי$<#aUm0Rl:F<* o3n%z&ߢpyGeעvVFu9٠s#Z8@Wۋd*_1✴+M_/d**scb,LIdےg:+21*M8g_U}!-&GXxaۨ' )AMʛ}w;("8 ]`5ql`}I5fufUzhN~\Gųвy1^C _ ~ͥ>|ʿ;`$@=R\AE^VB*XxYȪƣMg?gN |nk] n⩁֬dM@`Mx H|:`bjWAxZ%|y`vuU}v,}eqdk͹ӭUSoa? j[E5t{Ց5[*tN{? SCoy7r٤EuHIx]c΁=/eˈI8MK0pc v:a̦Co;?U|I^{)A≷` *ҭ$hl߈;e [iŗ2q`_l矙[$uGSOfس%b鏿8j8"9(ǁfsiOuPoC`=ބLHW9!zb GTX}:y $2^/V@Yi-S(&$(^*wk2wcx=ވ릊5k{Ya"NCZ#,MQJH9_~ڥ}^ERAυi'NsV;D0טii';JړbLW:`G,zH|`y=p+͋K4_4{X}7PL7d::Ow.\Fv4=iԃۃO?okx#v~XZuѫ殎g0M \ q#*j o'B^ 2ES-/yYMv_ja֢>zs|cDzb.nJs'ɤ1e>Z.5yIEʋa"v/ƻP9TqYwB6:uB~ۄX,IZtLhU31!P{6LiSo]1EN:9_Ry-ɱ9gإӖ≻>qd:AM\Sh>@m1h? ;Fnk_o%wƤf쁏9b\j1$aVoÍZ+"Lw3I~SV$\f4H~s\dX!?-AXҫPt;miKQkЎPh MJ[p5 } =9ꋞN}}b77'Vl~{=7#Ҭ"h!-؋qGFu !:Szr\Od۞X(Nceߒ4Pr#gl%ʣ@ѪX\}Y}l;Uwf&9Dߚ8ekuwl/=ݪd2/;g9w8Ŵz.֝4u\P|}f,z+7蓾PBG) \v=7(Զ\h꾖IvWu`#Ee/lg|ᚼytkRq؃jkK oHaEiT-%^y^ CO'y^ vJvjY1󭏜y-P2-J, 6GB1ʾ}fnV&޲<+.fw=s·c?)' Zd\؄$6Ɵspm{o+X(ql#X̌WM0@MY 5$$reQZ?'&T M3w>F:ԪoIoA~"΃m:.ԶBD>67ScOd :&W]T|ƫp[ԗO˿ϭ&exϚ65]]s53}v:Cõ+sGZR=<2A~ Qi{ؚEHpȕ~b6p5lr.}>z3NT[-zVTk31v2ϋvwzRA_:22S) \lF(9-!;zdz8W>Y^)R%]I츻H_1/ 6~]u_V[O<9~$9;*c;ZBiyw&?rWpIO"_˞YYwyBjאvE\!J׿ZYTWTTC}=4LOz*WC[B:f f?Ћ J04EI%beȪA*oSӾ>2=FWFuzm:6;O&p~#\>;sH#;؋ ݹ[Vpr3 '? 4kqNe_`[];gPF| 6]@)0 \2z7]Z x,[_CJp:>:..VK${ άE (N52mAk9j6qY9QuoC#3@H wxFb|0 J\(TJ Zu4?AMEځ.L9Hps+K,d5ikCi0ěDu)7@qA> e>5k?#~+>br޷9sJ0t~WEϮLޮeJπ7\TQ3Q?4SI{#]h9) OSlF OflsrnMٛ@l>fcND"y_NRy݄?VoE|ɸ7eB02'NOM:? 
1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData
WALinuxAgent-2.2.20/tests/data/safe_deploy.json000066400000000000000000000010161322477356400213560ustar00rootroot00000000000000{ "blacklisted" : [ "^1.2.3$", "^1.3(?:\\.\\d+)*$" ], "families" : { "ubuntu-x64": { "versions": [ "^Ubuntu,(1[4-9]|2[0-9])\\.\\d+,.*$" ], "require_64bit": true, "partition": 85 }, "fedora-x64": { "versions": [ "^Oracle[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$", "^Red\\sHat[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$" ], "partition": 20 } } }
WALinuxAgent-2.2.20/tests/data/test_waagent.conf000066400000000000000000000061541322477356400215350ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Key / value handling test entries =Value0 FauxKey1= Value1 FauxKey2=Value2 Value2 # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=y # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.SshHostKeyPairType=rsa # An EOL comment that should be ignored # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n#Another EOL comment that should be ignored # Execute CustomData after provisioning.
Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=y#Another EOL comment that should be ignored # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval OS.SshClientAliveInterval=42#Yet another EOL comment with a '#' that should be ignored # Set the path to SSH keys and configuration files OS.SshDir=/notareal/path # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # OS.HomeDir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=n # OS.UpdateRdmaDriver=n # OS.CheckRdmaDriver=n # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of exising VMs OS.EnableFirewall=y WALinuxAgent-2.2.20/tests/data/wire/000077500000000000000000000000001322477356400171415ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/data/wire/certs.xml000066400000000000000000000117071322477356400210110ustar00rootroot00000000000000 2012-11-30 12 Pkcs7BlobWithPfxContents MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 
0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw== WALinuxAgent-2.2.20/tests/data/wire/ext_conf.xml000066400000000000000000000030521322477356400214700ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_autoupgrade.xml000066400000000000000000000040721322477356400240730ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_autoupgrade_internalversion.xml000066400000000000000000000040721322477356400273750ustar00rootroot00000000000000 Win8 
http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_internalversion.xml000066400000000000000000000040721322477356400247750ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_missing_family.xml000066400000000000000000000061231322477356400245640ustar00rootroot00000000000000 Prod Test https://rdfepirv2bl2prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml 
https://zrdfepirv2bl5prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr06.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr09a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl6prdstr02a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml eastus https://walaautoasmeastus.blob.core.windows.net/vhds/walaautos73small.walaautos73small.walaautos73small.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=u%2BCA2Cxb7ticiEBRIW8HWgNW7gl2NPuOGQl0u95ApQE%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_no_public.xml000066400000000000000000000116201322477356400235220ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK"}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_no_settings.xml000066400000000000000000000111241322477356400241030ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ext_conf_upgradeguid.xml000066400000000000000000000031351322477356400240520ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.20/tests/data/wire/ga_manifest.xml000066400000000000000000000044721322477356400221470ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.2.0 2.0.0http://host/OSTCExtensions.WALinuxAgent__2.0.0 2.1.0http://host/OSTCExtensions.WALinuxAgent__2.1.0 2.1.1http://host/OSTCExtensions.WALinuxAgent__2.1.1 2.2.0http://host/OSTCExtensions.WALinuxAgent__2.2.0 3.0http://host/OSTCExtensions.WALinuxAgent__3.0 3.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.0.0.0http://host/OSTCExtensions.WALinuxAgent__3.0 4.0.0.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.1.0.0http://host/OSTCExtensions.WALinuxAgent__3.1 99999.0.0.0http://host/OSTCExtensions.WALinuxAgent__99999.0.0.0 WALinuxAgent-2.2.20/tests/data/wire/ga_manifest_1.xml000066400000000000000000000005771322477356400223710ustar00rootroot00000000000000 2.2.13 url1_13 2.2.14 url1_14 WALinuxAgent-2.2.20/tests/data/wire/ga_manifest_2.xml000066400000000000000000000007371322477356400223700ustar00rootroot00000000000000 2.2.13 url2_13 2.2.14 url2_14 2.2.15 url1_15 WALinuxAgent-2.2.20/tests/data/wire/goal_state.xml000066400000000000000000000021061322477356400220040ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://extensionsconfiguri/ http://fullconfiguri/ DummyRoleConfigName.xml WALinuxAgent-2.2.20/tests/data/wire/goal_state_no_ext.xml000066400000000000000000000017021322477356400233610ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://fullconfiguri/ WALinuxAgent-2.2.20/tests/data/wire/hosting_env.xml000066400000000000000000000043251322477356400222120ustar00rootroot00000000000000 
WALinuxAgent-2.2.20/tests/data/wire/manifest.xml000066400000000000000000000055451322477356400215020ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.2.0 2.0.0http://host/OSTCExtensions.ExampleHandlerLinux__2.0.0 2.1.0http://host/OSTCExtensions.ExampleHandlerLinux__2.1.0 True 2.1.1http://host/OSTCExtensions.ExampleHandlerLinux__2.1.1 2.2.0http://host/OSTCExtensions.ExampleHandlerLinux__2.2.0 3.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 3.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.0.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 4.0.0.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.1.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.1 1.3.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.3.0 2.3.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 2.4.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 WALinuxAgent-2.2.20/tests/data/wire/shared_config.xml000066400000000000000000000046351322477356400224660ustar00rootroot00000000000000 WALinuxAgent-2.2.20/tests/data/wire/sshd_config000066400000000000000000000047771322477356400213710ustar00rootroot00000000000000# Package generated configuration file # See the sshd_config(5) manpage for details # What ports, IPs and protocols we listen for Port 22 # Use these options to restrict which interfaces/protocols sshd will bind to #ListenAddress :: #ListenAddress 0.0.0.0 Protocol 2 # HostKeys for protocol version 2 HostKey /etc/ssh/ssh_host_rsa_key HostKey /etc/ssh/ssh_host_dsa_key HostKey /etc/ssh/ssh_host_ecdsa_key HostKey /etc/ssh/ssh_host_ed25519_key #Privilege Separation is turned on for security UsePrivilegeSeparation yes # Lifetime and size of ephemeral version 1 server key KeyRegenerationInterval 3600 ServerKeyBits 1024 # Logging SyslogFacility AUTH LogLevel INFO # Authentication: LoginGraceTime 120 PermitRootLogin without-password StrictModes yes RSAAuthentication yes PubkeyAuthentication yes #AuthorizedKeysFile %h/.ssh/authorized_keys # Don't read the user's ~/.rhosts and ~/.shosts files IgnoreRhosts yes # For this to work you will also need host keys in /etc/ssh_known_hosts RhostsRSAAuthentication no # similar for protocol version 2 HostbasedAuthentication no # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication #IgnoreUserKnownHosts yes # To enable empty passwords, change to yes (NOT RECOMMENDED) PermitEmptyPasswords no # Change to yes to enable challenge-response passwords (beware issues with # some PAM modules and threads) ChallengeResponseAuthentication no # Change to no to disable tunnelled clear text passwords #PasswordAuthentication yes # Kerberos options #KerberosAuthentication no #KerberosGetAFSToken no #KerberosOrLocalPasswd yes #KerberosTicketCleanup yes # GSSAPI options #GSSAPIAuthentication no #GSSAPICleanupCredentials yes X11Forwarding yes X11DisplayOffset 10 PrintMotd no PrintLastLog yes TCPKeepAlive yes #UseLogin no #MaxStartups 10:30:60 #Banner /etc/issue.net # Allow client to pass locale environment variables AcceptEnv LANG LC_* Subsystem sftp /usr/lib/openssh/sftp-server # Set this to 'yes' to enable PAM authentication, account processing, # and session processing. If this is enabled, PAM authentication will # be allowed through the ChallengeResponseAuthentication and # PasswordAuthentication. 
Depending on your PAM configuration, # PAM authentication via ChallengeResponseAuthentication may bypass # the setting of "PermitRootLogin without-password". # If you just want the PAM account and session checks to run without # PAM authentication, then enable this but set PasswordAuthentication # and ChallengeResponseAuthentication to 'no'. UsePAM yes Match group root WALinuxAgent-2.2.20/tests/data/wire/trans_cert000066400000000000000000000021271322477356400212320ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDBzCCAe+gAwIBAgIJANujJuVt5eC8MA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV BAMMDkxpbnV4VHJhbnNwb3J0MCAXDTE0MTAyNDA3MjgwN1oYDzIxMDQwNzEyMDcy ODA3WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPcJAkd6V5NeogSKjIeTXOWC5xzKTyuJPt4YZMVSosU 0lI6a0wHp+g2fP22zrVswW+QJz6AVWojIEqLQup3WyCXZTv8RUblHnIjkvX/+J/G aLmz0G5JzZIpELL2C8IfQLH2IiPlK9LOQH00W74WFcK3QqcJ6Kw8GcVaeSXT1r7X QcGMqEjcWJkpKLoMJv3LMufE+JMdbXDUGY+Ps7Zicu8KXvBPaKVsc6H2jrqBS8et jXbzLyrezTUDz45rmyRJzCO5Sk2pohuYg73wUykAUPVxd7L8WnSyqz1v4zrObqnw BAyor67JR/hjTBfjFOvd8qFGonfiv2Vnz9XsYFTZsXECAwEAAaNQME4wHQYDVR0O BBYEFL8i/sehpGV6IEDX7F0WQHQ/ZXOyMB8GA1UdIwQYMBaAFL8i/sehpGV6IEDX 7F0WQHQ/ZXOyMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMPLrimT Gptu5pLRHPT8OFRN+skNSkepYaUaJuq6cSKxLumSYkD8++rohu+1+a7t1YNjjNSJ 8ohRAynRJ7aRqwBmyX2OPLRpOfyRZwR0rcFfAMORm/jOE6WBdqgYD2L2b+tZplGt /QqgQzebaekXh/032FK4c74Zg5r3R3tfNSUMG6nLauWzYHbQ5SCdkuQwV0ehGqh5 VF1AOdmz4CC2237BNznDFQhkeU0LrqqAoE/hv5ih7klJKZdS88rOYEnVJsFFJb0g qaycXjOm5Khgl4hKrd+DBD/qj4IVVzsmdpFli72k6WLBHGOXusUGo/3isci2iAIt DsfY6XGSEIhZnA4= -----END CERTIFICATE----- WALinuxAgent-2.2.20/tests/data/wire/trans_prv000066400000000000000000000032501322477356400211020ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDT3CQJHeleTXqI EioyHk1zlguccyk8riT7eGGTFUqLFNJSOmtMB6foNnz9ts61bMFvkCc+gFVqIyBK i0Lqd1sgl2U7/EVG5R5yI5L1//ifxmi5s9BuSc2SKRCy9gvCH0Cx9iIj5SvSzkB9 NFu+FhXCt0KnCeisPBnFWnkl09a+10HBjKhI3FiZKSi6DCb9yzLnxPiTHW1w1BmP j7O2YnLvCl7wT2ilbHOh9o66gUvHrY128y8q3s01A8+Oa5skScwjuUpNqaIbmIO9 8FMpAFD1cXey/Fp0sqs9b+M6zm6p8AQMqK+uyUf4Y0wX4xTr3fKhRqJ34r9lZ8/V 7GBU2bFxAgMBAAECggEBAM4hsfog3VAAyIieS+npq+gbhH6bWfMNaTQ3g5CNNbMu 9hhFeOJHzKnWYjSlamgBQhAfTN+2E+Up+iAtcVUZ/lMumrQLlwgMo1vgmvu5Kxmh /YE5oEG+k0JzrCjD1trwd4zvc3ZDYyk/vmVTzTOc311N248UyArUiyqHBbq1a4rP tJhCLn2c4S7flXGF0MDVGZyV9V7J8N8leq/dRGMB027Li21T+B4mPHXa6b8tpRPL 4vc8sHoUJDa2/+mFDJ2XbZfmlgd3MmIPlRn1VWoW7mxgT/AObsPl7LuQx7+t80Wx hIMjuKUHRACQSLwHxJ3SQRFWp4xbztnXSRXYuHTscLUCgYEA//Uu0qIm/FgC45yG nXtoax4+7UXhxrsWDEkbtL6RQ0TSTiwaaI6RSQcjrKDVSo/xo4ZySTYcRgp5GKlI CrWyNM+UnIzTNbZOtvSIAfjxYxMsq1vwpTlOB5/g+cMukeGg39yUlrjVNoFpv4i6 9t4yYuEaF4Vww0FDd2nNKhhW648CgYEA0+UYH6TKu03zDXqFpwf4DP2VoSo8OgfQ eN93lpFNyjrfzvxDZkGF+7M/ebyYuI6hFplVMu6BpgpFP7UVJpW0Hn/sXkTq7F1Q rTJTtkTp2+uxQVP/PzSOqK0Twi5ifkfoEOkPkNNtTiXzwCW6Qmmcvln2u893pyR5 gqo5BHR7Ev8CgYAb7bXpN9ZHLJdMHLU3k9Kl9YvqOfjTxXA3cPa79xtEmsrTys4q 4HuL22KSII6Fb0VvkWkBAg19uwDRpw78VC0YxBm0J02Yi8b1AaOhi3dTVzFFlWeh r6oK/PAAcMKxGkyCgMAZ3hstsltGkfXMoBwhW+yL6nyOYZ2p9vpzAGrjkwKBgQDF 0huzbyXVt/AxpTEhv07U0enfjI6tnp4COp5q8zyskEph8yD5VjK/yZh5DpmFs6Kw dnYUFpbzbKM51tToMNr3nnYNjEnGYVfwWgvNHok1x9S0KLcjSu3ki7DmmGdbfcYq A2uEyd5CFyx5Nr+tQOwUyeiPbiFG6caHNmQExLoiAQKBgFPy9H8///xsadYmZ18k r77R2CvU7ArxlLfp9dr19aGYKvHvnpsY6EuChkWfy8Xjqn3ogzgrHz/rn3mlGUpK vbtwtsknAHtTbotXJwfaBZv2RGgGRr3DzNo6ll2Aez0lNblZFXq132h7+y5iLvar 4euORaD/fuM4UPlR5mN+bypU -----END PRIVATE KEY----- 
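The ext_conf*.xml fixtures above all carry their extension settings as a JSON document ({"runtimeSettings":[{"handlerSettings":{...}}]}) embedded inside the XML. The snippet below is an illustrative sketch only, not the agent's actual parser: it assumes an ExtensionsConfig-style layout in which the JSON sits in an element's text, and the helper name read_public_settings is invented for this example.

import json
import xml.etree.ElementTree as ET

def read_public_settings(ext_conf_text):
    # Collect the publicSettings dict from every runtimeSettings blob found
    # in an ext_conf-style document such as the fixtures above.
    root = ET.fromstring(ext_conf_text)
    results = []
    for node in root.iter():
        # The fixtures store the JSON as element text; tag names vary by schema version,
        # so this sketch simply looks for text that contains the runtimeSettings key.
        if node.text and '"runtimeSettings"' in node.text:
            settings = json.loads(node.text)
            for rs in settings.get("runtimeSettings", []):
                handler_settings = rs.get("handlerSettings", {})
                results.append(handler_settings.get("publicSettings"))
    return results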
WALinuxAgent-2.2.20/tests/data/wire/version_info.xml000066400000000000000000000003361322477356400223650ustar00rootroot00000000000000 2012-11-30 2010-12-15 2010-28-10 WALinuxAgent-2.2.20/tests/distro/000077500000000000000000000000001322477356400165665ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/distro/__init__.py000066400000000000000000000011651322477356400207020ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/distro/test_resourceDisk.py000066400000000000000000000076341322477356400226530ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import sys from azurelinuxagent.common.utils import shellutil from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from tests.tools import * class TestResourceDisk(AgentTestCase): def test_mkfile(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) # execute get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert os.path.exists(test_file) # cleanup os.remove(test_file) def test_mkfile_dd_fallback(self): with patch.object(shellutil, "run") as run_patch: # setup run_patch.return_value = 1 test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 # execute if sys.version_info >= (3,3): with patch("os.posix_fallocate", side_effect=Exception('failure')): get_resourcedisk_handler().mkfile(test_file, file_size) else: get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert run_patch.call_count > 1 assert "fallocate" in run_patch.call_args_list[0][0][0] assert "dd if" in run_patch.call_args_list[-1][0][0] def test_mkfile_xfs_fs(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) # execute resource_disk_handler = get_resourcedisk_handler() resource_disk_handler.fs = 'xfs' with patch.object(shellutil, "run") as run_patch: resource_disk_handler.mkfile(test_file, file_size) # assert if sys.version_info >= (3,3): with patch("os.posix_fallocate") as posix_fallocate: assert posix_fallocate.assert_not_called() assert 
run_patch.call_count == 1 assert "dd if" in run_patch.call_args_list[0][0][0] def test_change_partition_type(self): resource_handler = get_resourcedisk_handler() # test when sfdisk --part-type does not exist with patch.object(shellutil, "run_get_output", side_effect=[[1, ''], [0, '']]) as run_patch: resource_handler.change_partition_type(suppress_message=True, option_str='') # assert assert run_patch.call_count == 2 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] assert "sfdisk -c" in run_patch.call_args_list[1][0][0] # test when sfdisk --part-type exists with patch.object(shellutil, "run_get_output", side_effect=[[0, '']]) as run_patch: resource_handler.change_partition_type(suppress_message=True, option_str='') # assert assert run_patch.call_count == 1 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/distro/test_scvmm.py000066400000000000000000000064111322477356400213260ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import mock from tests.tools import * import azurelinuxagent.daemon.scvmm as scvmm from azurelinuxagent.daemon.main import * from azurelinuxagent.common.osutil.default import DefaultOSUtil class TestSCVMM(AgentTestCase): def test_scvmm_detection_with_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) fileutil.write_file(scvmm_file, "") with patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as po: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): with patch('time.sleep', return_value=0): # execute failed = False try: scvmm.get_scvmm_handler().run() except: failed = True # assert self.assertTrue(failed) self.assertTrue(po.call_count == 1) # cleanup os.remove(scvmm_file) def test_scvmm_detection_with_multiple_cdroms(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) # execute with mock.patch.object(DefaultOSUtil, 'mount_dvd') as patch_mount: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): scvmm.ScvmmHandler().detect_scvmm_env() # assert assert patch_mount.call_count == 3 assert patch_mount.call_args_list[0][1]['dvd_device'] == '/dev/sr0' assert patch_mount.call_args_list[1][1]['dvd_device'] == '/dev/sr1' assert patch_mount.call_args_list[2][1]['dvd_device'] == '/dev/sr2' def test_scvmm_detection_without_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) if os.path.exists(scvmm_file): os.remove(scvmm_file) with 
mock.patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as patch_start: # execute scvmm.ScvmmHandler().detect_scvmm_env() # assert patch_start.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/ga/000077500000000000000000000000001322477356400156515ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/ga/__init__.py000066400000000000000000000011651322477356400177650ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/ga/test_env.py000066400000000000000000000061511322477356400200550ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import glob import tempfile import os from mock import patch from azurelinuxagent.common.utils import fileutil from azurelinuxagent.ga.env import MAXIMUM_CACHED_FILES, EnvHandler from tests.tools import AgentTestCase class TestEnv(AgentTestCase): @patch("azurelinuxagent.common.conf.get_lib_dir") def test_purge_disk_cache(self, mock_conf, *args): names = [ ("Prod", "agentsManifest"), ("Test", "agentsManifest"), ("FauxExtension1", "manifest.xml"), ("FauxExtension2", "manifest.xml"), ("GoalState", "xml"), ("ExtensionsConfig", "xml") ] env = EnvHandler() tmp_dir = tempfile.mkdtemp() mock_conf.return_value = tmp_dir # write incarnations 1-100 for t in names: self._create_files(tmp_dir, t[0], t[1], 2 * MAXIMUM_CACHED_FILES, with_sleep=0.001) # update incarnation 1 with the latest timestamp for t in names: f = os.path.join(tmp_dir, '.'.join((t[0], '1', t[1]))) fileutil.write_file(f, "faux content") # ensure the expected number of files are created for t in names: p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) self.assertEqual(2 * MAXIMUM_CACHED_FILES, len(glob.glob(p))) env.purge_disk_cache() # ensure the expected number of files remain for t in names: p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) incarnation2 = os.path.join(tmp_dir, '{0}.2.{1}'.format(t[0], t[1])) self.assertEqual(MAXIMUM_CACHED_FILES, len(glob.glob(p))) self.assertTrue(os.path.exists(incarnation1)) self.assertFalse(os.path.exists(incarnation2)) # write incarnation 101 for t in names: f = os.path.join(tmp_dir, '.'.join((t[0], '101', t[1]))) fileutil.write_file(f, "faux content") # call to purge should be ignored, since interval has not elapsed env.purge_disk_cache() for t in names: 
p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) self.assertEqual(MAXIMUM_CACHED_FILES + 1, len(glob.glob(p))) self.assertTrue(os.path.exists(incarnation1)) WALinuxAgent-2.2.20/tests/ga/test_extension.py000066400000000000000000000700531322477356400213030ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import glob import os import os.path import shutil import tempfile import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from tests.protocol.mockwiredata import * from azurelinuxagent.common.exception import * from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ Extension, \ VMStatus, ExtHandler, \ get_properties from azurelinuxagent.ga.exthandlers import * from azurelinuxagent.common.protocol.wire import WireProtocol class TestExtensionCleanup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.ext_handlers = ExtHandlersHandler() self.lib_dir = tempfile.mkdtemp() def _install_handlers(self, start=0, count=1, handler_state=ExtHandlerState.Installed): src = os.path.join(data_dir, "ext", "sample_ext-1.3.0.zip") version = FlexibleVersion("1.3.0") version += start - version.patch for i in range(start, start+count): eh = ExtHandler() eh.name = "sample_ext" eh.properties.version = str(version) handler = ExtHandlerInstance(eh, "unused") dst = os.path.join(self.lib_dir, handler.get_full_name()+HANDLER_PKG_EXT) shutil.copy(src, dst) if not handler_state is None: zipfile.ZipFile(dst).extractall(handler.get_base_dir()) handler.set_handler_state(handler_state) version += 1 def _count_packages(self): return len(glob.glob(os.path.join(self.lib_dir, "*.zip"))) def _count_installed(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths if os.path.isdir(p) and self._is_installed(p)]) def _count_uninstalled(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths if os.path.isdir(p) and not self._is_installed(p)]) def _is_installed(self, path): path = os.path.join(path, 'config', 'HandlerState') return fileutil.read_file(path) != "NotInstalled" @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_leaves_installed_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.Enabled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) self.assertEqual(self._count_uninstalled(), 0) 
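    # The next two tests exercise cleanup_outdated_handlers() with the same setup as above:
    # handler directories whose config/HandlerState reads "NotInstalled" are removed together
    # with their .zip packages, and packages that were copied but never extracted
    # (handler_state=None in _install_handlers) are treated as orphans and deleted,
    # while installed/enabled handlers and their packages are left untouched.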
@patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_uninstalled_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.NotInstalled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 5) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_orphaned_packages(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=None) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) class TestHandlerStateMigration(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) handler_name = "Not.A.Real.Extension" handler_version = "1.2.3" self.ext_handler = ExtHandler(handler_name) self.ext_handler.properties.version = handler_version self.ext_handler_i = ExtHandlerInstance(self.ext_handler, "dummy protocol") self.handler_state = "Enabled" self.handler_status = ExtHandlerStatus( name=handler_name, version=handler_version, status="Ready", message="Uninteresting message") return def _prepare_handler_state(self): handler_state_path = os.path.join( self.tmp_dir, "handler_state", self.ext_handler_i.get_full_name()) os.makedirs(handler_state_path) fileutil.write_file( os.path.join(handler_state_path, "state"), self.handler_state) fileutil.write_file( os.path.join(handler_state_path, "status"), json.dumps(get_properties(self.handler_status))) return def _prepare_handler_config(self): handler_config_path = os.path.join( self.tmp_dir, self.ext_handler_i.get_full_name(), "config") os.makedirs(handler_config_path) return def test_migration_migrates(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), self.handler_state) self.assertEquals( self.ext_handler_i.get_handler_status().status, self.handler_status.status) return def test_migration_skips_if_empty(self): self._prepare_handler_config() migrate_handler_state() self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerState"))) self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerStatus"))) return def test_migration_cleans_up(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertFalse(os.path.isdir(os.path.join(conf.get_lib_dir(), "handler_state"))) return def test_migration_does_not_overwrite(self): self._prepare_handler_state() self._prepare_handler_config() state = "Installed" status = "NotReady" code = 1 message = "A message" self.assertNotEquals(state, self.handler_state) self.assertNotEquals(status, self.handler_status.status) self.assertNotEquals(code, self.handler_status.code) self.assertNotEquals(message, self.handler_status.message) self.ext_handler_i.set_handler_state(state) 
self.ext_handler_i.set_handler_status(status=status, code=code, message=message) migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), state) handler_status = self.ext_handler_i.get_handler_status() self.assertEquals(handler_status.status, status) self.assertEquals(handler_status.code, code) self.assertEquals(handler_status.message, message) return @patch("shutil.move", side_effect=Exception) def test_migration_ignores_move_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return @patch("shutil.rmtree", side_effect=Exception) def test_migration_ignores_tree_remove_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtension(AgentTestCase): def _assert_handler_status(self, report_vm_status, expected_status, expected_ext_count, version): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) self.assertEquals("OSTCExtensions.ExampleHandlerLinux", handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) return def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) return def _create_mock(self, test_data, mock_http_get, MockCryptUtil): """Test enable/disable/uninstall of an extension""" handler = get_exthandlers_handler() #Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() handler.protocol_util.get_protocol = Mock(return_value=protocol) return handler, protocol def test_ext_handler(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) #Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) #Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) #Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") #Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_no_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 0, "1.0.0") def test_ext_handler_no_public_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") def test_ext_handler_no_ext(self, *args): test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Assert no extension handler status exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_rollingupgrade(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Test goal state changed without new GUID test_data.goal_state = test_data.goal_state.replace("1<", "2<") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test GUID change without new version available test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("FE0987654321", "FE0987654322") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test hotfix available without GUID change test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test GUID change with hotfix test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("FE0987654322", "FE0987654323") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test disable test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.1.1") #Test uninstall test_data.goal_state = test_data.goal_state.replace("6<", "7<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! 
test_data.goal_state = test_data.goal_state.replace("7<", "8<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test re-install test_data.goal_state = test_data.goal_state.replace("8<", "9<") test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test upgrade available without GUID change test_data.goal_state = test_data.goal_state.replace("9<", "10<") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test GUID change with upgrade available test_data.goal_state = test_data.goal_state.replace("10<", "11<") test_data.ext_conf = test_data.ext_conf.replace("FE0987654323", "FE0987654324") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_download_failure(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) exthandlers_handler.run() args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertEquals("OSTCExtensions.ExampleHandlerLinux", kw['name']) self.assertEquals("Download", kw['op']) @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) mock_fileutil.write_file.return_value = IOError("Mock IO Error") exthandlers_handler.run() def test_handle_ext_handlers_on_hold_true(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() protocol.get_artifacts_profile = MagicMock() exthandlers_handler.protocol = protocol # Disable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=False) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() patch_handle_ext_handler.assert_called() # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() patch_handle_ext_handler.assert_not_called() def test_handle_ext_handlers_on_hold_false(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() exthandlers_handler.protocol = protocol # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) #Test when is_on_hold returns False from azurelinuxagent.common.protocol.wire import InVMArtifactsProfile mock_in_vm_artifacts_profile = InVMArtifactsProfile(MagicMock()) mock_in_vm_artifacts_profile.is_on_hold = Mock(return_value=False) protocol.get_artifacts_profile = Mock(return_value=mock_in_vm_artifacts_profile) with patch.object(ExtHandlersHandler, 
'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() patch_handle_ext_handler.assert_called_once() #Test when in_vm_artifacts_profile is not available protocol.get_artifacts_profile = Mock(return_value=None) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() patch_handle_ext_handler.assert_called_once() def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): self.assertTrue(report_ext_status.called) args, kw = report_ext_status.call_args ext_status = args[-1] self.assertEquals(expected_status, ext_status.status) self.assertEquals(expected_seq_no, ext_status.sequenceNumber) def test_ext_handler_no_reporting_status(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Remove status file and re-run collecting extension status status_file = os.path.join(self.tmp_dir, "OSTCExtensions.ExampleHandlerLinux-1.0.0", "status", "0.status") self.assertTrue(os.path.isfile(status_file)) os.remove(status_file) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "error", 0) def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): for internal in [False, True]: for autoupgrade in [False, True]: if internal: config_version = '1.3.0' decision_version = '1.3.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION else: datafile = DATA_FILE_EXT_INTERNALVERSION else: config_version = '1.0.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE decision_version = '1.2.0' else: datafile = DATA_FILE decision_version = '1.0.0' _, protocol = self._create_mock(WireProtocolData(datafile), *args) ext_handlers, _ = protocol.get_ext_handlers() self.assertEqual(1, len(ext_handlers.extHandlers)) ext_handler = ext_handlers.extHandlers[0] self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) self.assertEqual(config_version, ext_handler.properties.version, "config version.") ExtHandlerInstance(ext_handler, protocol).decide_version() self.assertEqual(decision_version, ext_handler.properties.version, "decision version.") def test_ext_handler_version_decide_between_minor_versions(self, *args): """ Using v2.x~v4.x for unit testing Available versions via manifest XML (I stands for internal): 2.0.0, 2.1.0, 2.1.1, 2.2.0, 2.3.0(I), 2.4.0(I), 3.0, 3.1, 4.0.0.0, 4.0.0.1, 4.1.0.0 See tests/data/wire/manifest.xml for possible versions """ # (installed_version, config_version, exptected_version, autoupgrade_expected_version) cases = [ (None, '2.0', '2.0.0', '2.2.0'), (None, '2.0.0', '2.0.0', '2.2.0'), ('1.0', '1.0.0', '1.0.0', '1.2.0'), (None, '2.1.0', '2.1.1', '2.2.0'), (None, '2.2.0', '2.2.0', '2.2.0'), (None, '2.3.0', '2.3.0', '2.3.0'), (None, '2.4.0', '2.4.0', '2.4.0'), (None, '3.0', '3.0', '3.1'), (None, '4.0', '4.0.0.1', '4.1.0.0'), ] _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) version_uri = Mock() version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' for (installed_version, config_version, expected_version, autoupgrade_expected_version) in cases: ext_handler = Mock() ext_handler.properties = Mock() ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' ext_handler.versionUris = [version_uri] 
ext_handler.properties.version = config_version ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) ext_handler_instance.get_installed_version = Mock(return_value=installed_version) ext_handler_instance.decide_version() self.assertEqual(expected_version, ext_handler.properties.version) ext_handler.properties.version = config_version ext_handler.properties.upgradePolicy = 'auto' ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) ext_handler_instance.get_installed_version = Mock(return_value=installed_version) ext_handler_instance.decide_version() self.assertEqual(autoupgrade_expected_version, ext_handler.properties.version) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/ga/test_monitor.py000066400000000000000000000057551322477356400207650ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.ga.monitor import * class TestMonitor(AgentTestCase): def test_parse_xml_event(self): data_str = load_data('ext/event.xml') event = parse_xml_event(data_str) self.assertNotEquals(None, event) self.assertNotEquals(0, event.parameters) self.assertNotEquals(None, event.parameters[0]) @patch('azurelinuxagent.common.osutil.get_osutil') @patch('azurelinuxagent.common.protocol.get_protocol_util') def test_add_sysinfo(self, _, __): data_str = load_data('ext/event.xml') event = parse_xml_event(data_str) monitor_handler = get_monitor_handler() vm_name = 'dummy_vm' tenant_name = 'dummy_tenant' role_name = 'dummy_role' role_instance_name = 'dummy_role_instance' container_id = 'dummy_container_id' vm_name_param = "VMName" tenant_name_param = "TenantName" role_name_param = "RoleName" role_instance_name_param = "RoleInstanceName" container_id_param = "ContainerId" sysinfo = [TelemetryEventParam(vm_name_param, vm_name), TelemetryEventParam(tenant_name_param, tenant_name), TelemetryEventParam(role_name_param, role_name), TelemetryEventParam(role_instance_name_param, role_instance_name), TelemetryEventParam(container_id_param, container_id)] monitor_handler.sysinfo = sysinfo monitor_handler.add_sysinfo(event) self.assertNotEquals(None, event) self.assertNotEquals(0, event.parameters) self.assertNotEquals(None, event.parameters[0]) counter = 0 for p in event.parameters: if p.name == vm_name_param: self.assertEquals(vm_name, p.value) counter += 1 elif p.name == tenant_name_param: self.assertEquals(tenant_name, p.value) counter += 1 elif p.name == role_name_param: self.assertEquals(role_name, p.value) counter += 1 elif p.name == role_instance_name_param: self.assertEquals(role_instance_name, p.value) counter += 1 elif p.name == container_id_param: self.assertEquals(container_id, p.value) counter += 1 self.assertEquals(5, counter) WALinuxAgent-2.2.20/tests/ga/test_update.py000066400000000000000000001767771322477356400205750ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from __future__ import print_function from datetime import datetime import json import shutil import stat from azurelinuxagent.common.event import * from azurelinuxagent.common.protocol.hostplugin import * from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.utils.fileutil import * from azurelinuxagent.ga.update import * from tests.tools import * NO_ERROR = { "last_failure" : 0.0, "failure_count" : 0, "was_fatal" : False } FATAL_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : True } WITH_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : False } EMPTY_MANIFEST = { "name": "WALinuxAgent", "version": 1.0, "handlerManifest": { "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "", "disableCommand": "", "rebootAfterInstall": False, "reportHeartbeat": False } } def get_agent_pkgs(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_PKG_GLOB) return glob.glob(path) def get_agents(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_DIR_GLOB) return [a for a in glob.glob(path) if os.path.isdir(a)] def get_agent_file_path(): return get_agent_pkgs()[0] def get_agent_file_name(): return os.path.basename(get_agent_file_path()) def get_agent_path(): return fileutil.trim_ext(get_agent_file_path(), "zip") def get_agent_name(): return os.path.basename(get_agent_path()) def get_agent_version(): return FlexibleVersion(get_agent_name().split("-")[1]) def faux_logger(): print("STDOUT message") print("STDERR message", file=sys.stderr) return DEFAULT class UpdateTestCase(AgentTestCase): def agent_bin(self, version, suffix): return "bin/{0}-{1}{2}.egg".format(AGENT_NAME, version, suffix) def rename_agent_bin(self, path, src_v, dst_v): src_bin = glob.glob(os.path.join(path, self.agent_bin(src_v, '*')))[0] dst_bin = os.path.join(path, self.agent_bin(dst_v, '')) shutil.move(src_bin, dst_bin) def agents(self): return [GuestAgent(path=path) for path in self.agent_dirs()] def agent_count(self): return len(self.agent_dirs()) def agent_dirs(self): return get_agents(in_dir=self.tmp_dir) def agent_dir(self, version): return os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, version)) def agent_paths(self): paths = glob.glob(os.path.join(self.tmp_dir, "*")) paths.sort() return paths def agent_pkgs(self): return get_agent_pkgs(in_dir=self.tmp_dir) def agent_versions(self): v = [FlexibleVersion(AGENT_DIR_PATTERN.match(a).group(1)) for a in self.agent_dirs()] v.sort(reverse=True) return v def get_error_file(self, error_data=NO_ERROR): fp = tempfile.NamedTemporaryFile(mode="w") json.dump(error_data if error_data is not None else NO_ERROR, fp) fp.seek(0) return fp def create_error(self, error_data=NO_ERROR): with self.get_error_file(error_data) as path: err = GuestAgentError(path.name) err.load() return err def copy_agents(self, *agents): if len(agents) <= 0: agents = get_agent_pkgs() for agent in agents: 
fileutil.copy_file(agent, to_dir=self.tmp_dir) return def expand_agents(self): for agent in self.agent_pkgs(): path = os.path.join(self.tmp_dir, fileutil.trim_ext(agent, "zip")) zipfile.ZipFile(agent).extractall(path) def prepare_agent(self, version): """ Create a download for the current agent version, copied from test data """ self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(version)) to_path = self.agent_dir(dst_v) if from_path != to_path: shutil.move(from_path + ".zip", to_path + ".zip") shutil.move(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) return def prepare_agents(self, count=5, is_available=True): # Ensure the test data is copied over agent_count = self.agent_count() if agent_count <= 0: self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() count -= 1 # Determine the most recent agent version versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) # Create agent packages and directories return self.replicate_agents( src_v=src_v, count=count-agent_count, is_available=is_available) def remove_agents(self): for agent in self.agent_paths(): try: if os.path.isfile(agent): os.remove(agent) else: shutil.rmtree(agent) except: pass return def replicate_agents(self, count=5, src_v=AGENT_VERSION, is_available=True, increment=1): from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(src_v)) for i in range(0, count): dst_v += increment to_path = self.agent_dir(dst_v) shutil.copyfile(from_path + ".zip", to_path + ".zip") shutil.copytree(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) if not is_available: GuestAgent(to_path).mark_failure(is_fatal=True) return dst_v class TestGuestAgentError(UpdateTestCase): def test_creation(self): self.assertRaises(TypeError, GuestAgentError) self.assertRaises(UpdateError, GuestAgentError, None) with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) self.assertEqual(WITH_ERROR["last_failure"], err.last_failure) self.assertEqual(WITH_ERROR["failure_count"], err.failure_count) self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal) return def test_clear(self): with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) err.clear() self.assertEqual(NO_ERROR["last_failure"], err.last_failure) self.assertEqual(NO_ERROR["failure_count"], err.failure_count) self.assertEqual(NO_ERROR["was_fatal"], err.was_fatal) return def test_save(self): err1 = self.create_error() err1.mark_failure() err1.mark_failure(is_fatal=True) err2 = self.create_error(err1.to_json()) self.assertEqual(err1.last_failure, err2.last_failure) self.assertEqual(err1.failure_count, err2.failure_count) self.assertEqual(err1.was_fatal, err2.was_fatal) def test_mark_failure(self): err = self.create_error() self.assertFalse(err.is_blacklisted) for i in range(0, MAX_FAILURE): err.mark_failure() # Agent failed >= MAX_FAILURE, it should be blacklisted self.assertTrue(err.is_blacklisted) self.assertEqual(MAX_FAILURE, err.failure_count) return def test_mark_failure_permanent(self): err = self.create_error() self.assertFalse(err.is_blacklisted) # Fatal errors immediately blacklist err.mark_failure(is_fatal=True) self.assertTrue(err.is_blacklisted) 
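# Descriptive note (added comment): a single fatal failure blacklists the agent even though failure_count is still below MAX_FAILURE.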
self.assertTrue(err.failure_count < MAX_FAILURE) return def test_str(self): err = self.create_error(error_data=NO_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( NO_ERROR["last_failure"], NO_ERROR["failure_count"], NO_ERROR["was_fatal"]) self.assertEqual(s, str(err)) err = self.create_error(error_data=WITH_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( WITH_ERROR["last_failure"], WITH_ERROR["failure_count"], WITH_ERROR["was_fatal"]) self.assertEqual(s, str(err)) return class TestGuestAgent(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.copy_agents(get_agent_file_path()) self.agent_path = os.path.join(self.tmp_dir, get_agent_name()) def test_creation(self): self.assertRaises(UpdateError, GuestAgent, "A very bad file name") n = "{0}-a.bad.version".format(AGENT_NAME) self.assertRaises(UpdateError, GuestAgent, n) self.expand_agents() agent = GuestAgent(path=self.agent_path) self.assertNotEqual(None, agent) self.assertEqual(get_agent_name(), agent.name) self.assertEqual(get_agent_version(), agent.version) self.assertEqual(self.agent_path, agent.get_agent_dir()) path = os.path.join(self.agent_path, AGENT_MANIFEST_FILE) self.assertEqual(path, agent.get_agent_manifest_path()) self.assertEqual( os.path.join(self.agent_path, AGENT_ERROR_FILE), agent.get_agent_error_file()) path = ".".join((os.path.join(conf.get_lib_dir(), get_agent_name()), "zip")) self.assertEqual(path, agent.get_agent_pkg_path()) self.assertTrue(agent.is_downloaded) self.assertFalse(agent.is_blacklisted) self.assertTrue(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") def test_clear_error(self, mock_downloaded): self.expand_agents() agent = GuestAgent(path=self.agent_path) agent.mark_failure(is_fatal=True) self.assertTrue(agent.error.last_failure > 0.0) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.clear_error() self.assertEqual(0.0, agent.error.last_failure) self.assertEqual(0, agent.error.failure_count) self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_available(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_available) agent._unpack() self.assertTrue(agent.is_available) agent.mark_failure(is_fatal=True) self.assertFalse(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_blacklisted(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) agent._unpack() self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_downloaded(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_downloaded) agent._unpack() self.assertTrue(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") 
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_mark_failure(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent.mark_failure() self.assertEqual(1, agent.error.failure_count) agent.mark_failure(is_fatal=True) self.assertEqual(2, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isdir(agent.get_agent_dir())) self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack_fail(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) os.remove(agent.get_agent_pkg_path()) self.assertRaises(UpdateError, agent._unpack) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent._unpack() agent._load_manifest() self.assertEqual(agent.manifest.get_enable_command(), agent.get_agent_cmd()) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_missing(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() os.remove(agent.get_agent_manifest_path()) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_empty(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: json.dump(EMPTY_MANIFEST, file) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_malformed(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: file.write("This is not JSON data") self.assertRaises(UpdateError, agent._load_manifest) def test_load_error(self): agent = GuestAgent(path=self.agent_path) agent.error = None agent._load_error() self.assertTrue(agent.error is not None) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") def test_download(self, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) mock_http_get.return_value= ResponseMock(response=agent_pkg) pkg = 
ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) agent._download() self.assertTrue(os.path.isfile(agent.get_agent_pkg_path())) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") def test_download_fail(self, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) mock_http_get.return_value= ResponseMock(status=restutil.httpclient.SERVICE_UNAVAILABLE) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertRaises(UpdateError, agent._download) self.assertFalse(os.path.isfile(agent.get_agent_pkg_path())) self.assertFalse(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") def test_download_fallback(self, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) mock_http_get.return_value = ResponseMock( status=restutil.httpclient.SERVICE_UNAVAILABLE, response="") ext_uri = 'ext_uri' host_uri = 'host_uri' api_uri = URI_FORMAT_GET_API_VERSIONS.format(host_uri, HOST_PLUGIN_PORT) art_uri = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(host_uri, HOST_PLUGIN_PORT) mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri(uri=ext_uri)) agent = GuestAgent(pkg=pkg) agent.host = mock_host # ensure fallback fails gracefully, no http self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 2) self.assertEqual(mock_http_get.call_args_list[0][0][0], ext_uri) self.assertEqual(mock_http_get.call_args_list[1][0][0], api_uri) # ensure fallback fails gracefully, artifact api failure with patch.object(HostPluginProtocol, "ensure_initialized", return_value=True): self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 4) self.assertEqual(mock_http_get.call_args_list[2][0][0], ext_uri) self.assertEqual(mock_http_get.call_args_list[3][0][0], art_uri) a, k = mock_http_get.call_args_list[3] self.assertEqual(False, k['use_proxy']) # ensure fallback works as expected with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[art_uri, {}]): self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 6) a, k = mock_http_get.call_args_list[3] self.assertEqual(False, k['use_proxy']) self.assertEqual(mock_http_get.call_args_list[4][0][0], ext_uri) a, k = mock_http_get.call_args_list[4] self.assertEqual(mock_http_get.call_args_list[5][0][0], art_uri) a, k = mock_http_get.call_args_list[5] self.assertEqual(False, k['use_proxy']) @patch("azurelinuxagent.ga.update.restutil.http_get") def test_ensure_downloaded(self, mock_http_get): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) mock_http_get.return_value= ResponseMock(response=agent_pkg) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) self.assertTrue(agent.is_downloaded) 
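# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the agent test suite): the download
# tests above patch restutil.http_get with a canned ResponseMock so that
# GuestAgent "downloads" package bytes without touching the network. The
# self-contained example below demonstrates the same mock-an-HTTP-getter
# pattern using purely hypothetical names (download_to, fake_http_get) and
# Python 3's unittest.mock for brevity; it does not assume any
# azurelinuxagent API.
import os
import tempfile
import unittest
from unittest.mock import Mock


def download_to(url, dest_path, http_get):
    # Hypothetical downloader: write the response body for `url` to dest_path.
    body = http_get(url).read()
    with open(dest_path, "wb") as f:
        f.write(body)
    return dest_path


class TestDownloadToSketch(unittest.TestCase):
    def test_download_writes_canned_bytes(self):
        canned = b"PK\x03\x04 fake zip bytes"
        # The mock plays the role restutil.http_get plays in the tests above:
        # it returns an object whose read() yields pre-baked package bytes.
        fake_http_get = Mock(return_value=Mock(read=Mock(return_value=canned)))
        with tempfile.TemporaryDirectory() as tmp:
            dest = download_to("http://example.invalid/agent.zip",
                               os.path.join(tmp, "agent.zip"),
                               fake_http_get)
            with open(dest, "rb") as f:
                self.assertEqual(canned, f.read())
        fake_http_get.assert_called_once_with("http://example.invalid/agent.zip")
# ---------------------------------------------------------------------------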
@patch("azurelinuxagent.ga.update.GuestAgent._download", side_effect=UpdateError) def test_ensure_downloaded_download_fails(self, mock_download): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertFalse(agent.error.was_fatal) self.assertFalse(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack", side_effect=UpdateError) def test_ensure_downloaded_unpack_fails(self, mock_unpack, mock_download): self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack") @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest", side_effect=UpdateError) def test_ensure_downloaded_load_manifest_fails(self, mock_manifest, mock_unpack, mock_download): self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack") @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest") def test_ensure_download_skips_blacklisted(self, mock_manifest, mock_unpack, mock_download): agent = GuestAgent(path=self.agent_path) self.assertEqual(0, mock_download.call_count) agent.clear_error() agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) self.assertEqual(0, mock_download.call_count) self.assertEqual(0, mock_unpack.call_count) class TestUpdate(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.event_patch = patch('azurelinuxagent.common.event.add_event') self.update_handler = get_update_handler() self.update_handler.protocol_util = Mock() def test_creation(self): self.assertTrue(self.update_handler.running) self.assertEqual(None, self.update_handler.last_attempt_time) self.assertEqual(0, len(self.update_handler.agents)) self.assertEqual(None, self.update_handler.child_agent) self.assertEqual(None, self.update_handler.child_launch_time) self.assertEqual(0, self.update_handler.child_launch_attempts) self.assertEqual(None, self.update_handler.child_process) self.assertEqual(None, self.update_handler.signal_handler) def test_emit_restart_event_writes_sentinal_file(self): self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) self.update_handler._emit_restart_event() self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) def test_emit_restart_event_emits_event_if_not_clean_start(self): try: mock_event = self.event_patch.start() self.update_handler._set_sentinal() self.update_handler._emit_restart_event() self.assertEqual(1, mock_event.call_count) 
except Exception as e: pass self.event_patch.stop() def _create_protocol(self, count=5, versions=None): latest_version = self.prepare_agents(count=count) if versions is None or len(versions) <= 0: versions = [latest_version] return ProtocolMock(versions=versions) def _test_ensure_no_orphans(self, invocations=3, interval=ORPHAN_WAIT_INTERVAL, pid_count=0): with patch.object(self.update_handler, 'osutil') as mock_util: # Note: # - Python only allows mutations of objects to which a function has # a reference. Incrementing an integer directly changes the # reference. Incrementing an item of a list changes an item to # which the code has a reference. # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 return iterations[0] < invocations mock_util.check_pid_alive = Mock(side_effect=iterator) pid_files = self.update_handler._get_pid_files() self.assertEqual(pid_count, len(pid_files)) with patch('os.getpid', return_value=42): with patch('time.sleep', return_value=None) as mock_sleep: self.update_handler._ensure_no_orphans(orphan_wait_interval=interval) for pid_file in pid_files: self.assertFalse(os.path.exists(pid_file)) return mock_util.check_pid_alive.call_count, mock_sleep.call_count def test_ensure_no_orphans(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) calls, sleeps = self._test_ensure_no_orphans(invocations=3, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) def test_ensure_no_orphans_skips_if_no_orphans(self): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.read_file', side_effect=Exception): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_kills_after_interval(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) with patch('os.kill') as mock_kill: calls, sleeps = self._test_ensure_no_orphans( invocations=4, interval=3*GOAL_STATE_INTERVAL, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) self.assertEqual(1, mock_kill.call_count) @patch('azurelinuxagent.ga.update.datetime') def test_ensure_partition_assigned(self, mock_time): path = os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) mock_time.utcnow = Mock() self.assertFalse(os.path.exists(path)) for n in range(0,99): mock_time.utcnow.return_value = Mock(microsecond=n* 10000) self.update_handler._ensure_partition_assigned() self.assertTrue(os.path.exists(path)) s = fileutil.read_file(path) self.assertEqual(n, int(s)) os.remove(path) def test_ensure_readonly_sets_readonly(self): test_files = [ os.path.join(conf.get_lib_dir(), "faux_certificate.crt"), os.path.join(conf.get_lib_dir(), "faux_certificate.p7m"), os.path.join(conf.get_lib_dir(), "faux_certificate.pem"), os.path.join(conf.get_lib_dir(), "faux_certificate.prv"), os.path.join(conf.get_lib_dir(), "ovf-env.xml") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual(0, mode ^ stat.S_IRUSR) def test_ensure_readonly_leaves_unmodified(self): test_files = [ 
os.path.join(conf.get_lib_dir(), "faux.xml"), os.path.join(conf.get_lib_dir(), "faux.json"), os.path.join(conf.get_lib_dir(), "faux.txt"), os.path.join(conf.get_lib_dir(), "faux") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, mode) def _test_evaluate_agent_health(self, child_agent_index=0): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertFalse(latest_agent.is_blacklisted) self.assertTrue(len(self.update_handler.agents) > 1) child_agent = self.update_handler.agents[child_agent_index] self.assertTrue(child_agent.is_available) self.assertFalse(child_agent.is_blacklisted) self.update_handler.child_agent = child_agent self.update_handler._evaluate_agent_health(latest_agent) def test_evaluate_agent_health_ignores_installed_agent(self): self.update_handler._evaluate_agent_health(None) def test_evaluate_agent_health_raises_exception_for_restarting_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self.assertRaises(Exception, self._test_evaluate_agent_health) def test_evaluate_agent_health_will_not_raise_exception_for_long_restarts(self): self.update_handler.child_launch_time = time.time() - 24 * 60 self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX self._test_evaluate_agent_health() def test_evaluate_agent_health_will_not_raise_exception_too_few_restarts(self): self.update_handler.child_launch_time = time.time() self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 2 self._test_evaluate_agent_health() def test_evaluate_agent_health_resets_with_new_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self._test_evaluate_agent_health(child_agent_index=1) self.assertEqual(1, self.update_handler.child_launch_attempts) def test_filter_blacklisted_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) kept_agents = self.update_handler.agents[1::2] blacklisted_agents = self.update_handler.agents[::2] for agent in blacklisted_agents: agent.mark_failure(is_fatal=True) self.update_handler._filter_blacklisted_agents() self.assertEqual(kept_agents, self.update_handler.agents) def test_find_agents(self): self.prepare_agents() self.assertTrue(0 <= len(self.update_handler.agents)) self.update_handler._find_agents() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) def test_find_agents_does_reload(self): self.prepare_agents() self.update_handler._find_agents() agents = self.update_handler.agents self.update_handler._find_agents() self.assertNotEqual(agents, self.update_handler.agents) def test_find_agents_sorts(self): self.prepare_agents() self.update_handler._find_agents() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_host_for_wireserver(self, 
mock_get_host): protocol = WireProtocol('12.34.56.78') mock_get_host.return_value = "faux host" host = self.update_handler._get_host_plugin(protocol=protocol) mock_get_host.assert_called_once() self.assertEqual("faux host", host) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_none_otherwise(self, mock_get_host): protocol = MetadataProtocol() host = self.update_handler._get_host_plugin(protocol=protocol) mock_get_host.assert_not_called() self.assertEqual(None, host) def test_get_latest_agent(self): latest_version = self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) self.assertEqual(latest_version, latest_agent.version) def test_get_latest_agent_excluded(self): self.prepare_agent(AGENT_VERSION) self.assertFalse(self._test_upgrade_available( versions=self.agent_versions(), count=1)) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_no_updates(self): self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skip_updates(self): conf.get_autoupdate_enabled = Mock(return_value=False) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skips_unavailable(self): self.prepare_agents() prior_agent = self.update_handler.get_latest_agent() latest_version = self.prepare_agents(count=self.agent_count()+1, is_available=False) latest_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, latest_version)) self.assertFalse(GuestAgent(latest_path).is_available) latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.version < latest_version) self.assertEqual(latest_agent.version, prior_agent.version) def test_get_pid_files(self): pid_files = self.update_handler._get_pid_files() self.assertEqual(0, len(pid_files)) def test_get_pid_files_returns_previous(self): for n in range(1250): fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1)) pid_files = self.update_handler._get_pid_files() self.assertEqual(1250, len(pid_files)) pid_dir, pid_name, pid_re = self.update_handler._get_pid_parts() for p in pid_files: self.assertTrue(pid_re.match(os.path.basename(p))) def test_is_clean_start_returns_true_when_no_sentinal(self): self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) self.assertTrue(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_when_sentinal_exists(self): self.update_handler._set_sentinal(agent=CURRENT_AGENT) self.assertFalse(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_for_exceptions(self): self.update_handler._set_sentinal() with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception): self.assertFalse(self.update_handler._is_clean_start) def test_is_orphaned_returns_false_if_parent_exists(self): fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('os.getppid', return_value=42): self.assertFalse(self.update_handler._is_orphaned) def test_is_orphaned_returns_true_if_parent_is_init(self): with patch('os.getppid', return_value=1): self.assertTrue(self.update_handler._is_orphaned) def test_is_orphaned_returns_true_if_parent_does_not_exist(self): fileutil.write_file(conf.get_agent_pid_file_path(), ustr(24)) with patch('os.getppid', return_value=42): self.assertTrue(self.update_handler._is_orphaned) def test_is_version_available(self): 
self.prepare_agents(is_available=True) self.update_handler.agents = self.agents() for agent in self.agents(): self.assertTrue(self.update_handler._is_version_eligible(agent.version)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False) def test_is_version_available_rejects(self, mock_current): self.prepare_agents(is_available=True) self.update_handler.agents = self.agents() self.update_handler.agents[0].mark_failure(is_fatal=True) self.assertFalse(self.update_handler._is_version_eligible(self.agents()[0].version)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=True) def test_is_version_available_accepts_current(self, mock_current): self.update_handler.agents = [] self.assertTrue(self.update_handler._is_version_eligible(CURRENT_VERSION)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False) def test_is_version_available_rejects_by_default(self, mock_current): self.prepare_agents() self.update_handler.agents = [] v = self.agents()[0].version self.assertFalse(self.update_handler._is_version_eligible(v)) def test_purge_agents(self): self.prepare_agents() self.update_handler._find_agents() # Ensure at least three agents initially exist self.assertTrue(2 < len(self.update_handler.agents)) # Purge every other agent kept_agents = self.update_handler.agents[1::2] purged_agents = self.update_handler.agents[::2] # Reload and assert only the kept agents remain on disk self.update_handler.agents = kept_agents self.update_handler._purge_agents() self.update_handler._find_agents() self.assertEqual( [agent.version for agent in kept_agents], [agent.version for agent in self.update_handler.agents]) # Ensure both directories and packages are removed for agent in purged_agents: agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) self.assertFalse(os.path.exists(agent_path)) self.assertFalse(os.path.exists(agent_path + ".zip")) # Ensure kept agent directories and packages remain for agent in kept_agents: agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) self.assertTrue(os.path.exists(agent_path)) self.assertTrue(os.path.exists(agent_path + ".zip")) def _test_run_latest(self, mock_child=None, mock_time=None, child_args=None): if mock_child is None: mock_child = ChildMock() if mock_time is None: mock_time = TimeMock() with patch('subprocess.Popen', return_value=mock_child) as mock_popen: with patch('time.time', side_effect=mock_time.time): with patch('time.sleep', side_effect=mock_time.sleep): self.update_handler.run_latest(child_args=child_args) self.assertEqual(1, mock_popen.call_count) return mock_popen.call_args def test_run_latest(self): self.prepare_agents() agent = self.update_handler.get_latest_agent() args, kwargs = self._test_run_latest() args = args[0] cmds = textutil.safe_shlex_split(agent.get_agent_cmd()) if cmds[0].lower() == "python": cmds[0] = get_python_cmd() self.assertEqual(args, cmds) self.assertTrue(len(args) > 1) self.assertTrue(args[0].startswith("python")) self.assertEqual("-run-exthandlers", args[len(args)-1]) self.assertEqual(True, 'cwd' in kwargs) self.assertEqual(agent.get_agent_dir(), kwargs['cwd']) self.assertEqual(False, '\x00' in cmds[0]) def test_run_latest_passes_child_args(self): self.prepare_agents() agent = self.update_handler.get_latest_agent() args, kwargs = self._test_run_latest(child_args="AnArgument") args = args[0] self.assertTrue(len(args) > 1) self.assertTrue(args[0].startswith("python")) 
self.assertEqual("AnArgument", args[len(args)-1]) def test_run_latest_polls_and_waits_for_success(self): mock_child = ChildMock(return_value=None) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(2, mock_child.poll.call_count) self.assertEqual(1, mock_child.wait.call_count) def test_run_latest_polling_stops_at_success(self): mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(1, mock_child.poll.call_count) self.assertEqual(0, mock_child.wait.call_count) def test_run_latest_polling_stops_at_failure(self): mock_child = ChildMock(return_value=42) mock_time = TimeMock() self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(1, mock_child.poll.call_count) self.assertEqual(0, mock_child.wait.call_count) def test_run_latest_polls_frequently_if_installed_is_latest(self): mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) self._test_run_latest(mock_time=mock_time) self.assertEqual(1, mock_time.sleep_interval) def test_run_latest_polls_moderately_if_installed_not_latest(self): self.prepare_agents() mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) self._test_run_latest(mock_time=mock_time) self.assertNotEqual(1, mock_time.sleep_interval) def test_run_latest_defaults_to_current(self): self.assertEqual(None, self.update_handler.get_latest_agent()) args, kwargs = self._test_run_latest() self.assertEqual(args[0], [get_python_cmd(), "-u", sys.argv[0], "-run-exthandlers"]) self.assertEqual(True, 'cwd' in kwargs) self.assertEqual(os.getcwd(), kwargs['cwd']) def test_run_latest_forwards_output(self): try: tempdir = tempfile.mkdtemp() stdout_path = os.path.join(tempdir, "stdout") stderr_path = os.path.join(tempdir, "stderr") with open(stdout_path, "w") as stdout: with open(stderr_path, "w") as stderr: saved_stdout, sys.stdout = sys.stdout, stdout saved_stderr, sys.stderr = sys.stderr, stderr try: self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger)) finally: sys.stdout = saved_stdout sys.stderr = saved_stderr with open(stdout_path, "r") as stdout: self.assertEqual(1, len(stdout.readlines())) with open(stderr_path, "r") as stderr: self.assertEqual(1, len(stderr.readlines())) finally: shutil.rmtree(tempdir, True) def test_run_latest_nonzero_code_marks_failures(self): # logger.add_logger_appender(logger.AppenderType.STDOUT) self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent): self._test_run_latest(mock_child=ChildMock(return_value=1)) self.assertTrue(latest_agent.is_blacklisted) self.assertFalse(latest_agent.is_available) self.assertNotEqual(0.0, latest_agent.error.last_failure) self.assertEqual(1, latest_agent.error.failure_count) def test_run_latest_exception_blacklists(self): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', 
return_value=latest_agent): self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting"))) self.assertFalse(latest_agent.is_available) self.assertTrue(latest_agent.error.is_blacklisted) self.assertNotEqual(0.0, latest_agent.error.last_failure) self.assertEqual(1, latest_agent.error.failure_count) def test_run_latest_exception_does_not_blacklist_if_terminating(self): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent): self.update_handler.running = False self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Attempt blacklisting"))) self.assertTrue(latest_agent.is_available) self.assertFalse(latest_agent.error.is_blacklisted) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) @patch('signal.signal') def test_run_latest_captures_signals(self, mock_signal): self._test_run_latest() self.assertEqual(1, mock_signal.call_count) @patch('signal.signal') def test_run_latest_creates_only_one_signal_handler(self, mock_signal): self.update_handler.signal_handler = "Not None" self._test_run_latest() self.assertEqual(0, mock_signal.call_count) def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False): conf.get_autoupdate_enabled = Mock(return_value=enable_updates) # Note: # - Python only allows mutations of objects to which a function has # a reference. Incrementing an integer directly changes the # reference. Incrementing an item of a list changes an item to # which the code has a reference. 
# See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 if iterations[0] >= invocations: self.update_handler.running = False return fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: with patch('time.sleep', side_effect=iterator) as mock_sleep: with patch('sys.exit') as mock_exit: if isinstance(os.getppid, MagicMock): self.update_handler.run() else: with patch('os.getppid', return_value=42): self.update_handler.run() self.assertEqual(1, mock_handler.call_count) self.assertEqual(mock_handler.return_value.method_calls, calls) self.assertEqual(invocations, mock_sleep.call_count) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_exit.call_count) def test_run(self): self._test_run() def test_run_keeps_running(self): self._test_run(invocations=15, calls=[call.run()]*15) def test_run_stops_if_update_available(self): self.update_handler._upgrade_available = Mock(return_value=True) self._test_run(invocations=0, calls=[], enable_updates=True) def test_run_stops_if_orphaned(self): with patch('os.getppid', return_value=1): self._test_run(invocations=0, calls=[], enable_updates=True) def test_run_clears_sentinal_on_successful_exit(self): self._test_run() self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) def test_run_leaves_sentinal_on_unsuccessful_exit(self): self.update_handler._upgrade_available = Mock(side_effect=Exception) self._test_run(invocations=0, calls=[], enable_updates=True) self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) def test_run_emits_restart_event(self): self.update_handler._emit_restart_event = Mock() self._test_run() self.assertEqual(1, self.update_handler._emit_restart_event.call_count) def test_set_agents_sets_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertTrue(len(self.update_handler.agents) > 0) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) def test_set_agents_sorts_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version def test_set_sentinal(self): self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) self.update_handler._set_sentinal() self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) def test_set_sentinal_writes_current_agent(self): self.update_handler._set_sentinal() self.assertTrue( fileutil.read_file(self.update_handler._sentinal_file_path()), CURRENT_AGENT) def test_shutdown(self): self.update_handler._set_sentinal() self.update_handler._shutdown() self.assertFalse(self.update_handler.running) self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) def test_shutdown_ignores_missing_sentinal_file(self): self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) self.update_handler._shutdown() self.assertFalse(self.update_handler.running) self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) def 
test_shutdown_ignores_exceptions(self): self.update_handler._set_sentinal() try: with patch("os.remove", side_effect=Exception): self.update_handler._shutdown() except Exception as e: self.assertTrue(False, "Unexpected exception") def _test_upgrade_available( self, base_version=FlexibleVersion(AGENT_VERSION), protocol=None, versions=None, count=5): if protocol is None: protocol = self._create_protocol(count=count, versions=versions) self.update_handler.protocol_util = protocol conf.get_autoupdate_gafamily = Mock(return_value=protocol.family) return self.update_handler._upgrade_available(base_version=base_version) def test_upgrade_available_returns_true_on_first_use(self): self.assertTrue(self._test_upgrade_available()) def test_upgrade_available_will_refresh_goal_state(self): protocol = self._create_protocol() protocol.emulate_stale_goal_state() self.assertTrue(self._test_upgrade_available(protocol=protocol)) self.assertEqual(2, protocol.call_counts["get_vmagent_manifests"]) self.assertEqual(1, protocol.call_counts["get_vmagent_pkgs"]) self.assertEqual(1, protocol.call_counts["update_goal_state"]) self.assertTrue(protocol.goal_state_forced) def test_upgrade_available_handles_missing_family(self): extensions_config = ExtensionsConfig(load_data("wire/ext_conf_missing_family.xml")) protocol = ProtocolMock() protocol.family = "Prod" protocol.agent_manifests = extensions_config.vmagent_manifests self.update_handler.protocol_util = protocol with patch('azurelinuxagent.common.logger.warn') as mock_logger: with patch('tests.ga.test_update.ProtocolMock.get_vmagent_pkgs', side_effect=ProtocolError): self.assertFalse(self.update_handler._upgrade_available(base_version=CURRENT_VERSION)) self.assertEqual(0, mock_logger.call_count) def test_upgrade_available_includes_old_agents(self): self.prepare_agents() old_version = self.agent_versions()[-1] old_count = old_version.version[-1] self.replicate_agents(src_v=old_version, count=old_count, increment=-1) all_count = len(self.agent_versions()) self.assertTrue(self._test_upgrade_available(versions=self.agent_versions())) self.assertEqual(all_count, len(self.update_handler.agents)) def test_upgrade_available_purges_old_agents(self): self.prepare_agents() agent_count = self.agent_count() self.assertEqual(5, agent_count) agent_versions = self.agent_versions()[:3] self.assertTrue(self._test_upgrade_available(versions=agent_versions)) self.assertEqual(len(agent_versions), len(self.update_handler.agents)) # Purging always keeps the running agent if CURRENT_VERSION not in agent_versions: agent_versions.append(CURRENT_VERSION) self.assertEqual(agent_versions, self.agent_versions()) def test_update_available_returns_true_if_current_gets_blacklisted(self): self.update_handler._is_version_eligible = Mock(return_value=False) self.assertTrue(self._test_upgrade_available()) def test_upgrade_available_skips_if_too_frequent(self): conf.get_autoupdate_frequency = Mock(return_value=10000) self.update_handler.last_attempt_time = time.time() self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_skips_if_when_no_new_versions(self): self.prepare_agents() base_version = self.agent_versions()[0] + 1 self.update_handler._is_version_eligible = lambda x: x == base_version self.assertFalse(self._test_upgrade_available(base_version=base_version)) def test_upgrade_available_skips_when_no_versions(self): self.assertFalse(self._test_upgrade_available(protocol=ProtocolMock())) def test_upgrade_available_skips_when_updates_are_disabled(self): conf.get_autoupdate_enabled 
= Mock(return_value=False) self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_sorts(self): self.prepare_agents() self._test_upgrade_available() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version def test_write_pid_file(self): for n in range(1112): fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1)) with patch('os.getpid', return_value=1112): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(1112, len(pid_files)) self.assertEqual("1111_waagent.pid", os.path.basename(pid_files[-1])) self.assertEqual("1112_waagent.pid", os.path.basename(pid_file)) self.assertEqual(fileutil.read_file(pid_file), ustr(1112)) def test_write_pid_file_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.write_file', side_effect=Exception): with patch('os.getpid', return_value=42): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(0, len(pid_files)) self.assertEqual(None, pid_file) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', return_value=GoalState(load_data('wire/goal_state.xml'))) def test_package_filter_for_agent_manifest(self, _): protocol = WireProtocol('12.34.56.78') extension_config = ExtensionsConfig(load_data('wire/ext_conf.xml')) agent_manifest = extension_config.vmagent_manifests.vmAgentManifests[0] # has agent versions 13, 14 ga_manifest_1 = ExtensionManifest(load_data('wire/ga_manifest_1.xml')) # has agent versions 13, 14, 15 ga_manifest_2 = ExtensionManifest(load_data('wire/ga_manifest_2.xml')) goal_state = protocol.client.get_goal_state() disk_cache = os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( agent_manifest.family, goal_state.incarnation)) self.assertFalse(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_1.allowed_versions is None) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_1): pkg_list_1 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_1 is not None) self.assertTrue(len(pkg_list_1.versions) == 2) self.assertTrue(pkg_list_1.versions[0].version == '2.2.13') self.assertTrue(pkg_list_1.versions[0].uris[0].uri == 'url1_13') self.assertTrue(pkg_list_1.versions[1].version == '2.2.14') self.assertTrue(pkg_list_1.versions[1].uris[0].uri == 'url1_14') self.assertTrue(os.path.exists(disk_cache)) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_2): pkg_list_2 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_2 is not None) self.assertTrue(len(pkg_list_2.versions) == 2) self.assertTrue(pkg_list_2.versions[0].version == '2.2.13') self.assertTrue(pkg_list_2.versions[0].uris[0].uri == 'url2_13') self.assertTrue(pkg_list_2.versions[1].version == '2.2.14') self.assertTrue(pkg_list_2.versions[1].uris[0].uri == 'url2_14') # does not contain 2.2.15 self.assertTrue(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_2.allowed_versions is not None) self.assertTrue(len(ga_manifest_2.allowed_versions) == 2) self.assertTrue(ga_manifest_2.allowed_versions[0] == '2.2.13') self.assertTrue(ga_manifest_2.allowed_versions[1] == '2.2.14') class ChildMock(Mock): def __init__(self, return_value=0, side_effect=None): Mock.__init__(self, return_value=return_value, side_effect=side_effect) self.poll = Mock(return_value=return_value, side_effect=side_effect) self.wait = Mock(return_value=return_value, 
side_effect=side_effect) class ProtocolMock(object): def __init__(self, family="TestAgent", etag=42, versions=None, client=None): self.family = family self.client = client self.call_counts = { "get_vmagent_manifests" : 0, "get_vmagent_pkgs" : 0, "update_goal_state" : 0 } self.goal_state_is_stale = False self.goal_state_forced = False self.etag = etag self.versions = versions if versions is not None else [] self.create_manifests() self.create_packages() def emulate_stale_goal_state(self): self.goal_state_is_stale = True def create_manifests(self): self.agent_manifests = VMAgentManifestList() if len(self.versions) <= 0: return if self.family is not None: manifest = VMAgentManifest(family=self.family) for i in range(0,10): manifest_uri = "https://nowhere.msft/agent/{0}".format(i) manifest.versionsManifestUris.append(VMAgentManifestUri(uri=manifest_uri)) self.agent_manifests.vmAgentManifests.append(manifest) def create_packages(self): self.agent_packages = ExtHandlerPackageList() if len(self.versions) <= 0: return for version in self.versions: package = ExtHandlerPackage(str(version)) for i in range(0,5): package_uri = "https://nowhere.msft/agent_pkg/{0}".format(i) package.uris.append(ExtHandlerPackageUri(uri=package_uri)) self.agent_packages.versions.append(package) def get_protocol(self): return self def get_vmagent_manifests(self): self.call_counts["get_vmagent_manifests"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_manifests, self.etag def get_vmagent_pkgs(self, manifest): self.call_counts["get_vmagent_pkgs"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_packages def update_goal_state(self, forced=False, max_retry=3): self.call_counts["update_goal_state"] += 1 self.goal_state_forced = self.goal_state_forced or forced class ResponseMock(Mock): def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): Mock.__init__(self) self.status = status self.reason = reason self.response = response def read(self): return self.response class TimeMock(Mock): def __init__(self, time_increment=1): Mock.__init__(self) self.next_time = time.time() self.time_call_count = 0 self.time_increment = time_increment self.sleep_interval = None def sleep(self, n): self.sleep_interval = n def time(self): self.time_call_count += 1 current_time = self.next_time self.next_time += self.time_increment return current_time if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/pa/000077500000000000000000000000001322477356400156625ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/pa/__init__.py000066400000000000000000000011651322477356400177760ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/pa/test_deprovision.py000066400000000000000000000156671322477356400216530ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import signal import tempfile import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision import get_deprovision_handler from azurelinuxagent.pa.deprovision.default import DeprovisionHandler from tests.tools import * class TestDeprovision(AgentTestCase): @patch('signal.signal') @patch('azurelinuxagent.common.osutil.get_osutil') @patch('azurelinuxagent.common.protocol.get_protocol_util') @patch('azurelinuxagent.pa.deprovision.default.read_input') def test_confirmation(self, mock_read, mock_protocol, mock_util, mock_signal): dh = DeprovisionHandler() dh.setup = Mock() dh.setup.return_value = ([], []) dh.do_actions = Mock() # Do actions if confirmed mock_read.return_value = "y" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Skip actions if not confirmed mock_read.return_value = "n" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Do actions if forced mock_read.return_value = "n" dh.run(force=True) self.assertEqual(2, dh.do_actions.call_count) @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") def test_del_cloud_init_without_once(self, mock_files, mock_dirs): deprovision_handler = get_deprovision_handler("","","") deprovision_handler.del_cloud_init([], [], include_once=False, deluser=False) mock_dirs.assert_called_with(include_once=False) mock_files.assert_called_with(include_once=False, deluser=False) @patch("signal.signal") @patch("azurelinuxagent.common.protocol.get_protocol_util") @patch("azurelinuxagent.common.osutil.get_osutil") @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") def test_del_cloud_init(self, mock_files, mock_dirs, mock_osutil, mock_util, mock_signal): try: with tempfile.NamedTemporaryFile() as f: warnings = [] actions = [] dirs = [tempfile.mkdtemp()] mock_dirs.return_value = dirs files = [f.name] mock_files.return_value = files deprovision_handler = get_deprovision_handler("","","") deprovision_handler.del_cloud_init(warnings, actions, deluser=True) mock_dirs.assert_called_with(include_once=True) mock_files.assert_called_with(include_once=True, deluser=True) self.assertEqual(len(warnings), 0) self.assertEqual(len(actions), 2) for da in actions: if da.func == fileutil.rm_dirs: self.assertEqual(da.args, dirs) elif da.func == fileutil.rm_files: self.assertEqual(da.args, files) else: self.assertTrue(False) try: for da in actions: da.invoke() self.assertEqual(len([d for d in dirs if os.path.isdir(d)]), 0) self.assertEqual(len([f for f in files if os.path.isfile(f)]), 0) except Exception as e: 
self.assertTrue(False, "Exception {0}".format(e)) except OSError: # Ignore the error caused by removing the file within the "with" pass @distros("ubuntu") @patch('azurelinuxagent.common.conf.get_lib_dir') def test_del_lib_dir_files(self, distro_name, distro_version, distro_full_name, mock_conf): files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint', 'Extensions.1.xml', 'ExtensionsConfig.1.xml', 'GoalState.1.xml', 'Extensions.2.xml', 'ExtensionsConfig.2.xml', 'GoalState.2.xml' ] tmp = tempfile.mkdtemp() mock_conf.return_value = tmp for f in files: fileutil.write_file(os.path.join(tmp, f), "Value") deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings = [] actions = [] deprovision_handler.del_lib_dir_files(warnings, actions) self.assertTrue(len(warnings) == 0) self.assertTrue(len(actions) == 1) self.assertEqual(fileutil.rm_files, actions[0].func) self.assertTrue(len(actions[0].args) > 0) for f in actions[0].args: self.assertTrue(os.path.basename(f) in files) @distros("redhat") def test_deprovision(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolv.conf" in w for w in warnings) @distros("ubuntu") def test_deprovision_ubuntu(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) with patch("os.path.realpath", return_value="/run/resolvconf/resolv.conf"): warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolvconf/resolv.conf.d/tail" in w for w in warnings) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/pa/test_provision.py000066400000000000000000000112361322477356400213260ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * class TestProvision(AgentTestCase): @distros("redhat") @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') def test_provision(self, mock_util, distro_name, distro_version, distro_full_name): provision_handler = get_provision_handler(distro_name, distro_version, distro_full_name) mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") provision_handler.osutil = mock_osutil provision_handler.protocol_util.osutil = mock_osutil provision_handler.protocol_util.get_protocol_by_file = MagicMock() provision_handler.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) provision_handler.run() def test_customdata(self): base64data = 'Q3VzdG9tRGF0YQ==' data = DefaultOSUtil().decode_customdata(base64data) fileutil.write_file(tempfile.mktemp(), data) @patch('azurelinuxagent.common.conf.get_provision_enabled', return_value=False) def test_provisioning_is_skipped_when_not_enabled(self, mock_conf): ph = ProvisionHandler() ph.osutil = DefaultOSUtil() ph.osutil.get_instance_id = Mock( return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') ph.is_provisioned = Mock() ph.report_ready = Mock() ph.write_provisioned = Mock() ph.run() ph.is_provisioned.assert_not_called() ph.report_ready.assert_called_once() ph.write_provisioned.assert_called_once() @patch('os.path.isfile', return_value=False) def test_is_provisioned_not_provisioned(self, mock_isfile): ph = ProvisionHandler() self.assertFalse(ph.is_provisioned()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_is_provisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=True) ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) ph.osutil.is_current_instance_id.assert_called_once() deprovision_handler.run_changed_unique_id.assert_not_called() @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_not_deprovisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=False) ph.report_ready = Mock() ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) ph.osutil.is_current_instance_id.assert_called_once() deprovision_handler.run_changed_unique_id.assert_called_once() if __name__ == '__main__': 
unittest.main() WALinuxAgent-2.2.20/tests/protocol/000077500000000000000000000000001322477356400171235ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/protocol/__init__.py000066400000000000000000000011651322477356400212370ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/protocol/mockmetadata.py000066400000000000000000000051441322477356400221330ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "identity": "metadata/identity.json", "certificates": "metadata/certificates.json", "certificates_data": "metadata/certificates_data.json", "ext_handlers": "metadata/ext_handlers.json", "ext_handler_pkgs": "metadata/ext_handler_pkgs.json", "trans_prv": "metadata/trans_prv", "trans_cert": "metadata/trans_cert", } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["ext_handlers"] = "metadata/ext_handlers_no_ext.json" class MetadataProtocolData(object): def __init__(self, data_files): self.identity = load_data(data_files.get("identity")) self.certificates = load_data(data_files.get("certificates")) self.certificates_data = load_data(data_files.get("certificates_data")) self.ext_handlers = load_data(data_files.get("ext_handlers")) self.ext_handler_pkgs = load_data(data_files.get("ext_handler_pkgs")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) def mock_http_get(self, url, *args, **kwargs): content = None if url.count(u"identity?") > 0: content = self.identity elif url.count(u"certificates") > 0: content = self.certificates elif url.count(u"certificates_data") > 0: content = self.certificates_data elif url.count(u"extensionHandlers") > 0: content = self.ext_handlers elif url.count(u"versionUri") > 0: content = self.ext_handler_pkgs else: raise Exception("Bad url {0}".format(url)) resp = MagicMock() resp.status = httpclient.OK if content is None: resp.read = Mock(return_value=None) else: resp.read = Mock(return_value=content.encode("utf-8")) return resp WALinuxAgent-2.2.20/tests/protocol/mockwiredata.py000066400000000000000000000151451322477356400221550ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "version_info": "wire/version_info.xml", "goal_state": "wire/goal_state.xml", "hosting_env": "wire/hosting_env.xml", "shared_config": "wire/shared_config.xml", "certs": "wire/certs.xml", "ext_conf": "wire/ext_conf.xml", "manifest": "wire/manifest.xml", "ga_manifest" : "wire/ga_manifest.xml", "trans_prv": "wire/trans_prv", "trans_cert": "wire/trans_cert", "test_ext": "ext/sample_ext-1.3.0.zip" } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml" DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy() DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml" DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy() DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml" DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml" DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml" DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml" DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml" class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.emulate_stale_goal_state = False self.call_counts = { "comp=versions" : 0, "/versions" : 0, "goalstate" : 0, "hostingenvuri" : 0, "sharedconfiguri" : 0, "certificatesuri" : 0, "extensionsconfiguri" : 0, "extensionArtifact" : 0, "manifest.xml" : 0, "manifest_of_ga.xml" : 0, "ExampleHandlerLinux" : 0 } self.version_info = load_data(data_files.get("version_info")) self.goal_state = load_data(data_files.get("goal_state")) self.hosting_env = load_data(data_files.get("hosting_env")) self.shared_config = load_data(data_files.get("shared_config")) self.certs = load_data(data_files.get("certs")) self.ext_conf = load_data(data_files.get("ext_conf")) self.manifest = load_data(data_files.get("manifest")) self.ga_manifest = load_data(data_files.get("ga_manifest")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) self.ext = load_bin_data(data_files.get("test_ext")) def mock_http_get(self, url, *args, **kwargs): content = None resp = MagicMock() resp.status = httpclient.OK # wire server versions if "comp=versions" in url: content = self.version_info self.call_counts["comp=versions"] += 1 # HostPlugin versions elif "/versions" in url: content = '["2015-09-01"]' self.call_counts["/versions"] += 1 elif "goalstate" in url: content = self.goal_state self.call_counts["goalstate"] += 1 elif "hostingenvuri" in url: content = self.hosting_env self.call_counts["hostingenvuri"] += 1 elif 
"sharedconfiguri" in url: content = self.shared_config self.call_counts["sharedconfiguri"] += 1 elif "certificatesuri" in url: content = self.certs self.call_counts["certificatesuri"] += 1 elif "extensionsconfiguri" in url: content = self.ext_conf self.call_counts["extensionsconfiguri"] += 1 else: # A stale GoalState results in a 400 from the HostPlugin # for which the HTTP handler in restutil raises ResourceGoneError if self.emulate_stale_goal_state: if "extensionArtifact" in url: self.emulate_stale_goal_state = False self.call_counts["extensionArtifact"] += 1 raise ResourceGoneError() else: raise HttpError() # For HostPlugin requests, replace the URL with that passed # via the x-ms-artifact-location header if "extensionArtifact" in url: self.call_counts["extensionArtifact"] += 1 if "headers" not in kwargs or \ "x-ms-artifact-location" not in kwargs["headers"]: raise Exception("Bad HEADERS passed to HostPlugin: {0}", kwargs) url = kwargs["headers"]["x-ms-artifact-location"] if "manifest.xml" in url: content = self.manifest self.call_counts["manifest.xml"] += 1 elif "manifest_of_ga.xml" in url: content = self.ga_manifest self.call_counts["manifest_of_ga.xml"] += 1 elif "ExampleHandlerLinux" in url: content = self.ext self.call_counts["ExampleHandlerLinux"] += 1 resp.read = Mock(return_value=content) return resp else: raise Exception("Bad url {0}".format(url)) resp.read = Mock(return_value=content.encode("utf-8")) return resp def mock_crypt_util(self, *args, **kw): #Partially patch instance method of class CryptUtil cryptutil = CryptUtil(*args, **kw) cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert) return cryptutil def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file): with open(trans_prv_file, 'w+') as prv_file: prv_file.write(self.trans_prv) with open(trans_cert_file, 'w+') as cert_file: cert_file.write(self.trans_cert) WALinuxAgent-2.2.20/tests/protocol/test_hostplugin.py000066400000000000000000000411551322477356400227360ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import base64 import json import sys from azurelinuxagent.common.future import ustr if sys.version_info[0] == 3: import http.client as httpclient bytebuffer = memoryview elif sys.version_info[0] == 2: import httplib as httpclient bytebuffer = buffer import azurelinuxagent.common.protocol.restapi as restapi import azurelinuxagent.common.protocol.wire as wire import azurelinuxagent.common.protocol.hostplugin as hostplugin from azurelinuxagent.common import event from azurelinuxagent.common.exception import ProtocolError, HttpError from azurelinuxagent.common.protocol.hostplugin import API_VERSION from azurelinuxagent.common.utils import restutil from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE from tests.tools import * hostplugin_status_url = "http://168.63.129.16:32526/status" sas_url = "http://sas_url" wireserver_url = "168.63.129.16" block_blob_type = 'BlockBlob' page_blob_type = 'PageBlob' api_versions = '["2015-09-01"]' storage_version = "2014-02-14" faux_status = "{ 'dummy' : 'data' }" faux_status_b64 = base64.b64encode(bytes(bytearray(faux_status, encoding='utf-8'))) if PY_VERSION_MAJOR > 2: faux_status_b64 = faux_status_b64.decode('utf-8') class TestHostPlugin(AgentTestCase): def _compare_data(self, actual, expected): for k in iter(expected.keys()): if k == 'content' or k == 'requestUri': if actual[k] != expected[k]: print("Mismatch: Actual '{0}'='{1}', " \ "Expected '{0}'='{3}'".format( k, actual[k], expected[k])) return False elif k == 'headers': for h in expected['headers']: if not (h in actual['headers']): print("Missing Header: '{0}'".format(h)) return False else: print("Unexpected Key: '{0}'".format(k)) return False return True def _hostplugin_data(self, blob_headers, content=None): headers = [] for name in iter(blob_headers.keys()): headers.append({ 'headerName': name, 'headerValue': blob_headers[name] }) data = { 'requestUri': sas_url, 'headers': headers } if not content is None: s = base64.b64encode(bytes(content)) if PY_VERSION_MAJOR > 2: s = s.decode('utf-8') data['content'] = s return data def _hostplugin_headers(self, goal_state): return { 'x-ms-version': '2015-09-01', 'Content-type': 'application/json', 'x-ms-containerid': goal_state.container_id, 'x-ms-host-config-name': goal_state.role_config_name } def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data): args, kwargs = args self.assertEqual(exp_method, args[0]) self.assertEqual(exp_url, args[1]) self.assertTrue(self._compare_data(json.loads(args[2]), exp_data)) headers = kwargs['headers'] self.assertEqual(headers['x-ms-containerid'], goal_state.container_id) self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name) def test_fallback(self): """ Validate fallback to upload status using HostGAPlugin is happening when status reporting via default method is unsuccessful """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") with patch.object(wire.HostPluginProtocol, "ensure_initialized", return_value=True): with patch.object(wire.StatusBlob, "upload", return_value=False) as patch_upload: with patch.object(wire.HostPluginProtocol, "_put_page_blob_status") as patch_put: wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = 
sas_url wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type wire_protocol_client.status_blob.set_vm_status(status) wire_protocol_client.upload_status_blob() self.assertEqual(patch_upload.call_count, 1) self.assertTrue(patch_put.call_count == 1, "Fallback was not engaged") self.assertTrue(patch_put.call_args[0][0] == sas_url) self.assertTrue(wire.HostPluginProtocol.is_default_channel()) wire.HostPluginProtocol.set_default_channel(False) def test_fallback_failure(self): """ Validate that when host plugin fails, the default channel is reset """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire.HostPluginProtocol.set_default_channel(False) with patch.object(wire.HostPluginProtocol, "ensure_initialized", return_value=True): with patch.object(wire.StatusBlob, "upload", return_value=False): with patch.object(wire.HostPluginProtocol, "_put_page_blob_status", side_effect=wire.HttpError("put failure")) as patch_put: client = wire.WireProtocol(wireserver_url).client client.get_goal_state = Mock(return_value=test_goal_state) client.ext_conf = wire.ExtensionsConfig(None) client.ext_conf.status_upload_blob = sas_url client.ext_conf.status_upload_blob_type = page_blob_type client.status_blob.set_vm_status(status) client.upload_status_blob() self.assertTrue(patch_put.call_count == 1, "Fallback was not engaged") self.assertFalse(wire.HostPluginProtocol.is_default_channel()) def test_put_status_error_reporting(self): """ Validate the telemetry when uploading status fails """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire.HostPluginProtocol.set_default_channel(False) with patch.object(wire.StatusBlob, "upload", return_value=False): wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.set_vm_status(status) put_error = wire.HttpError("put status http error") with patch.object(event, "add_event") as patch_add_event: with patch.object(restutil, "http_put", side_effect=put_error) as patch_http_put: with patch.object(wire.HostPluginProtocol, "ensure_initialized", return_value=True): wire_protocol_client.upload_status_blob() self.assertFalse(wire.HostPluginProtocol.is_default_channel()) self.assertTrue(patch_add_event.call_count == 1) def test_validate_http_request(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( status_blob.get_block_blob_headers(len(faux_status)), bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) plugin = wire_protocol_client.get_host_plugin() with patch.object(plugin, 'get_api_versions') as patch_api: patch_api.return_value = API_VERSION 
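                # With get_api_versions mocked, exercise put_vm_status and then
                # verify below that exactly one HTTP request was issued and that
                # it carried the expected method, URL, headers and encoded content.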
plugin.put_vm_status(status_blob, sas_url, block_blob_type) self.assertTrue(patch_http.call_count == 1) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) def test_no_fallback(self): """ Validate fallback to upload status using HostGAPlugin is not happening when status reporting via default method is successful """ vmstatus = restapi.VMStatus(message="Ready", status="Ready") with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put: with patch.object(wire.StatusBlob, "upload") as patch_upload: patch_upload.return_value = True wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.vm_status = vmstatus wire_protocol_client.upload_status_blob() self.assertTrue(patch_put.call_count == 0, "Fallback was engaged") def test_validate_block_blob(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.type = block_blob_type status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( status_blob.get_block_blob_headers(len(faux_status)), bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) with patch.object(wire.HostPluginProtocol, "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) self.assertTrue(patch_http.call_count == 1) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) def test_validate_page_blobs(self): """Validate correct set of data is sent for page blobs""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.type = page_blob_type status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url page_status = bytearray(status_blob.data, encoding='utf-8') page_size = int((len(page_status) + 511) / 512) * 512 page_status = bytearray(status_blob.data.ljust(page_size), encoding='utf-8') page = bytearray(page_size) page[0: page_size] = page_status[0: len(page_status)] mock_response = MockResponse('', httpclient.OK) with patch.object(restutil, "http_request", return_value=mock_response) as patch_http: with patch.object(wire.HostPluginProtocol, "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) self.assertTrue(patch_http.call_count == 2) exp_data = self._hostplugin_data( 
status_blob.get_page_blob_create_headers( page_size)) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) exp_data = self._hostplugin_data( status_blob.get_page_blob_page_headers( 0, page_size), page) exp_data['requestUri'] += "?comp=page" self._validate_hostplugin_args( patch_http.call_args_list[1], test_goal_state, exp_method, exp_url, exp_data) def test_validate_get_extension_artifacts(self): test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) expected_url = hostplugin.URI_FORMAT_GET_EXTENSION_ARTIFACT.format(wireserver_url, hostplugin.HOST_PLUGIN_PORT) expected_headers = {'x-ms-version': '2015-09-01', "x-ms-containerid": test_goal_state.container_id, "x-ms-host-config-name": test_goal_state.role_config_name, "x-ms-artifact-location": sas_url} host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) with patch.object(wire.HostPluginProtocol, "get_api_versions", return_value=api_versions) as patch_get: actual_url, actual_headers = host_client.get_artifact_request(sas_url) self.assertTrue(host_client.is_initialized) self.assertFalse(host_client.api_versions is None) self.assertEqual(expected_url, actual_url) for k in expected_headers: self.assertTrue(k in actual_headers) self.assertEqual(expected_headers[k], actual_headers[k]) class MockResponse: def __init__(self, body, status_code): self.body = body self.status = status_code def read(self): return self.body if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/protocol/test_metadata.py000066400000000000000000000136441322477356400223240ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import json from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.restutil import httpclient from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.restapi import * from tests.protocol.mockmetadata import * from tests.tools import * class TestMetadataProtocolGetters(AgentTestCase): def load_json(self, path): return json.loads(ustr(load_data(path)), encoding="utf-8") @patch("time.sleep") def _test_getters(self, test_data ,_): with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = MetadataProtocol() protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) def test_getters(self, *args): test_data = MetadataProtocolData(DATA_FILE) self._test_getters(test_data, *args) def test_getters_no(self, *args): test_data = MetadataProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(mock_update.call_count, 1) self.assertEqual(mock_get.call_count, 1) manifests_uri = BASE_URI.format( METADATA_ENDPOINT, "vmAgentVersions", APIVERSION) self.assertEqual(mock_get.call_args[0][0], manifests_uri) self.assertEqual(etag, 42) self.assertNotEqual(None, manifests) self.assertEqual(len(manifests.vmAgentManifests), 1) manifest = manifests.vmAgentManifests[0] self.assertEqual(manifest.family, conf.get_autoupdate_gafamily()) self.assertEqual(len(manifest.versionsManifestUris), 2) # Same etag returns the same data data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 next_manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(etag, 42) self.assertEqual(manifests, next_manifests) # New etag returns new data mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests_raises(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) data = self.load_json("metadata/vmagent_manifests_invalid2.json") mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagent_pkgs(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() manifest = manifests.vmAgentManifests[0] data = self.load_json("metadata/vmagent_manifest1.json") mock_get.return_value = data, 42 pkgs = protocol.get_vmagent_pkgs(manifest) self.assertNotEqual(None, pkgs) 
self.assertEqual(len(pkgs.versions), 2) for pkg in pkgs.versions: self.assertNotEqual(None, pkg.version) self.assertTrue(len(pkg.uris) > 0) for uri in pkg.uris: self.assertTrue(uri.uri.endswith("zip")) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._post_data") def test_report_event(self, mock_post): events = TelemetryEventList() data = self.load_json("events/1478123456789000.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) data = self.load_json("events/1478123456789001.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) data = self.load_json("events/1479766858966718.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) protocol = MetadataProtocol() protocol.report_event(events) events_uri = BASE_URI.format( METADATA_ENDPOINT, "status/telemetry", APIVERSION) self.assertEqual(mock_post.call_count, 1) self.assertEqual(mock_post.call_args[0][0], events_uri) self.assertEqual(mock_post.call_args[0][1], get_properties(events)) WALinuxAgent-2.2.20/tests/protocol/test_protocol_util.py000066400000000000000000000057371322477356400234460ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import * from azurelinuxagent.common.protocol import get_protocol_util, \ TAG_FILE_NAME @patch("time.sleep") class TestProtocolUtil(AgentTestCase): @patch("azurelinuxagent.common.protocol.util.MetadataProtocol") @patch("azurelinuxagent.common.protocol.util.WireProtocol") def test_detect_protocol(self, WireProtocol, MetadataProtocol, _): WireProtocol.return_value = MagicMock() MetadataProtocol.return_value = MagicMock() protocol_util = get_protocol_util() protocol_util.dhcp_handler = MagicMock() protocol_util.dhcp_handler.endpoint = "foo.bar" #Test wire protocol is available protocol = protocol_util.get_protocol() self.assertEquals(WireProtocol.return_value, protocol) #Test wire protocol is not available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() protocol = protocol_util.get_protocol() self.assertEquals(MetadataProtocol.return_value, protocol) #Test no protocol is available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() MetadataProtocol.return_value.detect.side_effect = ProtocolError() self.assertRaises(ProtocolError, protocol_util.get_protocol) def test_detect_protocol_by_file(self, _): protocol_util = get_protocol_util() protocol_util._detect_wire_protocol = Mock() protocol_util._detect_metadata_protocol = Mock() tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist protocol_util.get_protocol_by_file() protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() #Test tag file exists protocol_util.clear_protocol() protocol_util._detect_wire_protocol.reset_mock() protocol_util._detect_metadata_protocol.reset_mock() with open(tag_file, "w+") as tag_fd: tag_fd.write("") protocol_util.get_protocol_by_file() protocol_util._detect_metadata_protocol.assert_any_call() protocol_util._detect_wire_protocol.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/protocol/test_restapi.py000066400000000000000000000027231322477356400222070ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # from tests.tools import * import uuid import unittest import os import shutil import time from azurelinuxagent.common.protocol.restapi import * class SampleDataContract(DataContract): def __init__(self): self.foo = None self.bar = DataContractList(int) class TestDataContract(unittest.TestCase): def test_get_properties(self): obj = SampleDataContract() obj.foo = "foo" obj.bar.append(1) data = get_properties(obj) self.assertEquals("foo", data["foo"]) self.assertEquals(list, type(data["bar"])) def test_set_properties(self): obj = SampleDataContract() data = { 'foo' : 1, 'baz': 'a' } set_properties('sample', obj, data) self.assertFalse(hasattr(obj, 'baz')) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/protocol/test_wire.py000066400000000000000000000456711322477356400215170ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import glob from azurelinuxagent.common import event from azurelinuxagent.common.protocol.wire import * from tests.protocol.mockwiredata import * data_with_bom = b'\xef\xbb\xbfhehe' testurl = 'http://foo' testtype = 'BlockBlob' wireserver_url = '168.63.129.16' @patch("time.sleep") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") class TestWireProtocol(AgentTestCase): def setUp(self): super(TestWireProtocol, self).setUp() HostPluginProtocol.set_default_channel(False) def _test_getters(self, test_data, MockCryptUtil, _): MockCryptUtil.side_effect = test_data.mock_crypt_util with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = WireProtocol(wireserver_url) protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) crt1 = os.path.join(self.tmp_dir, '33B0ABCE4673538650971C10F7D7397E71561F35.crt') crt2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.crt') prv2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.prv') self.assertTrue(os.path.isfile(crt1)) self.assertTrue(os.path.isfile(crt2)) self.assertTrue(os.path.isfile(prv2)) self.assertEqual("1", protocol.get_incarnation()) def test_getters(self, *args): """Normal case""" test_data = WireProtocolData(DATA_FILE) self._test_getters(test_data, *args) def test_getters_no_ext(self, *args): """Provision with agent is not checked""" test_data = WireProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) def test_getters_ext_no_settings(self, *args): """Extensions without any settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) self._test_getters(test_data, *args) def test_getters_ext_no_public(self, *args): """Extensions without any public settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) self._test_getters(test_data, *args) def test_getters_with_stale_goal_state(self, *args): test_data = WireProtocolData(DATA_FILE) 
test_data.emulate_stale_goal_state = True self._test_getters(test_data, *args) # Ensure HostPlugin was invoked self.assertEqual(1, test_data.call_counts["/versions"]) self.assertEqual(2, test_data.call_counts["extensionArtifact"]) # Ensure the expected number of HTTP calls were made # -- Tracking calls to retrieve GoalState is problematic since it is # fetched often; however, the dependent documents, such as the # HostingEnvironmentConfig, will be retrieved the expected number self.assertEqual(2, test_data.call_counts["hostingenvuri"]) def test_call_storage_kwargs(self, mock_cryptutil, mock_sleep): from azurelinuxagent.common.utils import restutil with patch.object(restutil, 'http_get') as http_patch: http_req = restutil.http_get url = testurl headers = {} # no kwargs -- Default to True WireClient.call_storage_service(http_req) # kwargs, no use_proxy -- Default to True WireClient.call_storage_service(http_req, url, headers) # kwargs, use_proxy None -- Default to True WireClient.call_storage_service(http_req, url, headers, use_proxy=None) # kwargs, use_proxy False -- Keep False WireClient.call_storage_service(http_req, url, headers, use_proxy=False) # kwargs, use_proxy True -- Keep True WireClient.call_storage_service(http_req, url, headers, use_proxy=True) # assert self.assertTrue(http_patch.call_count == 5) for i in range(0,5): c = http_patch.call_args_list[i][-1]['use_proxy'] self.assertTrue(c == (True if i != 3 else False)) def test_status_blob_parsing(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(WireProtocolData(DATA_FILE).ext_conf) self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob, u'https://yuezhatest.blob.core.windows.net/vhds/test' u'-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se' u'=9999-01-01&sk=key1&sv=2014-02-14&sig' u'=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D') self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob_type, u'BlockBlob') pass def test_get_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(WireClient, "get_goal_state", return_value = goal_state) as patch_get_goal_state: host_plugin = wire_protocol_client.get_host_plugin() self.assertEqual(goal_state.container_id, host_plugin.container_id) self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name) patch_get_goal_state.assert_called_once() def test_download_ext_handler_pkg_fallback(self, *args): ext_uri = 'extension_uri' host_uri = 'host_uri' mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') with patch.object(restutil, "http_request", side_effect=IOError) as patch_http: with patch.object(WireClient, "get_host_plugin", return_value=mock_host): with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[host_uri, {}]) as patch_request: WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri) self.assertEqual(patch_http.call_count, 2) self.assertEqual(patch_request.call_count, 1) self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri) self.assertEqual(patch_http.call_args_list[1][0][1], host_uri) def test_upload_status_blob_default(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = 
testtype wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() patch_default_upload.assert_called_once_with(testurl) patch_get_goal_state.assert_not_called() patch_host_ga_plugin_upload.assert_not_called() def test_upload_status_blob_host_ga_plugin(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(HostPluginProtocol, "ensure_initialized", return_value=True): with patch.object(StatusBlob, "upload", return_value=False) as patch_default_upload: with patch.object(HostPluginProtocol, "_put_block_blob_status") as patch_http: HostPluginProtocol.set_default_channel(False) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.upload_status_blob() patch_default_upload.assert_called_once_with(testurl) wire_protocol_client.get_goal_state.assert_called_once() patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob) self.assertTrue(HostPluginProtocol.is_default_channel()) HostPluginProtocol.set_default_channel(False) def test_upload_status_blob_unknown_type_assumes_block(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = "NotALegalType" wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(StatusBlob, "prepare") as patch_prepare: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() patch_prepare.assert_called_once_with("BlockBlob") patch_default_upload.assert_called_once_with(testurl) patch_get_goal_state.assert_not_called() def test_upload_status_blob_reports_prepare_error(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare: with patch.object(WireClient, "report_status_event") as mock_event: wire_protocol_client.upload_status_blob() mock_prepare.assert_called_once() mock_event.assert_called_once() def test_get_in_vm_artifacts_profile_blob_not_available(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) # Test when artifacts_profile_blob is null/None self.assertEqual(None, 
                         wire_protocol_client.get_artifacts_profile())

        #Test when artifacts_profile_blob is whitespace
        wire_protocol_client.ext_conf.artifacts_profile_blob = " "
        self.assertEqual(None, wire_protocol_client.get_artifacts_profile())

    def test_get_in_vm_artifacts_profile_response_body_not_valid(self, *args):
        wire_protocol_client = WireProtocol(wireserver_url).client
        wire_protocol_client.ext_conf = ExtensionsConfig(None)
        wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
        goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
        wire_protocol_client.get_goal_state = Mock(return_value=goal_state)

        with patch.object(HostPluginProtocol, "get_artifact_request", return_value = ['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers:
            #Test when response body is None
            wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(None, 200))
            in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
            self.assertTrue(in_vm_artifacts_profile is None)

            #Test when response body is whitespace only
            wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(' '.encode('utf-8'), 200))
            in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
            self.assertTrue(in_vm_artifacts_profile is None)

            #Test when response body is an empty json dictionary
            wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{ }'.encode('utf-8'), 200))
            in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
            self.assertEqual(dict(), in_vm_artifacts_profile.__dict__, 'If artifacts_profile_blob has empty json dictionary, in_vm_artifacts_profile ' 'should contain nothing')

            host_plugin_get_artifact_url_and_headers.assert_called_with(testurl)

    def test_get_in_vm_artifacts_profile_default(self, *args):
        wire_protocol_client = WireProtocol(wireserver_url).client
        wire_protocol_client.ext_conf = ExtensionsConfig(None)
        wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
        goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
        wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
        wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200))

        in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
        self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__)
        self.assertTrue(in_vm_artifacts_profile.is_on_hold())

    @patch("time.sleep")
    def test_fetch_manifest_fallback(self, patch_sleep, *args):
        uri1 = ExtHandlerVersionUri()
        uri1.uri = 'ext_uri'
        uris = DataContractList(ExtHandlerVersionUri)
        uris.append(uri1)
        host_uri = 'host_uri'
        mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config')
        client = WireProtocol(wireserver_url).client

        with patch.object(WireClient, "fetch", return_value=None) as patch_fetch:
            with patch.object(WireClient, "get_host_plugin", return_value=mock_host):
                with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[host_uri, {}]):
                    HostPluginProtocol.set_default_channel(False)
                    self.assertRaises(ProtocolError, client.fetch_manifest, uris)
                    self.assertEqual(patch_fetch.call_count, 2)
                    self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri)
                    self.assertEqual(patch_fetch.call_args_list[1][0][0], host_uri)

    def test_get_in_vm_artifacts_profile_host_ga_plugin(self, *args):
        wire_protocol_client = WireProtocol(wireserver_url).client
        wire_protocol_client.ext_conf = ExtensionsConfig(None)
        wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
        goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}'.encode('utf-8')]) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as artifact_request: in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is not None) self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) artifact_request.assert_called_once_with(testurl) @patch("socket.gethostname", return_value="hostname") @patch("time.gmtime", return_value=time.localtime(1485543256)) def test_report_vm_status(self, *args): status = 'status' message = 'message' client = WireProtocol(wireserver_url).client actual = StatusBlob(client=client) actual.set_vm_status(VMStatus(status=status, message=message)) timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) formatted_msg = { 'lang': 'en-US', 'message': message } v1_ga_status = { 'version': str(CURRENT_VERSION), 'status': status, 'formattedMessage': formatted_msg } v1_ga_guest_info = { 'computerName': socket.gethostname(), 'osName': DISTRO_NAME, 'osVersion': DISTRO_VERSION, 'version': str(CURRENT_VERSION), } v1_agg_status = { 'guestAgentStatus': v1_ga_status, 'handlerAggregateStatus': [] } v1_vm_status = { 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, 'guestOSInfo' : v1_ga_guest_info } self.assertEqual(json.dumps(v1_vm_status), actual.to_json()) class MockResponse: def __init__(self, body, status_code): self.body = body self.status = status_code def read(self): return self.body if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/test_agent.py000066400000000000000000000141221322477356400177710ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import mock import os.path import sys from azurelinuxagent.agent import * from azurelinuxagent.common.conf import * from tests.tools import * EXPECTED_CONFIGURATION = \ """AutoUpdate.Enabled = True AutoUpdate.GAFamily = Prod Autoupdate.Frequency = 3600 DVD.MountPoint = /mnt/cdrom/secure DetectScvmmEnv = False EnableOverProvisioning = False Extension.LogDir = /var/log/azure HttpProxy.Host = None HttpProxy.Port = None Lib.Dir = /var/lib/waagent Logs.Verbose = False OS.AllowHTTP = False OS.CheckRdmaDriver = False OS.EnableFIPS = True OS.EnableFirewall = True OS.EnableRDMA = False OS.HomeDir = /home OS.OpensslPath = /usr/bin/openssl OS.PasswordPath = /etc/shadow OS.RootDeviceScsiTimeout = 300 OS.SshClientAliveInterval = 42 OS.SshDir = /notareal/path OS.SudoersDir = /etc/sudoers.d OS.UpdateRdmaDriver = False Pid.File = /var/run/waagent.pid Provisioning.AllowResetSysUser = False Provisioning.DecodeCustomData = False Provisioning.DeleteRootPassword = True Provisioning.Enabled = True Provisioning.ExecuteCustomData = False Provisioning.MonitorHostName = True Provisioning.PasswordCryptId = 6 Provisioning.PasswordCryptSaltLength = 10 Provisioning.RegenerateSshHostKeyPair = True Provisioning.SshHostKeyPairType = rsa Provisioning.UseCloudInit = True ResourceDisk.EnableSwap = False ResourceDisk.Filesystem = ext4 ResourceDisk.Format = True ResourceDisk.MountOptions = None ResourceDisk.MountPoint = /mnt/resource ResourceDisk.SwapSizeMB = 0""".split('\n') class TestAgent(AgentTestCase): def test_accepts_configuration_path(self): conf_path = os.path.join(data_dir, "test_waagent.conf") c, f, v, cfp = parse_args(["-configuration-path:" + conf_path]) self.assertEqual(cfp, conf_path) @patch("os.path.exists", return_value=True) def test_checks_configuration_path(self, mock_exists): conf_path = "/foo/bar-baz/something.conf" c, f, v, cfp = parse_args(["-configuration-path:"+conf_path]) self.assertEqual(cfp, conf_path) self.assertEqual(mock_exists.call_count, 1) @patch("sys.stderr") @patch("os.path.exists", return_value=False) @patch("sys.exit", side_effect=Exception) def test_rejects_missing_configuration_path(self, mock_exit, mock_exists, mock_stderr): try: c, f, v, cfp = parse_args(["-configuration-path:/foo/bar.conf"]) self.assertTrue(False) except Exception: self.assertEqual(mock_exit.call_count, 1) def test_configuration_path_defaults_to_none(self): c, f, v, cfp = parse_args([]) self.assertEqual(cfp, None) def test_agent_accepts_configuration_path(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(conf.get_fips_enabled()) @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_uses_default_configuration_path(self, mock_load): Agent(False) mock_load.assert_called_once_with("/etc/waagent.conf") @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_does_not_pass_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() mock_handler.return_value = mock_daemon agent = Agent(False) agent.daemon() mock_daemon.run.assert_called_once_with(child_args=None) mock_load.assert_called_once() @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_passes_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() mock_handler.return_value = mock_daemon agent = Agent(False, conf_file_path="/foo/bar.conf") 
agent.daemon() mock_daemon.run.assert_called_once_with(child_args="-configuration-path:/foo/bar.conf") mock_load.assert_called_once() @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_ensures_extension_log_directory(self, mock_dir): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") mock_dir.return_value = ext_log_dir self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isdir(ext_log_dir)) @patch("azurelinuxagent.common.logger.error") @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_logs_if_extension_log_directory_is_a_file(self, mock_dir, mock_log): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") mock_dir.return_value = ext_log_dir fileutil.write_file(ext_log_dir, "Foo") self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) mock_log.assert_called_once() def test_agent_get_configuration(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) actual_configuration = [] configuration = conf.get_configuration() for k in sorted(configuration.keys()): actual_configuration.append("{0} = {1}".format(k, configuration[k])) self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration) WALinuxAgent-2.2.20/tests/test_import.py000066400000000000000000000020321322477356400202020ustar00rootroot00000000000000from tests.tools import * import azurelinuxagent.common.osutil as osutil import azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.protocol as protocol import azurelinuxagent.pa.provision as provision import azurelinuxagent.pa.deprovision as deprovision import azurelinuxagent.daemon as daemon import azurelinuxagent.daemon.resourcedisk as resourcedisk import azurelinuxagent.daemon.scvmm as scvmm import azurelinuxagent.ga.exthandlers as exthandlers import azurelinuxagent.ga.monitor as monitor import azurelinuxagent.ga.update as update class TestImportHandler(AgentTestCase): def test_get_handler(self): osutil.get_osutil() protocol.get_protocol_util() dhcp.get_dhcp_handler() provision.get_provision_handler() deprovision.get_deprovision_handler() daemon.get_daemon_handler() resourcedisk.get_resourcedisk_handler() scvmm.get_scvmm_handler() monitor.get_monitor_handler() update.get_update_handler() exthandlers.get_exthandlers_handler() WALinuxAgent-2.2.20/tests/tools.py000066400000000000000000000106601322477356400167770ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # """ Define util functions for unit test """ import os import re import shutil import tempfile import unittest from functools import wraps import time import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import PY_VERSION_MAJOR # Import mock module for Python2 and Python3 try: from unittest.mock import Mock, patch, MagicMock, DEFAULT, call except ImportError: from mock import Mock, patch, MagicMock, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") debug = False if os.environ.get('DEBUG') == '1': debug = True # Enable verbose logger to stdout if debug: logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) class AgentTestCase(unittest.TestCase): def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) self.test_file = 'test_file' conf.get_autoupdate_enabled = Mock(return_value=True) conf.get_lib_dir = Mock(return_value=self.tmp_dir) ext_log_dir = os.path.join(self.tmp_dir, "azure") conf.get_ext_log_dir = Mock(return_value=ext_log_dir) conf.get_agent_pid_file_path = Mock(return_value=os.path.join(self.tmp_dir, "waagent.pid")) event.init_event_status(self.tmp_dir) event.init_event_logger(self.tmp_dir) def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) def _create_files(self, tmp_dir, prefix, suffix, count, with_sleep=0): for i in range(count): f = os.path.join(tmp_dir, '.'.join((prefix, str(i), suffix))) fileutil.write_file(f, "faux content") time.sleep(with_sleep) def load_data(name): """Load test data""" path = os.path.join(data_dir, name) with open(path, "r") as data_file: return data_file.read() def load_bin_data(name): """Load test bin data""" path = os.path.join(data_dir, name) with open(path, "rb") as data_file: return data_file.read() supported_distro = [ ["ubuntu", "12.04", ""], ["ubuntu", "14.04", ""], ["ubuntu", "14.10", ""], ["ubuntu", "15.10", ""], ["ubuntu", "15.10", "Snappy Ubuntu Core"], ["coreos", "", ""], ["suse", "12", "SUSE Linux Enterprise Server"], ["suse", "13.2", "openSUSE"], ["suse", "11", "SUSE Linux Enterprise Server"], ["suse", "13.1", "openSUSE"], ["debian", "6.0", ""], ["redhat", "6.5", ""], ["redhat", "7.0", ""], ] def open_patch(): open_name = '__builtin__.open' if PY_VERSION_MAJOR == 3: open_name = 'builtins.open' return open_name def distros(distro_name=".*", distro_version=".*", distro_full_name=".*"): """Run test on multiple distros""" def decorator(test_method): @wraps(test_method) def wrapper(self, *args, **kwargs): for distro in supported_distro: if re.match(distro_name, distro[0]) and \ re.match(distro_version, distro[1]) and \ re.match(distro_full_name, distro[2]): if debug: logger.info("Run {0} on {1}", test_method.__name__, distro) new_args = [] new_args.extend(args) new_args.extend(distro) test_method(self, *new_args, **kwargs) # Call tearDown and setUp to create separated environment # for distro testing self.tearDown() self.setUp() return wrapper return decorator WALinuxAgent-2.2.20/tests/utils/000077500000000000000000000000001322477356400164225ustar00rootroot00000000000000WALinuxAgent-2.2.20/tests/utils/__init__.py000066400000000000000000000011651322477356400205360ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # WALinuxAgent-2.2.20/tests/utils/test_file_util.py000066400000000000000000000244221322477356400220130ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # import errno as errno import glob import random import string import tempfile import uuid import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from tests.tools import * class TestFileOperations(AgentTestCase): def test_read_write_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.write_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_rw_utf8_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = u"\u6211" fileutil.write_file(test_file, content, encoding="utf-8") content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_remove_bom(self): test_file=os.path.join(self.tmp_dir, self.test_file) data = b'\xef\xbb\xbfhehe' fileutil.write_file(test_file, data, asbin=True) data = fileutil.read_file(test_file, remove_bom=True) self.assertNotEquals(0xbb, ord(data[0])) def test_append_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.append_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_findre_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*rst line$")) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*ond line$")) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*with more.*")) self.assertNotEquals( None, fileutil.findre_in_file(fp, "^Third.*")) self.assertEquals( None, fileutil.findre_in_file(fp, "^Do not match.*")) def test_findstr_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertTrue(fileutil.findstr_in_file(fp, "First line")) self.assertTrue(fileutil.findstr_in_file(fp, "Second line")) self.assertTrue( fileutil.findstr_in_file(fp, "Third line with more words")) self.assertFalse(fileutil.findstr_in_file(fp, "Not a line")) def 
test_get_last_path_element(self): filepath = '/tmp/abc.def' filename = fileutil.base_name(filepath) self.assertEquals('abc.def', filename) filepath = '/tmp/abc' filename = fileutil.base_name(filepath) self.assertEquals('abc', filename) def test_remove_files(self): random_word = lambda : ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) #Create 10 test files test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') test_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] for file in test_files: open(file, 'a').close() #Remove files using fileutil.rm_files test_file_pattern = test_file + '*' test_file_pattern2 = test_file2 + '*' fileutil.rm_files(test_file_pattern, test_file_pattern2) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern)))) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern2)))) def test_remove_dirs(self): dirs = [] for n in range(0,5): dirs.append(tempfile.mkdtemp()) for d in dirs: for n in range(0, random.choice(range(0,10))): fileutil.write_file(os.path.join(d, "test"+str(n)), "content") for n in range(0, random.choice(range(0,10))): dd = os.path.join(d, "testd"+str(n)) os.mkdir(dd) for nn in range(0, random.choice(range(0,10))): os.symlink(dd, os.path.join(dd, "sym"+str(nn))) for n in range(0, random.choice(range(0,10))): os.symlink(d, os.path.join(d, "sym"+str(n))) fileutil.rm_dirs(*dirs) for d in dirs: self.assertEqual(len(os.listdir(d)), 0) def test_get_all_files(self): random_word = lambda: ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) # Create 10 test files at the root dir and 10 other in the sub dir test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') expected_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] test_subdir = os.path.join(self.tmp_dir, 'test_dir') os.mkdir(test_subdir) test_file_in_subdir = os.path.join(test_subdir, self.test_file) test_file_in_subdir2 = os.path.join(test_subdir, 'another_file') expected_files.extend([test_file_in_subdir + random_word() for _ in range(5)] + \ [test_file_in_subdir2 + random_word() for _ in range(5)]) for file in expected_files: open(file, 'a').close() # Get All files using fileutil.get_all_files actual_files = fileutil.get_all_files(self.tmp_dir) self.assertEqual(set(expected_files), set(actual_files)) @patch('os.path.isfile') def test_update_conf_file(self, _): new_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" existing_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ DHCP_HOSTNAME=existing\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" bad_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=no_new_line" updated_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=test\n" path = 'path' with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with 
patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=existing_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=bad_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) def test_clean_ioerror_ignores_missing(self): e = IOError() e.errno = errno.ENOSPC # Send no paths fileutil.clean_ioerror(e) # Send missing file(s) / directories fileutil.clean_ioerror(e, paths=['/foo/not/here', None, '/bar/not/there']) def test_clean_ioerror_ignores_unless_ioerror(self): try: d = tempfile.mkdtemp() fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') # Send non-IOError exception e = Exception() fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) # Send unrecognized IOError e = IOError() e.errno = errno.EFAULT self.assertFalse(e.errno in fileutil.KNOWN_IOERRORS) fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) finally: shutil.rmtree(d) os.remove(f) def test_clean_ioerror_removes_files(self): fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[f]) self.assertFalse(os.path.isdir(f)) self.assertFalse(os.path.isfile(f)) def test_clean_ioerror_removes_directories(self): d1 = tempfile.mkdtemp() d2 = tempfile.mkdtemp() for n in ['foo', 'bar']: fileutil.write_file(os.path.join(d2, n), 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[d1, d2]) self.assertFalse(os.path.isdir(d1)) self.assertFalse(os.path.isfile(d1)) self.assertFalse(os.path.isdir(d2)) self.assertFalse(os.path.isfile(d2)) def test_clean_ioerror_handles_a_range_of_errors(self): for err in fileutil.KNOWN_IOERRORS: e = IOError() e.errno = err d = tempfile.mkdtemp() fileutil.clean_ioerror(e, paths=[d]) self.assertFalse(os.path.isdir(d)) self.assertFalse(os.path.isfile(d)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/utils/test_flexible_version.py000066400000000000000000000356501322477356400234030ustar00rootroot00000000000000import random import re import unittest from azurelinuxagent.common.utils.flexible_version import FlexibleVersion class TestFlexibleVersion(unittest.TestCase): def setUp(self): self.v = FlexibleVersion() def test_compile_separator(self): tests = [ '.', '', '-' ] for t in tests: t_escaped = re.escape(t) t_re = re.compile(t_escaped) self.assertEqual((t_escaped, t_re), self.v._compile_separator(t)) self.assertEqual(('', re.compile('')), self.v._compile_separator(None)) return def test_compile_pattern(self): self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1alpha': True, '1.alpha': True, '1-alpha': True, '1alpha0': True, '1.alpha0': True, '1-alpha0': True, '1.2alpha': True, '1.2.alpha': True, '1.2-alpha': True, '1.2alpha0': True, '1.2.alpha0': True, '1.2-alpha0': True, '1beta': True, '1.beta': True, '1-beta': True, '1beta0': True, '1.beta0': True, '1-beta0': True, '1.2beta': True, '1.2.beta': True, '1.2-beta': True, '1.2beta0': True, '1.2.beta0': True, '1.2-beta0': True, '1rc': True, '1.rc': True, '1-rc': True, '1rc0': True, '1.rc0': True, '1-rc0': True, '1.2rc': 
True, '1.2.rc': True, '1.2-rc': True, '1.2rc0': True, '1.2.rc0': True, '1.2-rc0': True, '1.2.3.4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_sep(self): self.v.sep = '-' self.v._compile_pattern() tests = { '1': True, '1-2': True, '1-2-3': True, '1-2-3-4': True, '1-2-3-4-5': True, '1alpha': True, '1-alpha': True, '1-alpha': True, '1alpha0': True, '1-alpha0': True, '1-alpha0': True, '1-2alpha': True, '1-2.alpha': True, '1-2-alpha': True, '1-2alpha0': True, '1-2.alpha0': True, '1-2-alpha0': True, '1beta': True, '1-beta': True, '1-beta': True, '1beta0': True, '1-beta0': True, '1-beta0': True, '1-2beta': True, '1-2.beta': True, '1-2-beta': True, '1-2beta0': True, '1-2.beta0': True, '1-2-beta0': True, '1rc': True, '1-rc': True, '1-rc': True, '1rc0': True, '1-rc0': True, '1-rc0': True, '1-2rc': True, '1-2.rc': True, '1-2-rc': True, '1-2rc0': True, '1-2.rc0': True, '1-2-rc0': True, '1-2-3-4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_prerel(self): self.v.prerel_tags = ('a', 'b', 'c') self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1a': True, '1.a': True, '1-a': True, '1a0': True, '1.a0': True, '1-a0': True, '1.2a': True, '1.2.a': True, '1.2-a': True, '1.2a0': True, '1.2.a0': True, '1.2-a0': True, '1b': True, '1.b': True, '1-b': True, '1b0': True, '1.b0': True, '1-b0': True, '1.2b': True, '1.2.b': True, '1.2-b': True, '1.2b0': True, '1.2.b0': True, '1.2-b0': True, '1c': True, '1.c': True, '1-c': True, '1c0': True, '1.c0': True, '1-c0': True, '1.2c': True, '1.2.c': True, '1.2-c': True, '1.2c0': True, '1.2.c0': True, '1.2-c0': True, '1.2.3.4a5': True, ' 1': False, '1.2.3.4alpha5': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_ensure_compatible_separators(self): v1 = FlexibleVersion('1.2.3') v2 = FlexibleVersion('1-2-3', sep='-') try: v1 == v2 self.assertTrue(False, "Incompatible separators failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible separators raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('alpha', 'beta', 'rc')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_length(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, 
"Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_order(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) v2 = FlexibleVersion('1.2.3', prerel_tags=('b', 'a')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_major(self): tests = { '1' : 1, '1.2' : 1, '1.2.3' : 1, '1.2.3.4' : 1 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).major) return def test_minor(self): tests = { '1' : 0, '1.2' : 2, '1.2.3' : 2, '1.2.3.4' : 2 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).minor) return def test_patch(self): tests = { '1' : 0, '1.2' : 0, '1.2.3' : 3, '1.2.3.4' : 3 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).patch) return def test_parse(self): tests = { "1.2.3.4": ((1, 2, 3, 4), None), "1.2.3.4alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4-alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4.alpha5": ((1, 2, 3, 4), ('alpha', 5)) } for test in iter(tests): expectation = tests[test] self.v._parse(test) self.assertEqual(expectation, (self.v.version, self.v.prerelease)) return def test_decrement(self): src_v = FlexibleVersion('1.0.0.0.10') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v -= 1 self.assertEqual(i, src_v.version[-1] - dst_v.version[-1]) return def test_decrement_disallows_below_zero(self): try: FlexibleVersion('1.0') - 1 self.assertTrue(False, "Decrement failed to raise an exception") except ArithmeticError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Decrement raised an unexpected exception: {0}".format(t)) return def test_increment(self): src_v = FlexibleVersion('1.0.0.0.0') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v += 1 self.assertEqual(i, dst_v.version[-1] - src_v.version[-1]) return def test_str(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: self.assertEqual(test, str(FlexibleVersion(test))) return def test_creation_from_flexible_version(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: v = FlexibleVersion(test) self.assertEqual(test, str(FlexibleVersion(v))) return def test_repr(self): v = FlexibleVersion('1,2,3rc4', ',', ['lol', 'rc']) expected = "FlexibleVersion ('1,2,3rc4', ',', ('lol', 'rc'))" self.assertEqual(expected, repr(v)) def test_order(self): test0 = ["1.7.0", 
"1.7.0rc0", "1.11.0"] expected0 = ['1.7.0rc0', '1.7.0', '1.11.0'] self.assertEqual(expected0, list(map(str, sorted([FlexibleVersion(v) for v in test0])))) test1 = [ '2.0.2rc2', '2.2.0beta3', '2.0.10', '2.1.0alpha42', '2.0.2beta4', '2.1.1', '2.0.1', '2.0.2rc3', '2.2.0', '2.0.0', '3.0.1', '2.1.0rc1' ] expected1 = [ '2.0.0', '2.0.1', '2.0.2beta4', '2.0.2rc2', '2.0.2rc3', '2.0.10', '2.1.0alpha42', '2.1.0rc1', '2.1.1', '2.2.0beta3', '2.2.0', '3.0.1' ] self.assertEqual(expected1, list(map(str, sorted([FlexibleVersion(v) for v in test1])))) self.assertEqual(FlexibleVersion("1.0.0.0.0.0.0.0"), FlexibleVersion("1")) self.assertFalse(FlexibleVersion("1.0") > FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") < FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") < FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.9") < FlexibleVersion("1.10")) self.assertTrue(FlexibleVersion("1.9.9") < FlexibleVersion("1.10.0")) self.assertTrue(FlexibleVersion("1.0.0.0") < FlexibleVersion("1.2.0.0")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.10") > FlexibleVersion("1.9")) self.assertTrue(FlexibleVersion("1.10.0") > FlexibleVersion("1.9.9")) self.assertTrue(FlexibleVersion("1.2.0.0") > FlexibleVersion("1.0.0.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") == FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") != FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") != FlexibleVersion("1.0")) return if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/utils/test_passwords.txt000066400000000000000000000000401322477356400222410ustar00rootroot00000000000000김치 करी hamburger caféWALinuxAgent-2.2.20/tests/utils/test_rest_util.py000066400000000000000000000537501322477356400220570ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import os import unittest from azurelinuxagent.common.exception import HttpError, \ ProtocolError, \ ResourceGoneError import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.future import httpclient, ustr from tests.tools import * class TestIOErrorCounter(AgentTestCase): def test_increment_hostplugin(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(1, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_protocol(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(1, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_other(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( '169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(1, counts["other"]) def test_get_and_reset(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(2, counts.get("hostplugin")) self.assertEqual(1, counts.get("protocol")) self.assertEqual(2, counts.get("other")) self.assertEqual( {"hostplugin":0, "protocol":0, "other":0}, restutil.IOErrorCounter._counts) class TestHttpOperations(AgentTestCase): def test_parse_url(self): test_uri = "http://abc.def/ghi#hash?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/ghi#hash?jkl=mn", rel_uri) test_uri = "http://abc.def/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/", rel_uri) self.assertEquals(False, secure) test_uri = "https://abc.def/ghi?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals(True, secure) test_uri = "http://abc.def:80/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) host, port, secure, rel_uri = restutil._parse_url("") self.assertEquals(None, host) self.assertEquals(rel_uri, "") host, port, secure, rel_uri = restutil._parse_url("None") self.assertEquals(None, host) self.assertEquals(rel_uri, "None") @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_none_is_default(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) @patch('azurelinuxagent.common.conf.get_httpproxy_port') 
@patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_overrides_env(self, mock_host, mock_port): mock_host.return_value = "host" mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual("host", h) self.assertEqual(None, p) mock_host.assert_called_once() mock_port.assert_called_once() @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_requires_host(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) mock_host.assert_called_once() mock_port.assert_not_called() @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_http_uses_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_https_uses_httpsproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = restutil._get_http_proxy(secure=True) self.assertEqual("bar.com", h) self.assertEqual(443, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_ignores_user_in_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://user:pw@foo.com:80' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar") HTTPConnection.assert_has_calls([ call("foo", 80, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={}) ]) mock_conn.getresponse.assert_called_once() self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo", 443, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={}) ]) mock_conn.getresponse.assert_called_once() self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) 
HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333) HTTPConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="http://foo:80/bar", body=None, headers={}) ]) mock_conn.getresponse.assert_called_once() self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333, secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="https://foo:443/bar", body=None, headers={}) ]) mock_conn.getresponse.assert_called_once() self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_with_retry(self, _http_request, sleep): mock_http_resp = MagicMock() mock_http_resp.read = Mock(return_value="hehe") _http_request.return_value = mock_http_resp # Test http get resp = restutil.http_get("http://foo.bar") self.assertEquals("hehe", resp.read()) # Test https get resp = restutil.http_get("https://foo.bar") self.assertEquals("hehe", resp.read()) # Test http failure _http_request.side_effect = httpclient.HTTPException("Http failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") # Test http failure _http_request.side_effect = IOError("IO failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_status_codes(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_passed_status_codes(self, _http_request, _sleep): # Ensure the code is not part of the standard set self.assertFalse(httpclient.UNAUTHORIZED in restutil.RETRY_CODES) _http_request.side_effect = [ Mock(status=httpclient.UNAUTHORIZED), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar", retry_codes=[httpclient.UNAUTHORIZED]) self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_fibonacci_delay(self, _http_request, _sleep): # Ensure the code is not a throttle code self.assertFalse(httpclient.BAD_GATEWAY in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.BAD_GATEWAY) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) 
self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [ call(restutil._compute_delay(i+1, restutil.DELAY_IN_SECONDS)) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_constant_delay_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_for_safe_minimum_number_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.THROTTLE_RETRIES-1) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=1) self.assertEqual(restutil.THROTTLE_RETRIES, _http_request.call_count) self.assertEqual(restutil.THROTTLE_RETRIES-1, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.THROTTLE_RETRIES-1)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_bad_request(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.BAD_REQUEST) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_resource_gone(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.GONE) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_exceptions(self, _http_request, _sleep): # Testing each exception is difficult because they have varying # signatures; for now, test one and ensure the set is unchanged recognized_exceptions = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] self.assertEqual(recognized_exceptions, restutil.RETRY_EXCEPTIONS) _http_request.side_effect = [ httpclient.IncompleteRead(''), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_ioerrors(self, _http_request, _sleep): ioerror = IOError() ioerror.errno = 42 _http_request.side_effect = [ ioerror, Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) def test_request_failed(self): self.assertTrue(restutil.request_failed(None)) resp = Mock() for status in 
restutil.OK_CODES: resp.status = status self.assertFalse(restutil.request_failed(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertTrue(restutil.request_failed(resp)) self.assertFalse( restutil.request_failed( resp, ok_codes=[httpclient.BAD_REQUEST])) def test_request_succeeded(self): self.assertFalse(restutil.request_succeeded(None)) resp = Mock() for status in restutil.OK_CODES: resp.status = status self.assertTrue(restutil.request_succeeded(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertFalse(restutil.request_succeeded(resp)) self.assertTrue( restutil.request_succeeded( resp, ok_codes=[httpclient.BAD_REQUEST])) def test_read_response_error(self): """ Validate the read_response_error method handles encoding correctly """ responses = ['message', b'message', '\x80message\x80'] response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: for s in responses: patch_response.return_value = s result = restutil.read_response_error(response) print("RESPONSE: {0}".format(s)) print("RESULT: {0}".format(result)) print("PRESENT: {0}".format('[status: reason]' in result)) self.assertTrue('[status: reason]' in result) self.assertTrue('message' in result) def test_read_response_bytes(self): response_bytes = '7b:0a:20:20:20:20:22:65:72:72:6f:72:43:6f:64:65:22:' \ '3a:20:22:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:' \ '69:73:20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:' \ '69:73:20:6f:70:65:72:61:74:69:6f:6e:2e:22:2c:0a:20:' \ '20:20:20:22:6d:65:73:73:61:67:65:22:3a:20:22:c3:af:' \ 'c2:bb:c2:bf:3c:3f:78:6d:6c:20:76:65:72:73:69:6f:6e:' \ '3d:22:31:2e:30:22:20:65:6e:63:6f:64:69:6e:67:3d:22:' \ '75:74:66:2d:38:22:3f:3e:3c:45:72:72:6f:72:3e:3c:43:' \ '6f:64:65:3e:49:6e:76:61:6c:69:64:42:6c:6f:62:54:79:' \ '70:65:3c:2f:43:6f:64:65:3e:3c:4d:65:73:73:61:67:65:' \ '3e:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:69:73:' \ '20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:69:73:' \ '20:6f:70:65:72:61:74:69:6f:6e:2e:0a:52:65:71:75:65:' \ '73:74:49:64:3a:63:37:34:32:39:30:63:62:2d:30:30:30:' \ '31:2d:30:30:62:35:2d:30:36:64:61:2d:64:64:36:36:36:' \ '61:30:30:30:22:2c:0a:20:20:20:20:22:64:65:74:61:69:' \ '6c:73:22:3a:20:22:22:0a:7d'.split(':') expected_response = '[HTTP Failed] [status: reason] {\n "errorCode": "The blob ' \ 'type is invalid for this operation.",\n ' \ '"message": "' \ 'InvalidBlobTypeThe ' \ 'blob type is invalid for this operation.\n' \ 'RequestId:c74290cb-0001-00b5-06da-dd666a000",' \ '\n "details": ""\n}' response_string = ''.join(chr(int(b, 16)) for b in response_bytes) response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: patch_response.return_value = response_string result = restutil.read_response_error(response) self.assertEqual(result, expected_response) try: raise HttpError("{0}".format(result)) except HttpError as e: self.assertTrue(result in ustr(e)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.20/tests/utils/test_shell_util.py000066400000000000000000000027011322477356400221770ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from tests.tools import *
import uuid
import unittest
import os
import azurelinuxagent.common.utils.shellutil as shellutil
import test


class TestrunCmd(AgentTestCase):
    def test_run_get_output(self):
        output = shellutil.run_get_output(u"ls /")
        self.assertNotEquals(None, output)
        self.assertEquals(0, output[0])

        err = shellutil.run_get_output(u"ls /not-exists")
        self.assertNotEquals(0, err[0])

        err = shellutil.run_get_output(u"ls 我")
        self.assertNotEquals(0, err[0])

    def test_shellquote(self):
        self.assertEqual("\'foo\'", shellutil.quote("foo"))
        self.assertEqual("\'foo bar\'", shellutil.quote("foo bar"))
        self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar"))


if __name__ == '__main__':
    unittest.main()
WALinuxAgent-2.2.20/tests/utils/test_text_util.py000066400000000000000000000117301322477356400220560ustar00rootroot00000000000000# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#

from tests.tools import *
import uuid
import unittest
import os
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.utils.textutil import Version


class TestTextUtil(AgentTestCase):
    def test_get_password_hash(self):
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_passwords.txt'), 'rb') as in_file:
            for data in in_file:
                # Remove bom on bytes data before it is converted into string.
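                # The UTF-8 BOM is the three-byte sequence b'\xef\xbb\xbf', which
                # decodes to u'\ufeff' if it is not stripped first.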
                data = textutil.remove_bom(data)
                data = ustr(data, encoding='utf-8')
                password_hash = textutil.gen_password_hash(data, 6, 10)
                self.assertNotEquals(None, password_hash)

    def test_replace_non_ascii(self):
        data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8')
        self.assertEqual('hehe', textutil.replace_non_ascii(data))

        data = "abcd\xa0e\xf0fghijk\xbblm"
        self.assertEqual("abcdefghijklm", textutil.replace_non_ascii(data))

        data = "abcd\xa0e\xf0fghijk\xbblm"
        self.assertEqual("abcdXeXfghijkXlm",
                         textutil.replace_non_ascii(data, replace_char='X'))

        self.assertEqual('', textutil.replace_non_ascii(None))

    def test_remove_bom(self):
        # Test that the bom can be removed
        data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8')
        data = textutil.remove_bom(data)
        self.assertNotEquals(0xbb, data[0])

        # The bom is comprised of a sequence of three bytes; if the length of the
        # input is shorter than three bytes, remove_bom should not do anything
        data = u"\xa7"
        data = textutil.remove_bom(data)
        self.assertEquals(data, data[0])

        data = u"\xa7\xef"
        data = textutil.remove_bom(data)
        self.assertEquals(u"\xa7", data[0])
        self.assertEquals(u"\xef", data[1])

        # Test that a string without a BOM is not affected
        data = u"hehe"
        data = textutil.remove_bom(data)
        self.assertEquals(u"h", data[0])

        data = u""
        data = textutil.remove_bom(data)
        self.assertEquals(u"", data)

        data = u"  "
        data = textutil.remove_bom(data)
        self.assertEquals(u"  ", data)

    def test_version_compare(self):
        self.assertTrue(Version("1.0") < Version("1.1"))
        self.assertTrue(Version("1.9") < Version("1.10"))
        self.assertTrue(Version("1.9.9") < Version("1.10.0"))
        self.assertTrue(Version("1.0.0.0") < Version("1.2.0.0"))

        self.assertTrue(Version("1.0") <= Version("1.1"))
        self.assertTrue(Version("1.1") > Version("1.0"))
        self.assertTrue(Version("1.1") >= Version("1.0"))

        self.assertTrue(Version("1.0") == Version("1.0"))
        self.assertTrue(Version("1.0") >= Version("1.0"))
        self.assertTrue(Version("1.0") <= Version("1.0"))

        self.assertTrue(Version("1.9") < "1.10")
        self.assertTrue("1.9" < Version("1.10"))

    def test_get_bytes_from_pem(self):
        content = ("-----BEGIN CERTIFICATE-----\n"
                   "certificate\n"
                   "-----END CERTIFICATE----\n")
        base64_bytes = textutil.get_bytes_from_pem(content)
        self.assertEquals("certificate", base64_bytes)

        content = ("-----BEGIN PRIVATE KEY-----\n"
                   "private key\n"
                   "-----END PRIVATE Key-----\n")
        base64_bytes = textutil.get_bytes_from_pem(content)
        self.assertEquals("private key", base64_bytes)

    def test_swap_hexstring(self):
        data = [
            ['12', 1, '21'],
            ['12', 2, '12'],
            ['12', 3, '012'],
            ['12', 4, '0012'],

            ['123', 1, '321'],
            ['123', 2, '2301'],
            ['123', 3, '123'],
            ['123', 4, '0123'],

            ['1234', 1, '4321'],
            ['1234', 2, '3412'],
            ['1234', 3, '234001'],
            ['1234', 4, '1234'],

            ['abcdef12', 1, '21fedcba'],
            ['abcdef12', 2, '12efcdab'],
            ['abcdef12', 3, 'f12cde0ab'],
            ['abcdef12', 4, 'ef12abcd'],

            ['aBcdEf12', 1, '21fEdcBa'],
            ['aBcdEf12', 2, '12EfcdaB'],
            ['aBcdEf12', 3, 'f12cdE0aB'],
            ['aBcdEf12', 4, 'Ef12aBcd']
        ]

        for t in data:
            self.assertEqual(t[2], textutil.swap_hexstring(t[0], width=t[1]))


if __name__ == '__main__':
    unittest.main()
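The table in test_swap_hexstring above pins down the observable behaviour of textutil.swap_hexstring: the input is left-padded with '0' up to a whole number of width-sized groups of hex digits, and the groups are then emitted in reverse order. The snippet below is a re-derivation of that behaviour from the table alone; swap_hexstring_sketch is a hypothetical name and the function is an illustrative sketch, not the agent's own implementation.

def swap_hexstring_sketch(value, width=2):
    # Left-pad with '0' so the length is an exact multiple of the group width.
    if len(value) % width != 0:
        value = value.rjust(len(value) + width - (len(value) % width), '0')
    # Split into width-sized groups and reverse the group order.
    groups = [value[i:i + width] for i in range(0, len(value), width)]
    return ''.join(reversed(groups))

# For example, swap_hexstring_sketch('1234', width=3) pads to '001234', splits it
# into ['001', '234'] and returns '234001', matching the ['1234', 3, '234001'] row
# in the table above.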
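The throttling tests in tests/utils/test_rest_util.py earlier in this archive assert a specific retry shape for restutil.http_get: a constant one-second sleep between attempts when the response code is a throttle code, a computed delay otherwise, and a floor of THROTTLE_RETRIES attempts once throttling has been seen, even if the caller asked for fewer. The sketch below reproduces only that observable pattern under stated assumptions; the constants and the compute_delay helper are hypothetical stand-ins for restutil.DEFAULT_RETRIES, restutil.THROTTLE_RETRIES, restutil.THROTTLE_CODES and restutil._compute_delay, not the agent's real values or code.

import time

DEFAULT_RETRIES = 6                # hypothetical stand-in for restutil.DEFAULT_RETRIES
THROTTLE_RETRIES = 25              # hypothetical stand-in for restutil.THROTTLE_RETRIES
THROTTLE_CODES = frozenset([503])  # the tests assert SERVICE_UNAVAILABLE is a throttle code
DELAY_IN_SECONDS = 5               # hypothetical stand-in for restutil.DELAY_IN_SECONDS


def compute_delay(attempt, delay):
    # Hypothetical stand-in for restutil._compute_delay(attempt, delay).
    return delay * attempt


def get_with_retries(do_request, max_retry=DEFAULT_RETRIES):
    # Retry do_request() until it succeeds or the retry budget is exhausted.
    # Throttled responses get a constant one-second delay and raise the budget
    # to at least THROTTLE_RETRIES attempts, mirroring what the tests check.
    attempts = max_retry
    attempt = 0
    while True:
        response = do_request()
        attempt += 1
        if response.status in THROTTLE_CODES:
            attempts = max(attempts, THROTTLE_RETRIES)
            delay = 1
        elif response.status == 200:
            # Treat 200 as success in this sketch; the tests use restutil.OK_CODES.
            return response
        else:
            delay = compute_delay(attempt, DELAY_IN_SECONDS)
        if attempt >= attempts:
            return response
        time.sleep(delay)

Under these assumptions, six throttled responses followed by a 200 produce exactly six one-second sleeps across seven requests, which is the pattern test_http_request_retries_with_constant_delay_when_throttled asserts.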