pax_global_header00006660000000000000000000000064133541630670014522gustar00rootroot0000000000000052 comment=5fa49b307cd206b116b54aabf777070d63772ba2 WALinuxAgent-2.2.32/000077500000000000000000000000001335416306700141165ustar00rootroot00000000000000WALinuxAgent-2.2.32/.gitattributes000066400000000000000000000047261335416306700170220ustar00rootroot00000000000000############################################################################### # Set default behavior to automatically normalize line endings. ############################################################################### * text=auto ############################################################################### # Set default behavior for command prompt diff. # # This is need for earlier builds of msysgit that does not have it on by # default for csharp files. # Note: This is only used by command line ############################################################################### #*.cs diff=csharp ############################################################################### # Set the merge driver for project and solution files # # Merging from the command prompt will add diff markers to the files if there # are conflicts (Merging from VS is not affected by the settings below, in VS # the diff markers are never inserted). Diff markers may cause the following # file extensions to fail to load in VS. An alternative would be to treat # these files as binary and thus will always conflict and require user # intervention with every merge. 
To do so, just uncomment the entries below ############################################################################### #*.sln merge=binary #*.csproj merge=binary #*.vbproj merge=binary #*.vcxproj merge=binary #*.vcproj merge=binary #*.dbproj merge=binary #*.fsproj merge=binary #*.lsproj merge=binary #*.wixproj merge=binary #*.modelproj merge=binary #*.sqlproj merge=binary #*.wwaproj merge=binary ############################################################################### # behavior for image files # # image files are treated as binary by default. ############################################################################### #*.jpg binary #*.png binary #*.gif binary ############################################################################### # diff behavior for common document formats # # Convert binary document formats to text before diffing them. This feature # is only available from the command line. Turn it on by uncommenting the # entries below. ############################################################################### #*.doc diff=astextplain #*.DOC diff=astextplain #*.docx diff=astextplain #*.DOCX diff=astextplain #*.dot diff=astextplain #*.DOT diff=astextplain #*.pdf diff=astextplain #*.PDF diff=astextplain #*.rtf diff=astextplain #*.RTF diff=astextplain WALinuxAgent-2.2.32/.github/000077500000000000000000000000001335416306700154565ustar00rootroot00000000000000WALinuxAgent-2.2.32/.github/CONTRIBUTING.md000066400000000000000000000105271335416306700177140ustar00rootroot00000000000000# Contributing to Linux Guest Agent First, thank you for contributing to WALinuxAgent repository! ## Basics If you would like to become an active contributor to this project, please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). 
## Table of Contents [Before starting](#before-starting) - [Github basics](#github-basics) - [Code of Conduct](#code-of-conduct) [Making Changes](#making-changes) - [Pull Requests](#pull-requests) - [Pull Request Guidelines](#pull-request-guidelines) - [Cleaning up commits](#cleaning-up-commits) - [General guidelines](#general-guidelines) - [Testing guidelines](#testing-guidelines) ## Before starting ### Github basics #### GitHub workflow If you don't have experience with Git and Github, some of the terminology and process can be confusing. [Here's a guide to understanding Github](https://guides.github.com/introduction/flow/). #### Forking the Azure/Guest-Configuration-Extension repository Unless you are working with multiple contributors on the same file, we ask that you fork the repository and submit your Pull Request from there. [Here's a guide to forks in Github](https://guides.github.com/activities/forking/). ### Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Making Changes ### Pull Requests You can find all of the pull requests that have been opened in the [Pull Request](https://github.com/Azure/Guest-Configuration-Extension/pulls) section of the repository. To open your own pull request, click [here](https://github.com/Azure/WALinuxAgent/compare). 
When creating a pull request, keep the following in mind: - Make sure you are pointing to the fork and branch that your changes were made in - Choose the correct branch you want your pull request to be merged into - The pull request template that is provided **should be filled out**; this is not something that should just be deleted or ignored when the pull request is created - Deleting or ignoring this template will elongate the time it takes for your pull request to be reviewed ### Pull Request Guidelines A pull request template will automatically be included as a part of your PR. Please fill out the checklist as specified. Pull requests **will not be reviewed** unless they include a properly completed checklist. #### Cleaning up Commits If you are thinking about making a large change, **break up the change into small, logical, testable chunks, and organize your pull requests accordingly**. Often when a pull request is created with a large number of files changed and/or a large number of lines of code added and/or removed, GitHub will have a difficult time opening up the changes on their site. This forces the Azure Guest-Configuration-Extension team to use separate software to do a code review on the pull request. If you find yourself creating a pull request and are unable to see all the changes on GitHub, we recommend **splitting the pull request into multiple pull requests that are able to be reviewed on GitHub**. If splitting up the pull request is not an option, we recommend **creating individual commits for different parts of the pull request, which can be reviewed individually on GitHub**. For more information on cleaning up the commits in a pull request, such as how to rebase, squash, and cherry-pick, click [here](https://github.com/Azure/azure-powershell/blob/dev/documentation/cleaning-up-commits.md). #### General guidelines The following guidelines must be followed in **EVERY** pull request that is opened. 
- Title of the pull request is clear and informative - There are a small number of commits that each have an informative message - A description of the changes the pull request makes is included, and a reference to the issue being resolved, if the change address any - All files have the Microsoft copyright header #### Testing Guidelines The following guidelines must be followed in **EVERY** pull request that is opened. - Pull request includes test coverage for the included changesWALinuxAgent-2.2.32/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000024531335416306700212630ustar00rootroot00000000000000 ## Description Issue # --- ### PR information - [ ] The title of the PR is clear and informative. - [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). - [ ] Except for special cases involving multiple contributors, the PR is started from a fork of the main repository, not a branch. - [ ] If applicable, the PR references the bug/issue that it fixes in the description. - [ ] New Unit tests were added for the changes made and Travis.CI is passing. 
### Quality of Code and Contribution Guidelines - [ ] I have read the [contribution guidelines](https://github.com/Azure/WALinuxAgent/blob/master/.github/CONTRIBUTING.md).WALinuxAgent-2.2.32/.gitignore000066400000000000000000000014421335416306700161070ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # Virtualenv py3env/ # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyCharm .idea/ # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .cache nosetests.xml coverage.xml # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ waagentc *.pyproj *.sln *.suo waagentc bin/waagent2.0c # rope project .ropeproject/ WALinuxAgent-2.2.32/.travis.yml000066400000000000000000000003661335416306700162340ustar00rootroot00000000000000language: python python: - "2.6" - "2.7" #- "3.2" #- "3.3" - "3.4" # command to install dependencies install: #- pip install . #- pip install -r requirements.txt - pip install pyasn1 # command to run tests script: nosetests tests WALinuxAgent-2.2.32/Changelog000066400000000000000000000021351335416306700157310ustar00rootroot00000000000000WALinuxAgent Changelog ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 12 August 2016, v2.1.6 . Improved RDMA support . Extension state migration . Alpine Linux support . Fixes for #347, #351, #353 15 July 2016, v2.1.5 . Goal state processing extension . Multi-nic improvements . Bug fixes for #145, #141, #133, #116, #187, #169, #104, #127, #163, #190, #185, #174 09 Mar 2016, WALinuxAgent 2.1.4 . Add support for FreeBSD . 
Fix a bug for internal extension version resolving 29 Jan 2016, WALinuxAgent 2.1.3 . Fixed endpoint probing for Azure Stack . Multiple fixes for extension handling 07 Dec 2015, WALinuxAgent 2.1.2 . Multiple fixes for extension handling and provisioning 07 Aug 2015, WALinuxAgent 2.1.1 . Support python3 . Fixed bugs for metadata protocol . Fixed a few pylint warnings . Enabled travis-ci ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 01 Jul 2015, WALinuxAgent 2.1.0 . Divide waagent into different modules WALinuxAgent-2.2.32/LICENSE.txt000066400000000000000000000261301335416306700157430ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. WALinuxAgent-2.2.32/MAINTENANCE.md000066400000000000000000000013131335416306700161600ustar00rootroot00000000000000## Microsoft Azure Linux Agent Maintenance Guide ### Version rules * Production releases are public * Test releases are for internal use * Production versions use only [major].[minor].[revision] * Test versions use [major].[minor].[revision].[build] * Test a.b.c.0 is equivalent to Prod a.b.c * Publishing to Production requires incrementing the revision and dropping the build number * We do not use pre-release labels on any builds ### Version updates * The version of the agent can be found at https://github.com/Azure/WALinuxAgent/blob/master/azurelinuxagent/common/version.py#L53 assigned to AGENT_VERSION * Update the version here and send for PR before declaring a release via GitHub WALinuxAgent-2.2.32/MANIFEST000066400000000000000000000005701335416306700152510ustar00rootroot00000000000000# file GENERATED by distutils, do NOT edit README setup.py bin/waagent config/waagent.conf config/waagent.logrotate test/test_logger.py walinuxagent/__init__.py walinuxagent/agent.py walinuxagent/conf.py walinuxagent/envmonitor.py walinuxagent/extension.py walinuxagent/install.py walinuxagent/logger.py walinuxagent/protocol.py walinuxagent/provision.py walinuxagent/util.py WALinuxAgent-2.2.32/MANIFEST.in000066400000000000000000000001141335416306700156500ustar00rootroot00000000000000recursive-include bin * recursive-include 
init * recursive-include config * WALinuxAgent-2.2.32/NOTICE000066400000000000000000000002411335416306700150170ustar00rootroot00000000000000Microsoft Azure Linux Agent Copyright 2012 Microsoft Corporation This product includes software developed at Microsoft Corporation (http://www.microsoft.com/). WALinuxAgent-2.2.32/README.md000066400000000000000000000437051335416306700154060ustar00rootroot00000000000000# Microsoft Azure Linux Agent ## Master branch status Each badge below represents our basic validation tests for an image, which are executed several times each day. These include provisioning, user account, disk, extension and networking scenarios. Image | Status | ------|--------| Canonical UbuntuServer 14.04.5-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-LTS__agent--bvt.svg) Canonical UbuntuServer 14.04.5-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-DAILY-LTS__agent--bvt.svg) Canonical UbuntuServer 16.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-LTS__agent--bvt.svg) Canonical UbuntuServer 16.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-DAILY-LTS__agent--bvt.svg) Canonical UbuntuServer 18.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-LTS__agent--bvt.svg) Canonical UbuntuServer 18.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-DAILY-LTS__agent--bvt.svg) Credativ Debian 8|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8__agent--bvt.svg) Credativ Debian 8-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8-DAILY__agent--bvt.svg) Credativ Debian 9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9__agent--bvt.svg) Credativ Debian 
9-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9-DAILY__agent--bvt.svg) OpenLogic CentOS 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_6.9__agent--bvt.svg) OpenLogic CentOS 7.4|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_7.4__agent--bvt.svg) RedHat RHEL 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_6.9__agent--bvt.svg) RedHat RHEL 7-RAW|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_7-RAW__agent--bvt.svg) SUSE SLES 12-SP3|![badge](https://dcrbadges.blob.core.windows.net/scenarios/SUSE_SLES_12-SP3__agent--bvt.svg) ## Introduction The Microsoft Azure Linux Agent (waagent) manages Linux provisioning and VM interaction with the Azure Fabric Controller. It provides the following functionality for Linux IaaS deployments: * Image Provisioning * Creation of a user account * Configuring SSH authentication types * Deployment of SSH public keys and key pairs * Setting the host name * Publishing the host name to the platform DNS * Reporting SSH host key fingerprint to the platform * Resource Disk Management * Formatting and mounting the resource disk * Configuring swap space * Networking * Manages routes to improve compatibility with platform DHCP servers * Ensures the stability of the network interface name * Kernel * Configure virtual NUMA (disable for kernel <2.6.37) * Consume Hyper-V entropy for /dev/random * Configure SCSI timeouts for the root device (which could be remote) * Diagnostics * Console redirection to the serial port * SCVMM Deployments * Detect and bootstrap the VMM agent for Linux when running in a System Center Virtual Machine Manager 2012R2 environment * VM Extension * Inject component authored by Microsoft and Partners into Linux VM (IaaS) to enable software and configuration automation * VM Extension reference implementation on [GitHub](https://github.com/Azure/azure-linux-extensions) ## Communication 
The information flow from the platform to the agent occurs via two channels: * A boot-time attached DVD for IaaS deployments. This DVD includes an OVF-compliant configuration file that includes all provisioning information other than the actual SSH keypairs. * A TCP endpoint exposing a REST API used to obtain deployment and topology configuration. The agent will use an HTTP proxy if provided via the `http_proxy` (for `http` requests) or `https_proxy` (for `https` requests) environment variables. The `HttpProxy.Host` and `HttpProxy.Port` configuration variables (see below), if used, will override the environment settings. Due to limitations of Python, the agent *does not* support HTTP proxies requiring authentication. ## Requirements The following systems have been tested and are known to work with the Azure Linux Agent. Please note that this list may differ from the official list of supported systems on the Microsoft Azure Platform as described [here](http://support.microsoft.com/kb/2805216). Waagent depends on some system packages in order to function properly: * Python 2.6+ * OpenSSL 1.0+ * OpenSSH 5.3+ * Filesystem utilities: sfdisk, fdisk, mkfs, parted * Password tools: chpasswd, sudo * Text processing tools: sed, grep * Network tools: ip-route ## Installation Installation via your distribution's package repository is preferred. You can also customize your own RPM or DEB packages using the configuration samples provided (see deb and rpm sections below). For more advanced installation options, such as installing to custom locations or prefixes, you can use **setuptools** to install from source by running: ```bash sudo python setup.py install --register-service ``` You can view more installation options by running: ```bash sudo python setup.py install --help ``` The agent's log file is kept at `/var/log/waagent.log`. ## Upgrade Upgrading via your distribution's package repository is strongly preferred. 
If upgrading manually, same with installation above by running: ```bash sudo python setup.py install --force ``` Restart waagent service,for most of linux distributions: ```bash sudo service waagent restart ``` For Ubuntu, use: ```bash sudo service walinuxagent restart ``` For CoreOS, use: ```bash sudo systemctl restart waagent ``` ## Command line options ### Flags `-verbose`: Increase verbosity of specified command `-force`: Skip interactive confirmation for some commands ### Commands `-help`: Lists the supported commands and flags. `-deprovision`: Attempt to clean the system and make it suitable for re-provisioning, by deleting the following: * All SSH host keys (if Provisioning.RegenerateSshHostKeyPair is 'y' in the configuration file) * Nameserver configuration in /etc/resolv.conf * Root password from /etc/shadow (if Provisioning.DeleteRootPassword is 'y' in the configuration file) * Cached DHCP client leases * Resets host name to localhost.localdomain **WARNING!** Deprovision does not guarantee that the image is cleared of all sensitive information and suitable for redistribution. `-deprovision+user`: Performs everything under deprovision (above) and also deletes the last provisioned user account and associated data. `-version`: Displays the version of waagent `-serialconsole`: Configures GRUB to mark ttyS0 (the first serial port) as the boot console. This ensures that kernel bootup logs are sent to the serial port and made available for debugging. `-daemon`: Run waagent as a daemon to manage interaction with the platform. This argument is specified to waagent in the waagent init script. `-start`: Run waagent as a background process ## Configuration A configuration file (/etc/waagent.conf) controls the actions of waagent. Blank lines and lines whose first character is a `#` are ignored (end-of-line comments are *not* supported). 
A sample configuration file is shown below: ```yml Extensions.Enabled=y Provisioning.Enabled=y Provisioning.UseCloudInit=n Provisioning.DeleteRootPassword=n Provisioning.RegenerateSshHostKeyPair=y Provisioning.SshHostKeyPairType=rsa Provisioning.MonitorHostName=y Provisioning.DecodeCustomData=n Provisioning.ExecuteCustomData=n Provisioning.PasswordCryptId=6 Provisioning.PasswordCryptSaltLength=10 ResourceDisk.Format=y ResourceDisk.Filesystem=ext4 ResourceDisk.MountPoint=/mnt/resource ResourceDisk.MountOptions=None ResourceDisk.EnableSwap=n ResourceDisk.SwapSizeMB=0 Logs.Verbose=n OS.AllowHTTP=n OS.RootDeviceScsiTimeout=300 OS.EnableFIPS=n OS.OpensslPath=None OS.SshClientAliveInterval=180 OS.SshDir=/etc/ssh HttpProxy.Host=None HttpProxy.Port=None CGroups.EnforceLimits=y CGroups.Excluded=customscript,runcommand ``` The various configuration options are described in detail below. Configuration options are of three types : Boolean, String or Integer. The Boolean configuration options can be specified as "y" or "n". The special keyword "None" may be used for some string type configuration entries as detailed below. ### Configuration File Options #### __Extensions.Enabled__ _Type: Boolean_ _Default: y_ This allows the user to enable or disable the extension handling functionality in the agent. Valid values are "y" or "n". If extension handling is disabled, the goal state will still be processed and VM status is still reported, but only every 5 minutes. Extension config within the goal state will be ignored. Note that functionality such as password reset, ssh key updates and backups depend on extensions. Only disable this if you do not need extensions at all. _Note_: disabling extensions in this manner is not the same as running completely without the agent. In order to do that, the `provisionVMAgent` flag must be set at provisioning time, via whichever API is being used. We will provide more details on this on our wiki when it is generally available. 
#### __Provisioning.Enabled__ _Type: Boolean_ _Default: y_ This allows the user to enable or disable the provisioning functionality in the agent. Valid values are "y" or "n". If provisioning is disabled, SSH host and user keys in the image are preserved and any configuration specified in the Azure provisioning API is ignored. #### __Provisioning.UseCloudInit__ _Type: Boolean_ _Default: n_ This options enables / disables support for provisioning by means of cloud-init. When true ("y"), the agent will wait for cloud-init to complete before installing extensions and processing the latest goal state. _Provisioning.Enabled_ must be disabled ("n") for this option to have an effect. Setting _Provisioning.Enabled_ to true ("y") overrides this option and runs the built-in agent provisioning code. #### __Provisioning.DeleteRootPassword__ _Type: Boolean_ _Default: n_ If set, the root password in the /etc/shadow file is erased during the provisioning process. #### __Provisioning.RegenerateSshHostKeyPair__ _Type: Boolean_ _Default: y_ If set, all SSH host key pairs (ecdsa, dsa and rsa) are deleted during the provisioning process from /etc/ssh/. And a single fresh key pair is generated. The encryption type for the fresh key pair is configurable by the Provisioning.SshHostKeyPairType entry. Please note that some distributions will re-create SSH key pairs for any missing encryption types when the SSH daemon is restarted (for example, upon a reboot). #### __Provisioning.SshHostKeyPairType__ _Type: String_ _Default: rsa_ This can be set to an encryption algorithm type that is supported by the SSH daemon on the VM. The typically supported values are "rsa", "dsa" and "ecdsa". Note that "putty.exe" on Windows does not support "ecdsa". So, if you intend to use putty.exe on Windows to connect to a Linux deployment, please use "rsa" or "dsa". 
#### __Provisioning.MonitorHostName__ _Type: Boolean_ _Default: y_ If set, waagent will monitor the Linux VM for hostname changes (as returned by the "hostname" command) and automatically update the networking configuration in the image to reflect the change. In order to push the name change to the DNS servers, networking will be restarted in the VM. This will result in brief loss of Internet connectivity. #### __Provisioning.DecodeCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will decode CustomData from Base64. #### __Provisioning.ExecuteCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will execute CustomData after provisioning. #### __Provisioning.PasswordCryptId__ _Type: String_ _Default: 6_ Algorithm used by crypt when generating password hash. * 1 - MD5 * 2a - Blowfish * 5 - SHA-256 * 6 - SHA-512 #### __Provisioning.PasswordCryptSaltLength__ _Type: String_ _Default: 10_ Length of random salt used when generating password hash. #### __ResourceDisk.Format__ _Type: Boolean_ _Default: y_ If set, the resource disk provided by the platform will be formatted and mounted by waagent if the filesystem type requested by the user in "ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of type Linux (83) will be made available on the disk. Note that this partition will not be formatted if it can be successfully mounted. #### __ResourceDisk.Filesystem__ _Type: String_ _Default: ext4_ This specifies the filesystem type for the resource disk. Supported values vary by Linux distribution. If the string is X, then mkfs.X should be present on the Linux image. SLES 11 images should typically use 'ext3'. BSD images should use 'ufs2' here. #### __ResourceDisk.MountPoint__ _Type: String_ _Default: /mnt/resource_ This specifies the path at which the resource disk is mounted. #### __ResourceDisk.MountOptions__ _Type: String_ _Default: None_ Specifies disk mount options to be passed to the mount -o command. 
This is a comma separated list of values, ex. 'nodev,nosuid'. See mount(8) for details. #### __ResourceDisk.EnableSwap__ _Type: Boolean_ _Default: n_ If set, a swap file (/swapfile) is created on the resource disk and added to the system swap space. #### __ResourceDisk.SwapSizeMB__ _Type: Integer_ _Default: 0_ The size of the swap file in megabytes. #### __Logs.Verbose__ _Type: Boolean_ _Default: n_ If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. #### __OS.AllowHTTP__ _Type: Boolean_ _Default: n_ If set to `y` and SSL support is not compiled into Python, the agent will fall-back to use HTTP. Otherwise, if SSL support is not compiled into Python, the agent will fail all HTTPS requests. Note: Allowing HTTP may unintentionally expose secure data. #### __OS.EnableRDMA__ _Type: Boolean_ _Default: n_ If set, the agent will attempt to install and then load an RDMA kernel driver that matches the version of the firmware on the underlying hardware. #### __OS.EnableFIPS__ _Type: Boolean_ _Default: n_ If set, the agent will emit into the environment "OPENSSL_FIPS=1" when executing OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libraries. Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant certificates are installed, then enabling this option will cause all OpenSSL commands to fail._ #### __OS.RootDeviceScsiTimeout__ _Type: Integer_ _Default: 300_ This configures the SCSI timeout in seconds on the root device. If not set, the system defaults are used. #### __OS.OpensslPath__ _Type: String_ _Default: None_ This can be used to specify an alternate path for the openssl binary to use for cryptographic operations. #### __OS.SshClientAliveInterval__ _Type: Integer_ _Default: 180_ This values sets the number of seconds the agent uses for the SSH ClientAliveInterval configuration option. 
#### __OS.SshDir__ _Type: String_ _Default: `/etc/ssh`_ This option can be used to override the normal location of the SSH configuration directory. #### __HttpProxy.Host, HttpProxy.Port__ _Type: String_ _Default: None_ If set, the agent will use this proxy server to access the internet. These values *will* override the `http_proxy` or `https_proxy` environment variables. Lastly, `HttpProxy.Host` is required (if to be used) and `HttpProxy.Port` is optional. #### __CGroups.EnforceLimits__ _Type: Boolean_ _Default: y_ If set, the agent will attempt to set cgroups limits for cpu and memory for the agent process itself as well as extension processes. See the wiki for further details on this. #### __CGroups.Excluded__ _Type: String_ _Default: customscript,runcommand_ The list of extensions which will be excluded from cgroups limits. This should be comma separated. ### Telemetry WALinuxAgent collects usage data and sends it to Microsoft to help improve our products and services. The data collected is used to track service health and assist with Azure support requests. Data collected does not include any personally identifiable information. Read our [privacy statement](http://go.microsoft.com/fwlink/?LinkId=521839) to learn more. WALinuxAgent does not support disabling telemetry at this time. WALinuxAgent must be removed to disable telemetry collection. If you need this feature, please open an issue in GitHub and explain your requirement. ### Appendix We do not maintain packaging information in this repo but some samples are shown below as a reference. See the downstream distribution repositories for officially maintained packaging. #### deb packages The official Ubuntu WALinuxAgent package can be found [here](https://launchpad.net/ubuntu/+source/walinuxagent). Run once: 1. Install required packages ```bash sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper ``` 2. 
Create the pbuilder environment ```bash sudo pbuilder create --debootstrapopts --variant=buildd ``` 3. Obtain `waagent.dsc` from a downstream package repo To compile the package, from the top-most directory: 1. Build the source package ```bash dpkg-buildpackage -S ``` 2. Build the package ```bash sudo pbuilder build waagent.dsc ``` 3. Fetch the built package, usually from `/var/cache/pbuilder/result` #### rpm packages The instructions below describe how to build an rpm package. 1. Install setuptools ```bash curl https://bootstrap.pypa.io/ez_setup.py -o - | python ``` 2. The following command will build the binary and source RPMs: ```bash python setup.py bdist_rpm ``` ----- This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. WALinuxAgent-2.2.32/__main__.py000066400000000000000000000012521335416306700162100ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.agent as agent agent.main() WALinuxAgent-2.2.32/azurelinuxagent/000077500000000000000000000000001335416306700173435ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/__init__.py000066400000000000000000000011651335416306700214570ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/agent.py000066400000000000000000000220131335416306700210110ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # """ Module agent """ from __future__ import print_function import os import sys import re import subprocess import traceback import azurelinuxagent.common.logger as logger import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_VERSION, \ DISTRO_NAME, DISTRO_VERSION, \ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO, GOAL_STATE_AGENT_VERSION from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.utils import fileutil class Agent(object): def __init__(self, verbose, conf_file_path=None): """ Initialize agent running environment. """ self.conf_file_path = conf_file_path self.osutil = get_osutil() #Init stdout log level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.STDOUT, level) #Init config conf_file_path = self.conf_file_path \ if self.conf_file_path is not None \ else self.osutil.get_agent_conf_file_path() conf.load_conf_from_file(conf_file_path) #Init log verbose = verbose or conf.get_logs_verbose() level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.FILE, level, path="/var/log/waagent.log") logger.add_logger_appender(logger.AppenderType.CONSOLE, level, path="/dev/console") # See issue #1035 # logger.add_logger_appender(logger.AppenderType.TELEMETRY, # logger.LogLevel.WARNING, # path=event.add_log_event) ext_log_dir = conf.get_ext_log_dir() try: if os.path.isfile(ext_log_dir): raise Exception("{0} is a file".format(ext_log_dir)) if not os.path.isdir(ext_log_dir): fileutil.mkdir(ext_log_dir, mode=0o755, owner="root") except Exception as e: logger.error( "Exception occurred while creating extension " "log directory {0}: {1}".format(ext_log_dir, e)) #Init event reporter event.init_event_status(conf.get_lib_dir()) event_dir = os.path.join(conf.get_lib_dir(), "events") 
event.init_event_logger(event_dir) event.enable_unhandled_err_dump("WALA") def daemon(self): """ Run agent daemon """ logger.set_prefix("Daemon") child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) from azurelinuxagent.daemon import get_daemon_handler daemon_handler = get_daemon_handler() daemon_handler.run(child_args=child_args) def provision(self): """ Run provision command """ from azurelinuxagent.pa.provision import get_provision_handler provision_handler = get_provision_handler() provision_handler.run() def deprovision(self, force=False, deluser=False): """ Run deprovision command """ from azurelinuxagent.pa.deprovision import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run(force=force, deluser=deluser) def register_service(self): """ Register agent as a service """ print("Register {0} service".format(AGENT_NAME)) self.osutil.register_agent_service() print("Stop {0} service".format(AGENT_NAME)) self.osutil.stop_agent_service() print("Start {0} service".format(AGENT_NAME)) self.osutil.start_agent_service() def run_exthandlers(self): """ Run the update and extension handler """ logger.set_prefix("ExtHandler") from azurelinuxagent.ga.update import get_update_handler update_handler = get_update_handler() update_handler.run() def show_configuration(self): configuration = conf.get_configuration() for k in sorted(configuration.keys()): print("{0} = {1}".format(k, configuration[k])) def main(args=[]): """ Parse command line arguments, exit with usage() on error. 
Invoke different methods according to different command """ if len(args) <= 0: args = sys.argv[1:] command, force, verbose, conf_file_path = parse_args(args) if command == "version": version() elif command == "help": print(usage()) elif command == "start": start(conf_file_path=conf_file_path) else: try: agent = Agent(verbose, conf_file_path=conf_file_path) if command == "deprovision+user": agent.deprovision(force, deluser=True) elif command == "deprovision": agent.deprovision(force, deluser=False) elif command == "provision": agent.provision() elif command == "register-service": agent.register_service() elif command == "daemon": agent.daemon() elif command == "run-exthandlers": agent.run_exthandlers() elif command == "show-configuration": agent.show_configuration() except Exception: logger.error(u"Failed to run '{0}': {1}", command, traceback.format_exc()) def parse_args(sys_args): """ Parse command line arguments """ cmd = "help" force = False verbose = False conf_file_path = None for a in sys_args: m = re.match("^(?:[-/]*)configuration-path:([\w/\.\-_]+)", a) if not m is None: conf_file_path = m.group(1) if not os.path.exists(conf_file_path): print("Error: Configuration file {0} does not exist".format( conf_file_path), file=sys.stderr) usage() sys.exit(1) elif re.match("^([-/]*)deprovision\\+user", a): cmd = "deprovision+user" elif re.match("^([-/]*)deprovision", a): cmd = "deprovision" elif re.match("^([-/]*)daemon", a): cmd = "daemon" elif re.match("^([-/]*)start", a): cmd = "start" elif re.match("^([-/]*)register-service", a): cmd = "register-service" elif re.match("^([-/]*)run-exthandlers", a): cmd = "run-exthandlers" elif re.match("^([-/]*)version", a): cmd = "version" elif re.match("^([-/]*)verbose", a): verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^([-/]*)show-configuration", a): cmd = "show-configuration" elif re.match("^([-/]*)(help|usage|\\?)", a): cmd = "help" else: cmd = "help" break return cmd, force, verbose, 
conf_file_path def version(): """ Show agent version """ print(("{0} running on {1} {2}".format(AGENT_LONG_VERSION, DISTRO_NAME, DISTRO_VERSION))) print("Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)) print("Goal state agent: {0}".format(GOAL_STATE_AGENT_VERSION)) def usage(): """ Return agent usage message """ s = "\n" s += ("usage: {0} [-verbose] [-force] [-help] " "-configuration-path:" "-deprovision[+user]|-register-service|-version|-daemon|-start|" "-run-exthandlers|-show-configuration]" "").format(sys.argv[0]) s += "\n" return s def start(conf_file_path=None): """ Start agent daemon in a background process and set stdout/stderr to /dev/null """ devnull = open(os.devnull, 'w') args = [sys.argv[0], '-daemon'] if conf_file_path is not None: args.append('-configuration-path:{0}'.format(conf_file_path)) subprocess.Popen(args, stdout=devnull, stderr=devnull) if __name__ == '__main__' : main() WALinuxAgent-2.2.32/azurelinuxagent/common/000077500000000000000000000000001335416306700206335ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/common/__init__.py000066400000000000000000000011661335416306700227500ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/common/cgroups.py000066400000000000000000000736051335416306700227020ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import errno import os import re import time from azurelinuxagent.common import logger, conf from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.osutil.default import BASE_CGROUPS from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION WRAPPER_CGROUP_NAME = "Agent+Extensions" METRIC_HIERARCHIES = ['cpu', 'memory'] MEMORY_DEFAULT = -1 # percentage of a single core DEFAULT_CPU_LIMIT_AGENT = 10 DEFAULT_CPU_LIMIT_EXT = 40 DEFAULT_MEM_LIMIT_MIN_MB = 256 # mb, applies to agent and extensions DEFAULT_MEM_LIMIT_MAX_MB = 512 # mb, applies to agent only DEFAULT_MEM_LIMIT_PCT = 15 # percent, applies to extensions re_user_system_times = re.compile('user (\d+)\nsystem (\d+)\n') related_services = { "Microsoft.OSTCExtensions.LinuxDiagnostic": ["omid", "omsagent-LAD", "mdsd-lde"], "Microsoft.Azure.Diagnostics.LinuxDiagnostic": ["omid", "omsagent-LAD", "mdsd-lde"], } class CGroupsException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) # The metric classes (Cpu, Memory, etc) can all assume that CGroups is 
enabled, as the CGroupTelemetry # class is very careful not to call them if CGroups isn't enabled. Any tests should be disabled if the osutil # is_cgroups_support() method returns false. class Cpu(object): def __init__(self, cgt): """ Initialize data collection for the Cpu hierarchy. User must call update() before attempting to get any useful metrics. :param cgt: CGroupsTelemetry :return: """ self.cgt = cgt self.osutil = get_osutil() self.current_cpu_total = self.get_current_cpu_total() self.previous_cpu_total = 0 self.current_system_cpu = self.osutil.get_total_cpu_ticks_since_boot() self.previous_system_cpu = 0 def __str__(self): return "Cgroup: Current {0}, previous {1}; System: Current {2}, previous {3}".format( self.current_cpu_total, self.previous_cpu_total, self.current_system_cpu, self.previous_system_cpu ) def get_current_cpu_total(self): """ Compute the number of USER_HZ of CPU time (user and system) consumed by this cgroup since boot. :return: int """ cpu_total = 0 try: cpu_stat = self.cgt.cgroup.\ get_file_contents('cpu', 'cpuacct.stat') if cpu_stat is not None: m = re_user_system_times.match(cpu_stat) if m: cpu_total = int(m.groups()[0]) + int(m.groups()[1]) except CGroupsException: # There are valid reasons for file contents to be unavailable; for example, if an extension # has not yet started (or has stopped) an associated service on a VM using systemd, the cgroup for # the service will not exist ('cause systemd will tear it down). This might be a transient or a # long-lived state, so there's no point in logging it, much less emitting telemetry. pass return cpu_total def update(self): """ Update all raw data required to compute metrics of interest. The intent is to call update() once, then call the various get_*() methods which use this data, which we've collected exactly once. 
""" self.previous_cpu_total = self.current_cpu_total self.previous_system_cpu = self.current_system_cpu self.current_cpu_total = self.get_current_cpu_total() self.current_system_cpu = self.osutil.get_total_cpu_ticks_since_boot() def get_cpu_percent(self): """ Compute the percent CPU time used by this cgroup over the elapsed time since the last time this instance was update()ed. If the cgroup fully consumed 2 cores on a 4 core system, return 200. :return: CPU usage in percent of a single core :rtype: float """ cpu_delta = self.current_cpu_total - self.previous_cpu_total system_delta = max(1, self.current_system_cpu - self.previous_system_cpu) return round(float(cpu_delta * self.cgt.cpu_count * 100) / float(system_delta), 3) def collect(self): """ Collect and return a list of all cpu metrics. If no metrics are collected, return an empty list. :rtype: [(str, str, float)] """ self.update() usage = self.get_cpu_percent() return [("Process", "% Processor Time", usage)] class Memory(object): def __init__(self, cgt): """ Initialize data collection for the Memory hierarchy :param CGroupsTelemetry cgt: The telemetry object for which memory metrics should be collected :return: """ self.cgt = cgt def get_memory_usage(self): """ Collect memory.usage_in_bytes from the cgroup. :return: Memory usage in bytes :rtype: int """ usage = self.cgt.cgroup.get_parameter('memory', 'memory.usage_in_bytes') if not usage: usage = "0" return int(usage) def collect(self): """ Collect and return a list of all memory metrics :rtype: [(str, str, float)] """ usage = self.get_memory_usage() return [("Memory", "Total Memory Usage", usage)] class CGroupsTelemetry(object): """ Encapsulate the cgroup-based telemetry for the agent or one of its extensions, or for the aggregation across the agent and all of its extensions. These objects should have lifetimes that span the time window over which measurements are desired; in general, they're not terribly effective at providing instantaneous measurements. 
""" _tracked = {} _metrics = { "cpu": Cpu, "memory": Memory } _hierarchies = list(_metrics.keys()) tracked_names = set() @staticmethod def metrics_hierarchies(): return CGroupsTelemetry._hierarchies @staticmethod def track_cgroup(cgroup): """ Create a CGroupsTelemetry object to track a particular CGroups instance. Typical usage: 1) Create a CGroups object 2) Ask CGroupsTelemetry to track it 3) Tell the CGroups object to add one or more processes (or let systemd handle that, for its cgroups) :param CGroups cgroup: The cgroup to track """ name = cgroup.name if CGroups.enabled() and not CGroupsTelemetry.is_tracked(name): tracker = CGroupsTelemetry(name, cgroup=cgroup) CGroupsTelemetry._tracked[name] = tracker @staticmethod def track_systemd_service(name): """ If not already tracking it, create the CGroups object for a systemd service and track it. :param str name: Service name (without .service suffix) to be tracked. """ service_name = "{0}.service".format(name).lower() if CGroups.enabled() and not CGroupsTelemetry.is_tracked(service_name): cgroup = CGroups.for_systemd_service(service_name) tracker = CGroupsTelemetry(service_name, cgroup=cgroup) CGroupsTelemetry._tracked[service_name] = tracker @staticmethod def track_extension(name, cgroup=None): """ Create all required CGroups to track all metrics for an extension and its associated services. :param str name: Full name of the extension to be tracked :param CGroups cgroup: CGroup for the extension itself. This method will create it if none is supplied. 
""" if not CGroups.enabled(): return if not CGroupsTelemetry.is_tracked(name): cgroup = CGroups.for_extension(name) if cgroup is None else cgroup logger.info("Now tracking cgroup {0}".format(name)) cgroup.set_limits() CGroupsTelemetry.track_cgroup(cgroup) if CGroups.is_systemd_manager(): if name in related_services: for service_name in related_services[name]: CGroupsTelemetry.track_systemd_service(service_name) @staticmethod def track_agent(): """ Create and track the correct cgroup for the agent itself. The actual cgroup depends on whether systemd is in use, but the caller doesn't need to know that. """ if not CGroups.enabled(): return if CGroups.is_systemd_manager(): CGroupsTelemetry.track_systemd_service(AGENT_NAME) else: CGroupsTelemetry.track_cgroup(CGroups.for_extension(AGENT_NAME)) @staticmethod def is_tracked(name): return name in CGroupsTelemetry._tracked @staticmethod def stop_tracking(name): """ Stop tracking telemetry for the CGroups associated with an extension. If any system services are being tracked, those will continue to be tracked; multiple extensions might rely upon the same service. :param str name: Extension to be dropped from tracking """ if CGroupsTelemetry.is_tracked(name): del (CGroupsTelemetry._tracked[name]) @staticmethod def collect_all_tracked(): """ Return a dictionary mapping from the name of a tracked cgroup to the list of collected metrics for that cgroup. Collecting metrics is not guaranteed to be a fast operation; it's possible some other thread might add or remove tracking for a cgroup while we're doing it. To avoid "dictionary changed size during iteration" exceptions, work from a shallow copy of the _tracked dictionary. 
:returns: Dictionary of list collected metrics (metric class, metric name, value), by cgroup :rtype: dict(str: [(str, str, float)]) """ results = {} for cgroup_name, collector in CGroupsTelemetry._tracked.copy().items(): cgroup_name = cgroup_name if cgroup_name else WRAPPER_CGROUP_NAME results[cgroup_name] = collector.collect() return results @staticmethod def update_tracked(ext_handlers): """ Track CGroups for all enabled extensions. Track CGroups for services created by enabled extensions. Stop tracking CGroups for not-enabled extensions. :param List(ExtHandler) ext_handlers: """ if not CGroups.enabled(): return not_enabled_extensions = set() for extension in ext_handlers: if extension.properties.state == u"enabled": CGroupsTelemetry.track_extension(extension.name) else: not_enabled_extensions.add(extension.name) names_now_tracked = set(CGroupsTelemetry._tracked.keys()) if CGroupsTelemetry.tracked_names != names_now_tracked: now_tracking = " ".join("[{0}]".format(name) for name in sorted(names_now_tracked)) if len(now_tracking): logger.info("After updating cgroup telemetry, tracking {0}".format(now_tracking)) else: logger.warn("After updating cgroup telemetry, tracking no cgroups.") CGroupsTelemetry.tracked_names = names_now_tracked def __init__(self, name, cgroup=None): """ Create the necessary state to collect metrics for the agent, one of its extensions, or the aggregation across the agent and all of its extensions. To access aggregated metrics, instantiate this object with an empty string or None. :param name: str """ if name is None: name = "" self.name = name if cgroup is None: cgroup = CGroups.for_extension(name) self.cgroup = cgroup self.cpu_count = CGroups.get_num_cores() self.current_wall_time = time.time() self.previous_wall_time = 0 self.data = {} if CGroups.enabled(): for hierarchy in CGroupsTelemetry.metrics_hierarchies(): self.data[hierarchy] = CGroupsTelemetry._metrics[hierarchy](self) def collect(self): """ Return a list of collected metrics. 
class CGroups(object):
    """
    This class represents the cgroup folders for the agent or an extension. This is a pretty lightweight object
    without much state worth preserving; it's not unreasonable to create one just when you need it.
    """
    # whether cgroup support is enabled; cleared on unsupported platforms or after a setup failure
    _enabled = True
    _hierarchies = CGroupsTelemetry.metrics_hierarchies()
    _use_systemd = None  # Tri-state: None (i.e. "unknown"), True, False
    _osutil = get_osutil()

    @staticmethod
    def _construct_custom_path_for_hierarchy(hierarchy, cgroup_name):
        # Agent-managed layout: <base>/<hierarchy>/<agent-name>/<cgroup-name>
        return os.path.join(BASE_CGROUPS, hierarchy, AGENT_NAME, cgroup_name).rstrip(os.path.sep)

    @staticmethod
    def _construct_systemd_path_for_hierarchy(hierarchy, cgroup_name):
        # systemd-managed layout: <base>/<hierarchy>/system.slice/<cgroup-name>
        return os.path.join(BASE_CGROUPS, hierarchy, 'system.slice', cgroup_name).rstrip(os.path.sep)

    @staticmethod
    def for_extension(name):
        """Create a CGroups object for an extension, using the agent-managed path layout."""
        return CGroups(name, CGroups._construct_custom_path_for_hierarchy)

    @staticmethod
    def for_systemd_service(name):
        """Create a CGroups object for a systemd service, using the system.slice path layout."""
        return CGroups(name.lower(), CGroups._construct_systemd_path_for_hierarchy)

    @staticmethod
    def enabled():
        return CGroups._osutil.is_cgroups_supported() and CGroups._enabled

    @staticmethod
    def disable():
        CGroups._enabled = False

    @staticmethod
    def enable():
        CGroups._enabled = True

    def __init__(self, name, path_maker):
        """
        Construct CGroups object. Create appropriately-named directory for each hierarchy of interest.

        :param str name: Name for the cgroup (usually the full name of the extension);
            the empty string denotes the Agents+Extensions wrapper cgroup
        :param path_maker: Function which constructs the root path for a given hierarchy where this cgroup lives
        :raises CGroupsException: if a required hierarchy is not mounted
        """
        if name == "":
            self.name = "Agents+Extensions"
            self.is_wrapper_cgroup = True
        else:
            self.name = name
            self.is_wrapper_cgroup = False

        # maps hierarchy name -> directory path for this cgroup
        self.cgroups = {}

        if not self.enabled():
            return

        system_hierarchies = os.listdir(BASE_CGROUPS)
        for hierarchy in CGroups._hierarchies:
            if hierarchy not in system_hierarchies:
                self.disable()
                raise CGroupsException("Hierarchy {0} is not mounted".format(hierarchy))

            cgroup_name = "" if self.is_wrapper_cgroup else self.name
            cgroup_path = path_maker(hierarchy, cgroup_name)
            if not os.path.isdir(cgroup_path):
                logger.info("Creating cgroup directory {0}".format(cgroup_path))
                CGroups._try_mkdir(cgroup_path)
            self.cgroups[hierarchy] = cgroup_path

    @staticmethod
    def is_systemd_manager():
        """
        Determine if systemd is managing system services. Many extensions are structured as a set of services,
        including the agent itself; systemd expects those services to remain in the cgroups in which it placed
        them. If this process (presumed to be the agent) is in a cgroup that looks like one created by systemd,
        we can assume systemd is in use.

        :return: True if systemd is managing system services
        :rtype: Bool
        """
        if not CGroups.enabled():
            return False
        if CGroups._use_systemd is None:
            # Cached after the first check; probe where this process's cgroup lives
            hierarchy = METRIC_HIERARCHIES[0]
            path = CGroups.get_my_cgroup_folder(hierarchy)
            CGroups._use_systemd = path.startswith(CGroups._construct_systemd_path_for_hierarchy(hierarchy, ""))
        return CGroups._use_systemd

    @staticmethod
    def _try_mkdir(path):
        """
        Try to create a directory, recursively. If it already exists as such, do nothing.
        Raise the appropriate exception should an error occur.

        :param path: str
        :raises CGroupsException: if a plain file occupies the path or permission is denied
        """
        if not os.path.isdir(path):
            try:
                os.makedirs(path, 0o755)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    if not os.path.isdir(path):
                        raise CGroupsException("Create directory for cgroup {0}: "
                                               "normal file already exists with that name".format(path))
                    else:
                        # There was a race to create the directory, but it's there now, and that's fine
                        pass
                elif e.errno == errno.EACCES:
                    # This is unexpected, as the agent runs as root
                    raise CGroupsException("Create directory for cgroup {0}: permission denied".format(path))
                else:
                    raise

    def add(self, pid):
        """
        Add a process to the cgroups for this agent/extension.

        :param int pid: id of a live process to place in each hierarchy's cgroup
        :raises CGroupsException: for the wrapper cgroup or a dead pid
        """
        if not self.enabled():
            return

        if self.is_wrapper_cgroup:
            raise CGroupsException("Cannot add a process to the Agents+Extensions wrapper cgroup")

        if not self._osutil.check_pid_alive(pid):
            raise CGroupsException('PID {0} does not exist'.format(pid))
        for hierarchy, cgroup in self.cgroups.items():
            tasks_file = self._get_cgroup_file(hierarchy, 'cgroup.procs')
            fileutil.append_file(tasks_file, "{0}\n".format(pid))

    def set_limits(self):
        """
        Set per-hierarchy limits based on the cgroup name (agent or particular extension)
        """
        if not conf.get_cgroups_enforce_limits():
            return

        if self.name is None:
            return

        for ext in conf.get_cgroups_excluded():
            if ext in self.name.lower():
                logger.info('No cgroups limits for {0}'.format(self.name))
                return

        # default values for extensions
        cpu_limit = DEFAULT_CPU_LIMIT_EXT
        mem_limit = max(DEFAULT_MEM_LIMIT_MIN_MB,
                        round(self._osutil.get_total_mem() * DEFAULT_MEM_LIMIT_PCT / 100, 0))

        # agent values override the extension defaults
        if AGENT_NAME.lower() in self.name.lower():
            cpu_limit = DEFAULT_CPU_LIMIT_AGENT
            mem_limit = min(DEFAULT_MEM_LIMIT_MAX_MB, mem_limit)

        msg = '{0}: {1}% {2}mb'.format(self.name, cpu_limit, mem_limit)
        logger.info("Setting cgroups limits for {0}".format(msg))
        success = False
        try:
            self.set_cpu_limit(cpu_limit)
            self.set_memory_limit(mem_limit)
            success = True
        except Exception as ge:
            msg = '[{0}] {1}'.format(msg, ustr(ge))
            raise
        finally:
            # local import to avoid a circular dependency with the event module
            from azurelinuxagent.common.event import add_event, WALAEventOperation
            add_event(
                AGENT_NAME,
                version=CURRENT_VERSION,
                op=WALAEventOperation.SetCGroupsLimits,
                is_success=success,
                message=msg,
                log_event=False)

    @staticmethod
    def _apply_wrapper_limits(path, hierarchy):
        """
        Find wrapping limits for the hierarchy and apply them to the cgroup denoted by the path

        :param path: str
        :param hierarchy: str
        """
        # Intentionally a no-op today; hook for future wrapper-level limits
        pass

    @staticmethod
    def _setup_wrapper_groups():
        """
        For each hierarchy, construct the wrapper cgroup and apply the appropriate limits
        """
        for hierarchy in METRIC_HIERARCHIES:
            root_dir = CGroups._construct_custom_path_for_hierarchy(hierarchy, "")
            CGroups._try_mkdir(root_dir)
            CGroups._apply_wrapper_limits(root_dir, hierarchy)

    @staticmethod
    def setup(suppress_process_add=False):
        """
        Only needs to be called once, and should be called from the -daemon instance of the agent.
            Mount the cgroup fs if necessary
            Create wrapper cgroups for agent-plus-extensions and set limits on them;
            Add this process to the "agent" cgroup, if required
        Actual collection of metrics from cgroups happens in the -run-exthandlers instance

        :param bool suppress_process_add: skip creating wrapper groups and adding this process
        """
        if CGroups.enabled():
            try:
                CGroups._osutil.mount_cgroups()
                if not suppress_process_add:
                    CGroups._setup_wrapper_groups()
                    pid = os.getpid()  # already an int; the previous int() cast was redundant
                    if not CGroups.is_systemd_manager():
                        cg = CGroups.for_extension(AGENT_NAME)
                        logger.info("Add daemon process pid {0} to {1} cgroup".format(pid, cg.name))
                        cg.add(pid)
                        cg.set_limits()
                    else:
                        cg = CGroups.for_systemd_service(AGENT_NAME)
                        logger.info("Add daemon process pid {0} to {1} systemd cgroup".format(pid, cg.name))
                        # systemd sets limits; any limits we write would be overwritten
                status = "ok"
            except CGroupsException as cge:
                status = cge.msg
                CGroups.disable()
            except Exception as ge:
                status = ustr(ge)
                CGroups.disable()
        else:
            status = "not supported by platform"
            CGroups.disable()

        logger.info("CGroups: {0}".format(status))

        from azurelinuxagent.common.event import add_event, WALAEventOperation
        add_event(
            AGENT_NAME,
            version=CURRENT_VERSION,
            op=WALAEventOperation.InitializeCGroups,
            is_success=CGroups.enabled(),
            message=status,
            log_event=False)

    @staticmethod
    def add_to_extension_cgroup(name, pid=None):
        """
        Create cgroup directories for this extension in each of the hierarchies and add this process to the new cgroup.
        Should only be called when creating sub-processes and invoked inside the fork/exec window. As a result,
        there's no point in returning the CGroups object itself; the goal is to move the child process into the
        cgroup before the new code even starts running.

        :param str name: Short name of extension, suitable for naming directories in the filesystem
        :param int pid: Process id of extension to be added to the cgroup; defaults to the calling process
        """
        if pid is None:
            # BUG FIX: the default used to be `pid=int(os.getpid())`, which is evaluated
            # once at module import time and therefore captured the *daemon's* pid, not
            # the pid of the forked child invoking this method. Resolve it at call time.
            pid = os.getpid()

        if not CGroups.enabled():
            return
        if name == AGENT_NAME:
            logger.warn('Extension cgroup name cannot match agent cgroup name ({0})'.format(AGENT_NAME))
            return

        try:
            logger.info("Move process {0} into cgroups for extension {1}".format(pid, name))
            CGroups.for_extension(name).add(pid)
        except Exception as ex:
            logger.warn("Unable to move process {0} into cgroups for extension {1}: {2}".format(pid, name, ex))

    @staticmethod
    def get_my_cgroup_path(hierarchy_id):
        """
        Get the cgroup path "suffix" for this process for the given hierarchy ID. The leading "/" is always
        stripped, so the suffix is suitable for passing to os.path.join(). (If the process is in the root cgroup,
        an empty string is returned, and os.path.join() will still do the right thing.)

        :param hierarchy_id: str
        :return: str
        :raises CGroupsException: if no /proc/self/cgroup entry matches
        """
        cgroup_paths = fileutil.read_file("/proc/self/cgroup")
        for entry in cgroup_paths.splitlines():
            fields = entry.split(':')
            if fields[0] == hierarchy_id:
                return fields[2].lstrip(os.path.sep)
        raise CGroupsException("This process belongs to no cgroup for hierarchy ID {0}".format(hierarchy_id))

    @staticmethod
    def get_hierarchy_id(hierarchy):
        """
        Get the cgroups hierarchy ID for a given hierarchy name

        :param hierarchy:
        :return: str
        :raises CGroupsException: if the hierarchy is not listed in /proc/cgroups
        """
        cgroup_states = fileutil.read_file("/proc/cgroups")
        for entry in cgroup_states.splitlines():
            fields = entry.split('\t')
            if fields[0] == hierarchy:
                return fields[1]
        raise CGroupsException("Cgroup hierarchy {0} not found in /proc/cgroups".format(hierarchy))

    @staticmethod
    def get_my_cgroup_folder(hierarchy):
        """
        Find the path of the cgroup in which this process currently lives for the given hierarchy.

        :param hierarchy: str
        :return: str
        """
        hierarchy_id = CGroups.get_hierarchy_id(hierarchy)
        return os.path.join(BASE_CGROUPS, hierarchy, CGroups.get_my_cgroup_path(hierarchy_id))

    def _get_cgroup_file(self, hierarchy, file_name):
        return os.path.join(self.cgroups[hierarchy], file_name)

    @staticmethod
    def _convert_cpu_limit_to_fraction(value):
        """
        Convert a CPU limit from percent (e.g. 50 meaning 50%) to a decimal fraction (0.50).

        :return: Fraction of one CPU to be made available (e.g. 0.5 means half a core)
        :rtype: float
        :raises CGroupsException: if the value is not a number or is out of range
        """
        try:
            limit = float(value)
        except ValueError:
            raise CGroupsException('CPU Limit must be convertible to a float')

        if limit <= float(0) or limit > float(CGroups.get_num_cores() * 100):
            raise CGroupsException('CPU Limit must be between 0 and 100 * numCores')

        return limit / 100.0

    def get_file_contents(self, hierarchy, file_name):
        """
        Retrieve the value of a parameter from a hierarchy.

        :param str hierarchy: Name of cgroup metric hierarchy
        :param str file_name: Name of file within that metric hierarchy
        :return: Entire contents of the file
        :rtype: str
        :raises CGroupsException: if the hierarchy is unavailable or the file cannot be read
        """
        if hierarchy in self.cgroups:
            parameter_file = self._get_cgroup_file(hierarchy, file_name)

            try:
                return fileutil.read_file(parameter_file)
            except Exception:
                raise CGroupsException("Could not retrieve cgroup file {0}/{1}".format(hierarchy, file_name))
        else:
            raise CGroupsException("{0} subsystem not available in cgroup {1}. cgroup paths: {2}".format(
                hierarchy, self.name, self.cgroups))

    def get_parameter(self, hierarchy, parameter_name):
        """
        Retrieve the value of a parameter from a hierarchy.
        Assumes the parameter is the sole line of the file.

        :param str hierarchy: Name of cgroup metric hierarchy
        :param str parameter_name: Name of file within that metric hierarchy
        :return: The first line of the file, without line terminator; "" on any error
        :rtype: str
        """
        result = ""
        try:
            values = self.get_file_contents(hierarchy, parameter_name).splitlines()
            result = values[0]
        except IndexError:
            parameter_filename = self._get_cgroup_file(hierarchy, parameter_name)
            logger.error("File {0} is empty but should not be".format(parameter_filename))
        except CGroupsException:
            # ignore if the file does not exist yet
            pass
        except Exception as e:
            parameter_filename = self._get_cgroup_file(hierarchy, parameter_name)
            logger.error("Exception while attempting to read {0}: {1}".format(parameter_filename, ustr(e)))
        return result

    def set_cpu_limit(self, limit=None):
        """
        Limit this cgroup to a percentage of a single core. limit=10 means 10% of one core; 150 means 150%,
        which is useful only in multi-core systems.
        To limit a cgroup to utilize 10% of a single CPU, use the following commands:
            # echo 10000 > /cgroup/cpu/red/cpu.cfs_quota_us
            # echo 100000 > /cgroup/cpu/red/cpu.cfs_period_us

        :param limit: percentage of one core, or None to leave unlimited
        :raises CGroupsException: if the cpu hierarchy is unavailable
        """
        if not CGroups.enabled():
            return

        if limit is None:
            return

        if 'cpu' in self.cgroups:
            total_units = float(self.get_parameter('cpu', 'cpu.cfs_period_us'))
            limit_units = int(self._convert_cpu_limit_to_fraction(limit) * total_units)
            # renamed from cpu_shares_file: this writes cpu.cfs_quota_us, not cpu.shares
            cfs_quota_file = self._get_cgroup_file('cpu', 'cpu.cfs_quota_us')
            logger.verbose("writing {0} to {1}".format(limit_units, cfs_quota_file))
            fileutil.write_file(cfs_quota_file, '{0}\n'.format(limit_units))
        else:
            raise CGroupsException("CPU hierarchy not available in this cgroup")

    @staticmethod
    def get_num_cores():
        """
        Return the number of CPU cores exposed to this system.

        :return: int
        """
        return CGroups._osutil.get_processor_cores()

    @staticmethod
    def _format_memory_value(unit, limit=None):
        """
        Convert a memory limit expressed in `unit` into bytes; None yields MEMORY_DEFAULT.

        :param str unit: one of 'bytes', 'kilobytes', 'megabytes', 'gigabytes'
        :param limit: numeric limit in that unit, or None
        :return: int
        :raises CGroupsException: for an unknown unit or non-numeric limit
        """
        units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024 * 1024, 'gigabytes': 1024 * 1024 * 1024}
        if unit not in units:
            raise CGroupsException("Unit must be one of {0}".format(units.keys()))
        if limit is None:
            value = MEMORY_DEFAULT
        else:
            try:
                limit = float(limit)
            except ValueError:
                raise CGroupsException('Limit must be convertible to a float')
            else:
                value = int(limit * units[unit])
        return value

    def set_memory_limit(self, limit=None, unit='megabytes'):
        """
        Limit the memory available to this cgroup.

        :param limit: numeric limit in `unit`, or None for the default
        :param str unit: unit for `limit`
        :raises CGroupsException: if the memory hierarchy is unavailable
        """
        if 'memory' in self.cgroups:
            value = self._format_memory_value(unit, limit)
            memory_limit_file = self._get_cgroup_file('memory', 'memory.limit_in_bytes')
            logger.verbose("writing {0} to {1}".format(value, memory_limit_file))
            fileutil.write_file(memory_limit_file, '{0}\n'.format(value))
        else:
            raise CGroupsException("Memory hierarchy not available in this cgroup")
class ConfigurationProvider(object):
    """
    Parse and store key:values in /etc/waagent.conf.
    """

    def __init__(self):
        # parsed option name -> string value (or None for an explicit "None")
        self.values = dict()

    def load(self, content):
        """
        Parse `content` (the text of a waagent.conf-style file) into self.values.

        Lines starting with '#' and lines without '=' are ignored; inline '#'
        comments, surrounding quotes and whitespace are stripped from values;
        the literal value "None" is stored as None.

        :param str content: full text of the configuration file
        :raises AgentConfigError: if content is empty
        """
        if not content:
            # fixed double negative in the original message ("Can't not parse ...")
            raise AgentConfigError("Cannot parse empty configuration")
        for line in content.split('\n'):
            if not line.startswith("#") and "=" in line:
                # '=' is guaranteed present, so split('=', 1) always yields two parts;
                # the original dead "len(parts) < 2" guard was removed.
                key, raw_value = line.split('=', 1)
                key = key.strip()
                value = raw_value.split('#')[0].strip("\" ").strip()
                self.values[key] = value if value != "None" else None

    def get(self, key, default_val):
        """Return the string value for key, or default_val if absent or None."""
        val = self.values.get(key)
        return val if val is not None else default_val

    def get_switch(self, key, default_val):
        """Return True/False for a 'y'/'n' switch, or default_val if absent/other."""
        val = self.values.get(key)
        if val is not None and val.lower() == 'y':
            return True
        elif val is not None and val.lower() == 'n':
            return False
        return default_val

    def get_int(self, key, default_val):
        """Return the integer value for key, or default_val if absent or non-numeric."""
        try:
            return int(self.values.get(key))
        except (TypeError, ValueError):
            return default_val
def load_conf_from_file(conf_file_path, conf=__conf__):
    """
    Load conf file from: conf_file_path

    :param str conf_file_path: path of the configuration file
    :param ConfigurationProvider conf: provider to populate (defaults to the module singleton)
    :raises AgentConfigError: if the file is missing or unreadable
    """
    if not os.path.isfile(conf_file_path):  # idiom fix: was "== False"
        raise AgentConfigError(("Missing configuration in {0}"
                                "").format(conf_file_path))
    try:
        content = fileutil.read_file(conf_file_path)
        conf.load(content)
    except IOError as err:
        raise AgentConfigError(("Failed to load conf file:{0}, {1}"
                                "").format(conf_file_path, err))


# Default values for boolean ('y'/'n') options
__SWITCH_OPTIONS__ = {
    "OS.AllowHTTP": False,
    "OS.EnableFirewall": False,
    "OS.EnableFIPS": False,
    "OS.EnableRDMA": False,
    "OS.UpdateRdmaDriver": False,
    "OS.CheckRdmaDriver": False,
    "Logs.Verbose": False,
    "Extensions.Enabled": True,
    "Provisioning.Enabled": True,
    "Provisioning.UseCloudInit": False,
    "Provisioning.AllowResetSysUser": False,
    "Provisioning.RegenerateSshHostKeyPair": False,
    "Provisioning.DeleteRootPassword": False,
    "Provisioning.DecodeCustomData": False,
    "Provisioning.ExecuteCustomData": False,
    "Provisioning.MonitorHostName": False,
    "DetectScvmmEnv": False,
    "ResourceDisk.Format": False,
    "ResourceDisk.EnableSwap": False,
    "AutoUpdate.Enabled": True,
    "EnableOverProvisioning": True,
    "CGroups.EnforceLimits": False,
}

# Default values for free-form string options (None means "unset")
__STRING_OPTIONS__ = {
    "Lib.Dir": "/var/lib/waagent",
    "DVD.MountPoint": "/mnt/cdrom/secure",
    "Pid.File": "/var/run/waagent.pid",
    "Extension.LogDir": "/var/log/azure",
    "OS.OpensslPath": "/usr/bin/openssl",
    "OS.SshDir": "/etc/ssh",
    "OS.HomeDir": "/home",
    "OS.PasswordPath": "/etc/shadow",
    "OS.SudoersDir": "/etc/sudoers.d",
    "OS.RootDeviceScsiTimeout": None,
    "Provisioning.SshHostKeyPairType": "rsa",
    "Provisioning.PasswordCryptId": "6",
    "HttpProxy.Host": None,
    "ResourceDisk.MountPoint": "/mnt/resource",
    "ResourceDisk.MountOptions": None,
    "ResourceDisk.Filesystem": "ext3",
    "AutoUpdate.GAFamily": "Prod",
    "CGroups.Excluded": "customscript,runcommand",
}

# Default values for integer options
__INTEGER_OPTIONS__ = {
    "OS.SshClientAliveInterval": 180,
    "Provisioning.PasswordCryptSaltLength": 10,
    "HttpProxy.Port": None,
    "ResourceDisk.SwapSizeMB": 0,
    # NOTE: key casing ("Autoupdate") differs from "AutoUpdate.GAFamily"/"AutoUpdate.Enabled";
    # kept as-is because changing it would break existing configuration files.
    "Autoupdate.Frequency": 3600
}


def get_configuration(conf=__conf__):
    """Return a dict of every known option resolved against `conf` with its default."""
    options = {}
    for option in __SWITCH_OPTIONS__:
        options[option] = conf.get_switch(option, __SWITCH_OPTIONS__[option])

    for option in __STRING_OPTIONS__:
        options[option] = conf.get(option, __STRING_OPTIONS__[option])

    for option in __INTEGER_OPTIONS__:
        options[option] = conf.get_int(option, __INTEGER_OPTIONS__[option])

    return options


# --- OS / networking switches ---------------------------------------------

def enable_firewall(conf=__conf__):
    return conf.get_switch("OS.EnableFirewall", False)


def enable_rdma(conf=__conf__):
    return conf.get_switch("OS.EnableRDMA", False) or \
           conf.get_switch("OS.UpdateRdmaDriver", False) or \
           conf.get_switch("OS.CheckRdmaDriver", False)


def enable_rdma_update(conf=__conf__):
    return conf.get_switch("OS.UpdateRdmaDriver", False)


def get_logs_verbose(conf=__conf__):
    return conf.get_switch("Logs.Verbose", False)


# --- Paths -----------------------------------------------------------------

def get_lib_dir(conf=__conf__):
    return conf.get("Lib.Dir", "/var/lib/waagent")


def get_published_hostname(conf=__conf__):
    return os.path.join(get_lib_dir(conf), 'published_hostname')


def get_dvd_mount_point(conf=__conf__):
    return conf.get("DVD.MountPoint", "/mnt/cdrom/secure")


def get_agent_pid_file_path(conf=__conf__):
    return conf.get("Pid.File", "/var/run/waagent.pid")


def get_ext_log_dir(conf=__conf__):
    return conf.get("Extension.LogDir", "/var/log/azure")


def get_fips_enabled(conf=__conf__):
    return conf.get_switch("OS.EnableFIPS", False)


def get_openssl_cmd(conf=__conf__):
    return conf.get("OS.OpensslPath", "/usr/bin/openssl")


def get_ssh_client_alive_interval(conf=__conf__):
    # BUG FIX: this key is declared in __INTEGER_OPTIONS__ and read with get_int
    # by get_configuration, but here it used conf.get, returning a *string* when
    # the option is set in waagent.conf. Use get_int for a consistent int result.
    return conf.get_int("OS.SshClientAliveInterval", 180)


def get_ssh_dir(conf=__conf__):
    return conf.get("OS.SshDir", "/etc/ssh")


def get_home_dir(conf=__conf__):
    return conf.get("OS.HomeDir", "/home")


def get_passwd_file_path(conf=__conf__):
    return conf.get("OS.PasswordPath", "/etc/shadow")


def get_sudoers_dir(conf=__conf__):
    return conf.get("OS.SudoersDir", "/etc/sudoers.d")


def get_sshd_conf_file_path(conf=__conf__):
    return os.path.join(get_ssh_dir(conf), "sshd_config")


def get_ssh_key_glob(conf=__conf__):
    return os.path.join(get_ssh_dir(conf), 'ssh_host_*key*')


def get_ssh_key_private_path(conf=__conf__):
    return os.path.join(get_ssh_dir(conf),
                        'ssh_host_{0}_key'.format(get_ssh_host_keypair_type(conf)))


def get_ssh_key_public_path(conf=__conf__):
    return os.path.join(get_ssh_dir(conf),
                        'ssh_host_{0}_key.pub'.format(get_ssh_host_keypair_type(conf)))


def get_root_device_scsi_timeout(conf=__conf__):
    return conf.get("OS.RootDeviceScsiTimeout", None)


def get_ssh_host_keypair_type(conf=__conf__):
    keypair_type = conf.get("Provisioning.SshHostKeyPairType", "rsa")
    if keypair_type == "auto":
        # "auto" generates all supported key types and returns the rsa
        # thumbprint as the default.
        return "rsa"
    return keypair_type


def get_ssh_host_keypair_mode(conf=__conf__):
    return conf.get("Provisioning.SshHostKeyPairType", "rsa")


# --- Provisioning ----------------------------------------------------------

def get_provision_enabled(conf=__conf__):
    return conf.get_switch("Provisioning.Enabled", True)


def get_extensions_enabled(conf=__conf__):
    return conf.get_switch("Extensions.Enabled", True)


def get_provision_cloudinit(conf=__conf__):
    return conf.get_switch("Provisioning.UseCloudInit", False)


def get_allow_reset_sys_user(conf=__conf__):
    return conf.get_switch("Provisioning.AllowResetSysUser", False)


def get_regenerate_ssh_host_key(conf=__conf__):
    return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False)


def get_delete_root_password(conf=__conf__):
    return conf.get_switch("Provisioning.DeleteRootPassword", False)


def get_decode_customdata(conf=__conf__):
    return conf.get_switch("Provisioning.DecodeCustomData", False)


def get_execute_customdata(conf=__conf__):
    return conf.get_switch("Provisioning.ExecuteCustomData", False)


def get_password_cryptid(conf=__conf__):
    return conf.get("Provisioning.PasswordCryptId", "6")


def get_password_crypt_salt_len(conf=__conf__):
    return conf.get_int("Provisioning.PasswordCryptSaltLength", 10)


def get_monitor_hostname(conf=__conf__):
    return conf.get_switch("Provisioning.MonitorHostName", False)


# --- Proxy / environment ---------------------------------------------------

def get_httpproxy_host(conf=__conf__):
    return conf.get("HttpProxy.Host", None)


def get_httpproxy_port(conf=__conf__):
    return conf.get_int("HttpProxy.Port", None)


def get_detect_scvmm_env(conf=__conf__):
    return conf.get_switch("DetectScvmmEnv", False)


# --- Resource disk ---------------------------------------------------------

def get_resourcedisk_format(conf=__conf__):
    return conf.get_switch("ResourceDisk.Format", False)


def get_resourcedisk_enable_swap(conf=__conf__):
    return conf.get_switch("ResourceDisk.EnableSwap", False)


def get_resourcedisk_mountpoint(conf=__conf__):
    return conf.get("ResourceDisk.MountPoint", "/mnt/resource")


def get_resourcedisk_mountoptions(conf=__conf__):
    return conf.get("ResourceDisk.MountOptions", None)


def get_resourcedisk_filesystem(conf=__conf__):
    return conf.get("ResourceDisk.Filesystem", "ext3")


def get_resourcedisk_swap_size_mb(conf=__conf__):
    return conf.get_int("ResourceDisk.SwapSizeMB", 0)


# --- Auto-update / misc ----------------------------------------------------

def get_autoupdate_gafamily(conf=__conf__):
    return conf.get("AutoUpdate.GAFamily", "Prod")


def get_autoupdate_enabled(conf=__conf__):
    return conf.get_switch("AutoUpdate.Enabled", True)


def get_autoupdate_frequency(conf=__conf__):
    return conf.get_int("Autoupdate.Frequency", 3600)


def get_enable_overprovisioning(conf=__conf__):
    return conf.get_switch("EnableOverProvisioning", True)


def get_allow_http(conf=__conf__):
    return conf.get_switch("OS.AllowHTTP", False)


def get_disable_agent_file_path(conf=__conf__):
    return os.path.join(get_lib_dir(conf), DISABLE_AGENT_FILE)


def get_cgroups_enforce_limits(conf=__conf__):
    return conf.get_switch("CGroups.EnforceLimits", False)


def get_cgroups_excluded(conf=__conf__):
    # NOTE: default differs from __STRING_OPTIONS__ only by a space; both parse
    # to the same list because entries are stripped below.
    excluded_value = conf.get("CGroups.Excluded", "customscript, runcommand")
    return [s for s in [i.strip().lower() for i in excluded_value.split(',')] if len(s) > 0] \
        if excluded_value else []
class DhcpHandler(object):
    """
    Azure uses DHCP option 245 to pass the wire server endpoint ip to VMs.
    This handler probes for that endpoint (via route table, lease cache, or a
    live DHCP exchange) and configures the routes it learns.
    """

    def __init__(self):
        self.osutil = get_osutil()
        self.endpoint = None
        self.gateway = None
        self.routes = None
        # first exchange is unicast; flipped to True so a retry broadcasts
        self._request_broadcast = False
        self.skip_cache = False

    def run(self):
        """
        Send dhcp request
        Configure default gateway and routes
        Save wire server endpoint if found
        """
        # Cheap checks first: an existing route or cached lease makes the
        # DHCP exchange unnecessary.
        if self.wireserver_route_exists or self.dhcp_cache_exists:
            return

        self.send_dhcp_req()
        self.conf_routes()

    def wait_for_network(self):
        """
        Block until the network stack reports a usable IPv4 address,
        nudging the interface up every 10 seconds.
        """
        ipv4 = self.osutil.get_ip4_addr()
        while ipv4 == '' or ipv4 == '0.0.0.0':
            logger.info("Waiting for network.")
            time.sleep(10)
            logger.info("Try to start network interface.")
            self.osutil.start_network()
            ipv4 = self.osutil.get_ip4_addr()

    @property
    def wireserver_route_exists(self):
        """
        Determine whether a route to the known wireserver
        ip already exists, and if so use that as the endpoint.
        This is true when running in a virtual network.

        :return: True if a route to KNOWN_WIRESERVER_IP exists.
        """
        route_exists = False
        logger.info("Test for route to {0}".format(KNOWN_WIRESERVER_IP))

        try:
            route_file = '/proc/net/route'
            if os.path.exists(route_file) and \
                    KNOWN_WIRESERVER_IP_ENTRY in open(route_file).read():
                # reset self.gateway and self.routes
                # we do not need to alter the routing table
                self.endpoint = KNOWN_WIRESERVER_IP
                self.gateway = None
                self.routes = None
                route_exists = True
                logger.info("Route to {0} exists".format(KNOWN_WIRESERVER_IP))
            else:
                logger.warn("No route exists to {0}".format(KNOWN_WIRESERVER_IP))
        except Exception as e:
            logger.error(
                "Could not determine whether route exists to {0}: {1}".format(
                    KNOWN_WIRESERVER_IP, e))

        return route_exists

    @property
    def dhcp_cache_exists(self):
        """
        Check whether the dhcp options cache exists and contains the
        wireserver endpoint, unless skip_cache is True.

        :return: True if the cached endpoint was found in the dhcp lease
        """
        if self.skip_cache:
            return False

        exists = False

        logger.info("Checking for dhcp lease cache")
        cached_endpoint = self.osutil.get_dhcp_lease_endpoint()
        if cached_endpoint is not None:
            self.endpoint = cached_endpoint
            exists = True
        logger.info("Cache exists [{0}]".format(exists))
        return exists

    def conf_routes(self):
        """Apply the default gateway and any static routes learned from DHCP."""
        logger.info("Configure routes")
        logger.info("Gateway:{0}", self.gateway)
        logger.info("Routes:{0}", self.routes)
        # Add default gateway
        if self.gateway is not None and self.osutil.is_missing_default_route():
            self.osutil.route_add(0, 0, self.gateway)
        if self.routes is not None:
            for route in self.routes:
                self.osutil.route_add(route[0], route[1], route[2])

    def _send_dhcp_req(self, request):
        """
        Send `request` with retries; return the validated response or None.

        Inter-attempt back-off is 0, 10, 30, 60 seconds (five attempts total).
        """
        waiting_durations = [0, 10, 30, 60, 60]
        last_attempt = len(waiting_durations) - 1
        for attempt, duration in enumerate(waiting_durations):
            try:
                self.osutil.allow_dhcp_broadcast()
                response = socket_send(request)
                validate_dhcp_resp(request, response)
                return response
            except DhcpError as e:
                logger.warn("Failed to send DHCP request: {0}", e)
                # BUG FIX: the original slept after *every* failure, including
                # the last one, adding a pointless 60s delay before giving up.
                if attempt < last_attempt:
                    time.sleep(duration)
        return None

    def send_dhcp_req(self):
        """
        Perform a full DHCP exchange and record endpoint, gateway and routes.

        :raises DhcpError: if no valid response is received.
        """
        # Check if DHCP is available
        (dhcp_available, endpoint) = self.osutil.is_dhcp_available()
        if not dhcp_available:
            logger.info("send_dhcp_req: DHCP not available")
            self.endpoint = endpoint
            return

        # Build dhcp request with mac addr;
        # configure route to allow dhcp traffic;
        # stop dhcp service if necessary.
        # (This was previously a stray triple-quoted string, i.e. a no-op
        # expression statement; converted to a real comment.)
        logger.info("Send dhcp request")
        mac_addr = self.osutil.get_mac_addr()

        # Do unicast first, then fallback to broadcast if fails.
        req = build_dhcp_request(mac_addr, self._request_broadcast)
        if not self._request_broadcast:
            self._request_broadcast = True

        # Temporary allow broadcast for dhcp. Remove the route when done.
        missing_default_route = self.osutil.is_missing_default_route()
        ifname = self.osutil.get_if_name()
        if missing_default_route:
            self.osutil.set_route_for_dhcp_broadcast(ifname)

        # In some distros, dhcp service needs to be shutdown before agent probe
        # endpoint through dhcp.
        if self.osutil.is_dhcp_enabled():
            self.osutil.stop_dhcp_service()

        resp = self._send_dhcp_req(req)

        if self.osutil.is_dhcp_enabled():
            self.osutil.start_dhcp_service()

        if missing_default_route:
            self.osutil.remove_route_for_dhcp_broadcast(ifname)

        if resp is None:
            raise DhcpError("Failed to receive dhcp response.")
        self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp)
def validate_dhcp_resp(request, response):
    """
    Validate that `response` is a well-formed DHCP reply matching `request`.

    Checks minimum length, magic cookie, transaction id and client MAC address.

    :param request: the packet we sent
    :param response: the packet we received
    :raises DhcpError: if the response is too short or does not match the request
    """
    bytes_recv = len(response)
    if bytes_recv < 0xF6:
        logger.error("HandleDhcpResponse: Too few bytes received:{0}",
                     bytes_recv)
        # BUG FIX: this used to `return False`, but the caller ignores the
        # return value and relies on exceptions, so a truncated packet was
        # silently accepted as a valid response. Raise instead.
        raise DhcpError("Too few bytes received in dhcp response: {0}".format(bytes_recv))

    logger.verbose("BytesReceived:{0}", hex(bytes_recv))
    logger.verbose("DHCP response:{0}", hex_dump(response, bytes_recv))

    # check transactionId, cookie, MAC address cookie should never mismatch
    # transactionId and MAC address may mismatch if we see a response
    # meant from another machine
    if not compare_bytes(request, response, 0xEC, 4):
        logger.verbose("Cookie not match:\nsend={0},\nreceive={1}",
                       hex_dump3(request, 0xEC, 4),
                       hex_dump3(response, 0xEC, 4))
        # "respones" typo fixed in the three messages below
        raise DhcpError("Cookie in dhcp response doesn't match the request")

    if not compare_bytes(request, response, 4, 4):
        logger.verbose("TransactionID not match:\nsend={0},\nreceive={1}",
                       hex_dump3(request, 4, 4),
                       hex_dump3(response, 4, 4))
        raise DhcpError("TransactionID in dhcp response "
                        "doesn't match the request")

    if not compare_bytes(request, response, 0x1C, 6):
        logger.verbose("Mac Address not match:\nsend={0},\nreceive={1}",
                       hex_dump3(request, 0x1C, 6),
                       hex_dump3(response, 0x1C, 6))
        raise DhcpError("Mac Addr in dhcp response "
                        "doesn't match the request")
def parse_route(response, option, i, length, bytes_recv):
    """
    Parse one classless-static-route option (249) starting at offset i.

    :param response: raw DHCP response packet
    :param int option: option code (for logging only)
    :param int i: offset of the option code byte within response
    :param int length: declared option length
    :param int bytes_recv: total packet length
    :return: list of (net, mask, gateway) tuples as 32-bit ints
    """
    # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
    logger.verbose("Routes at offset: {0} with length:{1}", hex(i), hex(length))
    routes = []
    if length < 5:
        logger.error("Data too small for option:{0}", option)
    j = i + 2  # skip the option-code and length bytes
    while j < (i + length + 2):
        # Each route entry: 1 byte prefix length, ceil(bits/8) bytes of
        # network prefix, then a 4-byte gateway.
        mask_len_bits = str_to_ord(response[j])
        mask_len_bytes = (((mask_len_bits + 7) & ~7) >> 3)
        mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - mask_len_bits))
        j += 1
        net = unpack_big_endian(response, j, mask_len_bytes)
        # left-align the truncated prefix into a full 32-bit address
        net <<= (32 - mask_len_bytes * 8)
        net &= mask
        j += mask_len_bytes
        gateway = unpack_big_endian(response, j, 4)
        j += 4
        routes.append((net, mask, gateway))
    if j != (i + length + 2):
        # entries did not tile the declared option length exactly
        logger.error("Unable to parse routes")
    return routes


def parse_ip_addr(response, option, i, length, bytes_recv):
    """
    Parse a single 4-byte IP-address option at offset i.

    :return: dotted-quad string, or None if the option is malformed/truncated
    """
    if i + 5 < bytes_recv:
        if length != 4:
            logger.error("Endpoint or Default Gateway not 4 bytes")
            return None
        addr = unpack_big_endian(response, i + 2, 4)
        ip_addr = int_to_ip4_addr(addr)
        return ip_addr
    else:
        logger.error("Data too small for option:{0}", option)
        return None


def parse_dhcp_resp(response):
    """
    Parse DHCP response:
    Returns endpoint server or None on error.

    :return: (endpoint, gateway, routes) tuple; each element may be None
    """
    logger.verbose("parse Dhcp Response")
    bytes_recv = len(response)
    endpoint = None
    gateway = None
    routes = None

    # Walk all the returned options, parsing out what we need, ignoring the
    # others. We need the custom option 245 to find the endpoint we talk to
    # as well as to handle some Linux DHCP client incompatibilities;
    # options 3 for default gateway and 249 for routes; 255 is end.

    i = 0xF0  # offset to first option
    while i < bytes_recv:
        option = str_to_ord(response[i])
        length = 0
        if (i + 1) < bytes_recv:
            length = str_to_ord(response[i + 1])
        logger.verbose("DHCP option {0} at offset:{1} with length:{2}",
                       hex(option), hex(i), hex(length))
        if option == 255:
            logger.verbose("DHCP packet ended at offset:{0}", hex(i))
            break
        elif option == 249:
            routes = parse_route(response, option, i, length, bytes_recv)
        elif option == 3:
            gateway = parse_ip_addr(response, option, i, length, bytes_recv)
            logger.verbose("Default gateway:{0}, at {1}", gateway, hex(i))
        elif option == 245:
            endpoint = parse_ip_addr(response, option, i, length, bytes_recv)
            logger.verbose("Azure wire protocol endpoint:{0}, at {1}",
                           endpoint, hex(i))
        else:
            logger.verbose("Skipping DHCP option:{0} at {1} with length {2}",
                           hex(option), hex(i), hex(length))
        # advance past option code + length byte + payload
        i += length + 2
    return endpoint, gateway, routes


def socket_send(request):
    """
    Broadcast `request` on UDP 68->67 and return the first response packet.

    :raises DhcpError: on any socket error.
    """
    sock = None
    try:
        sock = socket.socket(socket.AF_INET,
                             socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("0.0.0.0", 68))
        # NOTE(review): upstream WALinuxAgent sends to ("<broadcast>", 67);
        # the empty host string here resolves to 0.0.0.0 and looks like the
        # "<broadcast>" token was lost in a text conversion — confirm.
        sock.sendto(request, ("", 67))
        sock.settimeout(10)
        logger.verbose("Send DHCP request: Setting socket.timeout=10, "
                       "entering recv")
        response = sock.recv(1024)
        return response
    except IOError as e:
        raise DhcpError("{0}".format(e))
    finally:
        if sock is not None:
            sock.close()
def build_dhcp_request(mac_addr, request_broadcast):
    """
    Build DHCP request string.

    :param mac_addr: client hardware (MAC) address, 6 bytes
    :param bool request_broadcast: set the broadcast flag so the server
        replies to the broadcast address (useful when the user dhclient fails)
    :return: the DHCPDISCOVER packet
    :rtype: array.array
    """
    #
    # typedef struct _DHCP {
    #     UINT8 Opcode;                    /* op:    BOOTREQUEST or BOOTREPLY */
    #     UINT8 HardwareAddressType;       /* htype: ethernet */
    #     UINT8 HardwareAddressLength;     /* hlen:  6 (48 bit mac address) */
    #     UINT8 Hops;                      /* hops:  0 */
    #     UINT8 TransactionID[4];          /* xid:   random */
    #     UINT8 Seconds[2];                /* secs:  0 */
    #     UINT8 Flags[2];                  /* flags: 0 or 0x8000 for broadcast */
    #     UINT8 ClientIpAddress[4];        /* ciaddr: 0 */
    #     UINT8 YourIpAddress[4];          /* yiaddr: 0 */
    #     UINT8 ServerIpAddress[4];        /* siaddr: 0 */
    #     UINT8 RelayAgentIpAddress[4];    /* giaddr: 0 */
    #     UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */
    #     UINT8 ServerName[64];            /* sname: 0 */
    #     UINT8 BootFileName[128];         /* file:  0 */
    #     UINT8 MagicCookie[4];            /* 99 130 83 99 (0x63 0x82 0x53 0x63) */
    #     /* options -- hard code ours */
    #     UINT8 MessageTypeCode;           /* 53 */
    #     UINT8 MessageTypeLength;         /* 1 */
    #     UINT8 MessageType;               /* 1 for DISCOVER */
    #     UINT8 End;                       /* 255 */
    # } DHCP;
    #

    # list of 244 zeros
    # (struct.pack_into would be good here, but requires Python 2.5)
    request = [0] * 244

    trans_id = gen_trans_id()

    # Opcode = 1 (BOOTREQUEST)
    # HardwareAddressType = 1 (ethernet/MAC)
    # HardwareAddressLength = 6 (ethernet/MAC/48 bits)
    request[0:3] = [1, 1, 6]

    # fill in transaction id (random number to ensure response matches request)
    for a in range(0, 4):
        request[4 + a] = str_to_ord(trans_id[a])

    logger.verbose("BuildDhcpRequest: transactionId:%s,%04X" % (
        hex_dump2(trans_id),
        unpack_big_endian(request, 4, 4)))

    if request_broadcast:
        # set broadcast flag to true to request the dhcp server
        # to respond to a broadcast address;
        # this is useful when user dhclient fails.
        request[0x0A] = 0x80  # stray trailing semicolon removed

    # fill in ClientHardwareAddress
    for a in range(0, 6):
        request[0x1C + a] = str_to_ord(mac_addr[a])

    # DHCP Magic Cookie: 99, 130, 83, 99
    # MessageTypeCode = 53 DHCP Message Type
    # MessageTypeLength = 1
    # MessageType = DHCPDISCOVER
    # End = 255 DHCP_END
    request[0xEC:0xF4] = [99, 130, 83, 99, 53, 1, 1, 255]
    return array.array("B", request)


def gen_trans_id():
    """Return 4 random bytes used as the DHCP transaction id."""
    return os.urandom(4)
# Thresholds for how long an error streak must persist before it "triggers"
ERROR_STATE_DELTA_DEFAULT = timedelta(minutes=15)
ERROR_STATE_DELTA_INSTALL = timedelta(minutes=5)
ERROR_STATE_HOST_PLUGIN_FAILURE = timedelta(minutes=5)


class ErrorState(object):
    """
    Tracks a streak of consecutive failures and reports when the streak has
    persisted for at least `min_timedelta`.
    """

    def __init__(self, min_timedelta=ERROR_STATE_DELTA_DEFAULT):
        self.min_timedelta = min_timedelta
        self.count = 0
        self.timestamp = None  # UTC time of the first failure in the current streak

    def incr(self):
        """Record a failure; the first failure of a streak starts the clock."""
        if self.count == 0:
            self.timestamp = datetime.utcnow()
        self.count += 1

    def reset(self):
        """Clear the failure streak."""
        self.count = 0
        self.timestamp = None

    def is_triggered(self):
        """Return True if failures have persisted for at least min_timedelta."""
        if self.timestamp is None:
            return False

        delta = datetime.utcnow() - self.timestamp
        if delta >= self.min_timedelta:
            return True

        return False

    @property
    def fail_time(self):
        """
        Human-readable duration since the first failure, e.g. '5.0 min' or
        '1.5 hr'; 'unknown' if no failure is recorded.
        """
        if self.timestamp is None:
            return 'unknown'

        # BUG FIX: timedelta.seconds only covers the within-day remainder and
        # silently drops whole days, so a streak older than 24h reported a
        # wrapped-around duration. total_seconds() includes the full span.
        delta = round((datetime.utcnow() - self.timestamp).total_seconds() / 60.0, 2)
        if delta < 60:
            return '{0} min'.format(delta)

        delta_hr = round(delta / 60.0, 2)
        return '{0} hr'.format(delta_hr)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import atexit import datetime import json import os import sys import time import traceback from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEvent, \ get_properties from azurelinuxagent.common.utils import fileutil, textutil from azurelinuxagent.common.version import CURRENT_VERSION _EVENT_MSG = "Event: name={0}, op={1}, message={2}, duration={3}" class WALAEventOperation: ActivateResourceDisk = "ActivateResourceDisk" AgentBlacklisted = "AgentBlacklisted" AgentEnabled = "AgentEnabled" ArtifactsProfileBlob = "ArtifactsProfileBlob" AutoUpdate = "AutoUpdate" CustomData = "CustomData" Deploy = "Deploy" Disable = "Disable" Downgrade = "Downgrade" Download = "Download" Enable = "Enable" ExtensionProcessing = "ExtensionProcessing" Firewall = "Firewall" GetArtifactExtended = "GetArtifactExtended" HealthCheck = "HealthCheck" HealthObservation = "HealthObservation" HeartBeat = "HeartBeat" HostPlugin = "HostPlugin" HostPluginHeartbeat = "HostPluginHeartbeat" HostPluginHeartbeatExtended = "HostPluginHeartbeatExtended" HttpErrors = "HttpErrors" ImdsHeartbeat = "ImdsHeartbeat" Install = "Install" InitializeCGroups = "InitializeCGroups" InitializeHostPlugin = "InitializeHostPlugin" Log = "Log" Partition = "Partition" ProcessGoalState = "ProcessGoalState" Provision = "Provision" 
ProvisionGuestAgent = "ProvisionGuestAgent" RemoteAccessHandling = "RemoteAccessHandling" ReportStatus = "ReportStatus" ReportStatusExtended = "ReportStatusExtended" Restart = "Restart" SequenceNumberMismatch = "SequenceNumberMismatch" SetCGroupsLimits = "SetCGroupsLimits" SkipUpdate = "SkipUpdate" UnhandledError = "UnhandledError" UnInstall = "UnInstall" Unknown = "Unknown" Upgrade = "Upgrade" Update = "Update" SHOULD_ENCODE_MESSAGE_LEN = 80 SHOULD_ENCODE_MESSAGE_OP = [ WALAEventOperation.Disable, WALAEventOperation.Enable, WALAEventOperation.Install, WALAEventOperation.UnInstall, ] class EventStatus(object): EVENT_STATUS_FILE = "event_status.json" def __init__(self): self._path = None self._status = {} def clear(self): self._status = {} self._save() def event_marked(self, name, version, op): return self._event_name(name, version, op) in self._status def event_succeeded(self, name, version, op): event = self._event_name(name, version, op) if event not in self._status: return True return self._status[event] is True def initialize(self, status_dir=conf.get_lib_dir()): self._path = os.path.join(status_dir, EventStatus.EVENT_STATUS_FILE) self._load() def mark_event_status(self, name, version, op, status): event = self._event_name(name, version, op) self._status[event] = (status is True) self._save() def _event_name(self, name, version, op): return "{0}-{1}-{2}".format(name, version, op) def _load(self): try: self._status = {} if os.path.isfile(self._path): with open(self._path, 'r') as f: self._status = json.load(f) except Exception as e: logger.warn("Exception occurred loading event status: {0}".format(e)) self._status = {} def _save(self): try: with open(self._path, 'w') as f: json.dump(self._status, f) except Exception as e: logger.warn("Exception occurred saving event status: {0}".format(e)) __event_status__ = EventStatus() __event_status_operations__ = [ WALAEventOperation.AutoUpdate, WALAEventOperation.ReportStatus ] def _encode_message(op, message): """ Gzip 
and base64 encode a message based on the operation. The intent of this message is to make the logs human readable and include the stdout/stderr from extension operations. Extension operations tend to generate a lot of noise, which makes it difficult to parse the line-oriented waagent.log. The compromise is to encode the stdout/stderr so we preserve the data and do not destroy the line oriented nature. The data can be recovered using the following command: $ echo '' | base64 -d | pigz -zd You may need to install the pigz command. :param op: Operation, e.g. Enable or Install :param message: Message to encode :return: gzip'ed and base64 encoded message, or the original message """ if len(message) == 0: return message if op not in SHOULD_ENCODE_MESSAGE_OP: return message try: return textutil.compress(message) except Exception: # If the message could not be encoded a dummy message ('<>') is returned. # The original message was still sent via telemetry, so all is not lost. return "<>" def _log_event(name, op, message, duration, is_success=True): global _EVENT_MSG message = _encode_message(op, message) if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) else: logger.info(_EVENT_MSG, name, op, message, duration) class EventLogger(object): def __init__(self): self.event_dir = None self.periodic_events = {} def save_event(self, data): if self.event_dir is None: logger.warn("Cannot save event -- Event reporter is not initialized.") return fileutil.mkdir(self.event_dir, mode=0o700) existing_events = os.listdir(self.event_dir) if len(existing_events) >= 1000: existing_events.sort() oldest_files = existing_events[:-999] logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir)) try: for f in oldest_files: os.remove(os.path.join(self.event_dir, f)) except IOError as e: raise EventError(e) filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000))) try: with open(filename + ".tmp", 'wb+') as hfile: 
hfile.write(data.encode("utf-8")) os.rename(filename + ".tmp", filename + ".tld") except IOError as e: raise EventError("Failed to write events to file:{0}", e) def reset_periodic(self): self.periodic_events = {} def is_period_elapsed(self, delta, h): return h not in self.periodic_events or \ (self.periodic_events[h] + delta) <= datetime.now() def add_periodic(self, delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, force=False): h = hash(name+op+ustr(is_success)+message) if force or self.is_period_elapsed(delta, h): self.add_event(name, op=op, is_success=is_success, duration=duration, version=version, message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) self.periodic_events[h] = datetime.now() def add_event(self, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True): if (not is_success) and log_event: _log_event(name, op, message, duration, is_success=is_success) self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=6) def _add_event(self, duration, evt_type, is_internal, is_success, message, name, op, version, eventId): event = TelemetryEvent(eventId, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) data = get_properties(event) try: self.save_event(json.dumps(data)) 
except EventError as e: logger.error("{0}", e) def add_log_event(self, level, message): # By the time the message has gotten to this point it is formatted as # # YYYY/MM/DD HH:mm:ss.fffffff LEVEL . # # The timestamp and the level are redundant, and should be stripped. # The logging library does not schematize this data, so I am forced # to parse the message. The format is regular, so the burden is low. parts = message.split(' ', 3) msg = parts[3] if len(parts) == 4 \ else message event = TelemetryEvent(7, "FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F") event.parameters.append(TelemetryEventParam('EventName', WALAEventOperation.Log)) event.parameters.append(TelemetryEventParam('CapabilityUsed', logger.LogLevel.STRINGS[level])) event.parameters.append(TelemetryEventParam('Context1', msg)) event.parameters.append(TelemetryEventParam('Context2', '')) event.parameters.append(TelemetryEventParam('Context3', '')) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError: pass def add_metric(self, category, counter, instance, value, log_event=False): """ Create and save an event which contains a telemetry event. :param str category: The category of metric (e.g. "cpu", "memory") :param str counter: The specific metric within the category (e.g. "%idle") :param str instance: For instanced metrics, the instance identifier (filesystem name, cpu core#, etc.) 
:param value: Value of the metric :param bool log_event: If true, log the collected metric in the agent log """ if log_event: from azurelinuxagent.common.version import AGENT_NAME message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) _log_event(AGENT_NAME, "METRIC", message, 0) event = TelemetryEvent(4, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Category', category)) event.parameters.append(TelemetryEventParam('Counter', counter)) event.parameters.append(TelemetryEventParam('Instance', instance)) event.parameters.append(TelemetryEventParam('Value', value)) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError as e: logger.error("{0}", e) __event_logger__ = EventLogger() def elapsed_milliseconds(utc_start): now = datetime.utcnow() if now < utc_start: return 0 d = now - utc_start return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \ (d.microseconds / 1000.0)) def report_event(op, is_success=True, message='', log_event=True): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=is_success, message=message, op=op, log_event=log_event) def report_periodic(delta, op, is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_periodic(delta, AGENT_NAME, version=CURRENT_VERSION, is_success=is_success, message=message, op=op) def report_metric(category, counter, instance, value, log_event=False, reporter=__event_logger__): """ Send a telemetry event reporting a single instance of a performance counter. :param str category: The category of the metric (cpu, memory, etc) :param str counter: The name of the metric ("%idle", etc) :param str instance: For instanced metrics, the identifier of the instance. E.g. 
a disk drive name, a cpu core# :param value: The value of the metric :param bool log_event: If True, log the metric in the agent log as well :param EventLogger reporter: The EventLogger instance to which metric events should be sent """ if reporter.event_dir is None: from azurelinuxagent.common.version import AGENT_NAME logger.warn("Cannot report metric event -- Event reporter is not initialized.") message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) _log_event(AGENT_NAME, "METRIC", message, 0) return reporter.add_metric(category, counter, instance, value, log_event) def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return if should_emit_event(name, version, op, is_success): mark_event_status(name, version, op, is_success) reporter.add_event( name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) def add_log_event(level, message, reporter=__event_logger__): if reporter.event_dir is None: return reporter.add_log_event(level, message) def add_periodic( delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, force=False, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add periodic event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return reporter.add_periodic( delta, name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event, force=force) def 
mark_event_status(name, version, op, status): if op in __event_status_operations__: __event_status__.mark_event_status(name, version, op, status) def should_emit_event(name, version, op, status): return \ op not in __event_status_operations__ or \ __event_status__ is None or \ not __event_status__.event_marked(name, version, op) or \ __event_status__.event_succeeded(name, version, op) != status def init_event_logger(event_dir): __event_logger__.event_dir = event_dir def init_event_status(status_dir): __event_status__.initialize(status_dir) def dump_unhandled_err(name): if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ hasattr(sys, 'last_traceback'): last_type = getattr(sys, 'last_type') last_value = getattr(sys, 'last_value') last_traceback = getattr(sys, 'last_traceback') error = traceback.format_exception(last_type, last_value, last_traceback) message = "".join(error) add_event(name, is_success=False, message=message, op=WALAEventOperation.UnhandledError) def enable_unhandled_err_dump(name): atexit.register(dump_unhandled_err, name) WALinuxAgent-2.2.32/azurelinuxagent/common/exception.py000066400000000000000000000076021335416306700232100ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # """ Defines all exceptions """ class AgentError(Exception): """ Base class of agent error. 
""" def __init__(self, msg, inner=None): msg = u"[{0}] {1}".format(type(self).__name__, msg) if inner is not None: msg = u"{0}\nInner error: {1}".format(msg, inner) super(AgentError, self).__init__(msg) class AgentConfigError(AgentError): """ When configure file is not found or malformed. """ def __init__(self, msg=None, inner=None): super(AgentConfigError, self).__init__(msg, inner) class AgentNetworkError(AgentError): """ When network is not available\. """ def __init__(self, msg=None, inner=None): super(AgentNetworkError, self).__init__(msg, inner) class ExtensionError(AgentError): """ When failed to execute an extension """ def __init__(self, msg=None, inner=None, code=-1): super(ExtensionError, self).__init__(msg, inner) self.code = code class ProvisionError(AgentError): """ When provision failed """ def __init__(self, msg=None, inner=None): super(ProvisionError, self).__init__(msg, inner) class ResourceDiskError(AgentError): """ Mount resource disk failed """ def __init__(self, msg=None, inner=None): super(ResourceDiskError, self).__init__(msg, inner) class DhcpError(AgentError): """ Failed to handle dhcp response """ def __init__(self, msg=None, inner=None): super(DhcpError, self).__init__(msg, inner) class OSUtilError(AgentError): """ Failed to perform operation to OS configuration """ def __init__(self, msg=None, inner=None): super(OSUtilError, self).__init__(msg, inner) class ProtocolError(AgentError): """ Azure protocol error """ def __init__(self, msg=None, inner=None): super(ProtocolError, self).__init__(msg, inner) class ProtocolNotFoundError(ProtocolError): """ Azure protocol endpoint not found """ def __init__(self, msg=None, inner=None): super(ProtocolNotFoundError, self).__init__(msg, inner) class HttpError(AgentError): """ Http request failure """ def __init__(self, msg=None, inner=None): super(HttpError, self).__init__(msg, inner) class EventError(AgentError): """ Event reporting error """ def __init__(self, msg=None, inner=None): 
super(EventError, self).__init__(msg, inner) class CryptError(AgentError): """ Encrypt/Decrypt error """ def __init__(self, msg=None, inner=None): super(CryptError, self).__init__(msg, inner) class UpdateError(AgentError): """ Update Guest Agent error """ def __init__(self, msg=None, inner=None): super(UpdateError, self).__init__(msg, inner) class ResourceGoneError(HttpError): """ The requested resource no longer exists (i.e., status code 410) """ def __init__(self, msg=None, inner=None): if msg is None: msg = "Resource is gone" super(ResourceGoneError, self).__init__(msg, inner) class RemoteAccessError(AgentError): """ Remote Access Error """ def __init__(self, msg=None, inner=None): super(RemoteAccessError, self).__init__(msg, inner) WALinuxAgent-2.2.32/azurelinuxagent/common/future.py000066400000000000000000000037451335416306700225300ustar00rootroot00000000000000import platform import sys # Note broken dependency handling to avoid potential backward # compatibility issues on different distributions try: import distro except Exception: pass """ Add alias for python2 and python3 libs and functions. 
""" if sys.version_info[0] == 3: import http.client as httpclient from urllib.parse import urlparse """Rename Python3 str to ustr""" ustr = str bytebuffer = memoryview elif sys.version_info[0] == 2: import httplib as httpclient from urlparse import urlparse """Rename Python2 unicode to ustr""" ustr = unicode bytebuffer = buffer else: raise ImportError("Unknown python version: {0}".format(sys.version_info)) def get_linux_distribution(get_full_name, supported_dists): """Abstract platform.linux_distribution() call which is deprecated as of Python 3.5 and removed in Python 3.7""" try: supported = platform._supported_dists + (supported_dists,) osinfo = list( platform.linux_distribution( full_distribution_name=get_full_name, supported_dists=supported ) ) if not osinfo or osinfo == ['', '', '']: return get_linux_distribution_from_distro(get_full_name) full_name = platform.linux_distribution()[0].strip() osinfo.append(full_name) except AttributeError: return get_linux_distribution_from_distro(get_full_name) return osinfo def get_linux_distribution_from_distro(get_full_name): """Get the distribution information from the distro Python module.""" # If we get here we have to have the distro module, thus we do # not wrap the call in a try-except block as it would mask the problem # and result in a broken agent installation osinfo = list( distro.linux_distribution( full_distribution_name=get_full_name ) ) full_name = distro.linux_distribution()[0].strip() osinfo.append(full_name) return osinfo WALinuxAgent-2.2.32/azurelinuxagent/common/logger.py000066400000000000000000000137661335416306700225010ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and openssl_bin 1.0+ # """ Log utils """ import sys from azurelinuxagent.common.future import ustr from datetime import datetime, timedelta EVERY_DAY = timedelta(days=1) EVERY_HALF_DAY = timedelta(hours=12) EVERY_HOUR = timedelta(hours=1) EVERY_HALF_HOUR = timedelta(minutes=30) EVERY_FIFTEEN_MINUTES = timedelta(minutes=15) class Logger(object): """ Logger class """ def __init__(self, logger=None, prefix=None): self.appenders = [] self.logger = self if logger is None else logger self.periodic_messages = {} self.prefix = prefix def reset_periodic(self): self.logger.periodic_messages = {} def set_prefix(self, prefix): self.prefix = prefix def is_period_elapsed(self, delta, h): return h not in self.logger.periodic_messages or \ (self.logger.periodic_messages[h] + delta) <= datetime.now() def periodic(self, delta, msg_format, *args): h = hash(msg_format) if self.is_period_elapsed(delta, h): self.info(msg_format, *args) self.logger.periodic_messages[h] = datetime.now() def verbose(self, msg_format, *args): self.log(LogLevel.VERBOSE, msg_format, *args) def info(self, msg_format, *args): self.log(LogLevel.INFO, msg_format, *args) def warn(self, msg_format, *args): self.log(LogLevel.WARNING, msg_format, *args) def error(self, msg_format, *args): self.log(LogLevel.ERROR, msg_format, *args) def log(self, level, msg_format, *args): #if msg_format is not unicode convert it to unicode if type(msg_format) is not ustr: msg_format = ustr(msg_format, errors="backslashreplace") if len(args) > 0: msg = msg_format.format(*args) else: msg = msg_format time = 
datetime.now().strftime(u'%Y/%m/%d %H:%M:%S.%f') level_str = LogLevel.STRINGS[level] if self.prefix is not None: log_item = u"{0} {1} {2} {3}\n".format(time, level_str, self.prefix, msg) else: log_item = u"{0} {1} {2}\n".format(time, level_str, msg) log_item = ustr(log_item.encode('ascii', "backslashreplace"), encoding="ascii") for appender in self.appenders: appender.write(level, log_item) if self.logger != self: for appender in self.logger.appenders: appender.write(level, log_item) def add_appender(self, appender_type, level, path): appender = _create_logger_appender(appender_type, level, path) self.appenders.append(appender) class ConsoleAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "w") as console: console.write(msg) except IOError: pass class FileAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "a+") as log_file: log_file.write(msg) except IOError: pass class StdoutAppender(object): def __init__(self, level): self.level = level def write(self, level, msg): if self.level <= level: try: sys.stdout.write(msg) except IOError: pass class TelemetryAppender(object): def __init__(self, level, event_func): self.level = level self.event_func = event_func def write(self, level, msg): if self.level <= level: try: self.event_func(level, msg) except IOError: pass #Initialize logger instance DEFAULT_LOGGER = Logger() class LogLevel(object): VERBOSE = 0 INFO = 1 WARNING = 2 ERROR = 3 STRINGS = [ "VERBOSE", "INFO", "WARNING", "ERROR" ] class AppenderType(object): FILE = 0 CONSOLE = 1 STDOUT = 2 TELEMETRY = 3 def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) def reset_periodic(): DEFAULT_LOGGER.reset_periodic() def set_prefix(prefix): DEFAULT_LOGGER.set_prefix(prefix) 
def periodic(delta, msg_format, *args): DEFAULT_LOGGER.periodic(delta, msg_format, *args) def verbose(msg_format, *args): DEFAULT_LOGGER.verbose(msg_format, *args) def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) def warn(msg_format, *args): DEFAULT_LOGGER.warn(msg_format, *args) def error(msg_format, *args): DEFAULT_LOGGER.error(msg_format, *args) def log(level, msg_format, *args): DEFAULT_LOGGER.log(level, msg_format, args) def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): if appender_type == AppenderType.CONSOLE: return ConsoleAppender(level, path) elif appender_type == AppenderType.FILE: return FileAppender(level, path) elif appender_type == AppenderType.STDOUT: return StdoutAppender(level) elif appender_type == AppenderType.TELEMETRY: return TelemetryAppender(level, path) else: raise ValueError("Unknown appender type") WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/000077500000000000000000000000001335416306700221525ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/__init__.py000066400000000000000000000012631335416306700242650ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import get_osutil WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/alpine.py000066400000000000000000000033311335416306700237740ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class AlpineOSUtil(DefaultOSUtil): def __init__(self): super(AlpineOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' self.jit_enabled = True def is_dhcp_enabled(self): return True def get_dhcp_pid(self): ret = shellutil.run_get_output('pidof dhcpcd', chk_err=False) if ret[0] == 0: logger.info('dhcpcd is pid {}'.format(ret[1])) return ret[1].strip() return None def restart_if(self, ifname): logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname)) pid = self.get_dhcp_pid() if pid != None: ret = shellutil.run_get_output('kill -HUP {}'.format(pid)) def set_ssh_client_alive_interval(self): # Alpine will handle this. pass def conf_sshd(self, disable_password): # Alpine will handle this. 
pass WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/arch.py000066400000000000000000000036471335416306700234530ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class ArchUtil(DefaultOSUtil): def __init__(self): super(ArchUtil, self).__init__() self.jit_enabled = True def is_dhcp_enabled(self): return True def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, iface): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated on CoreOS. No need to restart it. 
pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret= shellutil.run_get_output("pidof systemd-networkd") return ret[1] if ret[0] == 0 else None def conf_sshd(self, disable_password): # Don't whack the system default sshd conf passWALinuxAgent-2.2.32/azurelinuxagent/common/osutil/bigip.py000066400000000000000000000321701335416306700236210ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import array import fcntl import os import platform import re import socket import struct import time try: # WAAgent > 2.1.3 import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil except ImportError: # WAAgent <= 2.1.3 import azurelinuxagent.logger as logger import azurelinuxagent.utils.shellutil as shellutil from azurelinuxagent.exception import OSUtilError from azurelinuxagent.distro.default.osutil import DefaultOSUtil class BigIpOSUtil(DefaultOSUtil): def __init__(self): super(BigIpOSUtil, self).__init__() def _wait_until_mcpd_is_initialized(self): """Wait for mcpd to become available All configuration happens in mcpd so we need to wait that this is available before we go provisioning the system. I call this method at the first opportunity I have (during the DVD mounting call). This ensures that the rest of the provisioning does not need to wait for mcpd to be available unless it absolutely wants to. :return bool: Returns True upon success :raises OSUtilError: Raises exception if mcpd does not come up within roughly 50 minutes (100 * 30 seconds) """ for retries in range(1, 100): # Retry until mcpd completes startup: logger.info("Checking to see if mcpd is up") rc = shellutil.run("/usr/bin/tmsh -a show sys mcp-state field-fmt 2>/dev/null | grep phase | grep running", chk_err=False) if rc == 0: logger.info("mcpd is up!") break time.sleep(30) if rc is 0: return True raise OSUtilError( "mcpd hasn't completed initialization! Cannot proceed!" 
) def _save_sys_config(self): cmd = "/usr/bin/tmsh save sys config" rc = shellutil.run(cmd) if rc != 0: logger.error("WARNING: Cannot save sys config on 1st boot.") return rc def restart_ssh_service(self): return shellutil.run("/usr/bin/bigstart restart sshd", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service waagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service waagent start", chk_err=False) def register_agent_service(self): return shellutil.run("/sbin/chkconfig --add waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("/sbin/chkconfig --del waagent", chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("/sbin/pidof dhclient") return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): """Set the static hostname of the device Normally, tmsh is used to set the hostname for the system. For our purposes at this time though, I would hesitate to trust this function. Azure(Stack) uses the name that you provide in the Web UI or ARM (for example) as the value of the hostname argument to this method. The problem is that there is nowhere in the UI that specifies the restrictions and checks that tmsh has for the hostname. For example, if you set the name "bigip1" in the Web UI, Azure(Stack) considers that a perfectly valid name. When WAAgent gets around to running though, tmsh will reject that value because it is not a fully qualified domain name. The proper value should have been bigip.xxx.yyy WAAgent will not fail if this command fails, but the hostname will not be what the user set either. Currently we do not set the hostname when WAAgent starts up, so I am passing on setting it here too. 
:param hostname: The hostname to set on the device """ return None def set_dhcp_hostname(self, hostname): """Sets the DHCP hostname See `set_hostname` for an explanation of why I pass here :param hostname: The hostname to set on the device """ return None def useradd(self, username, expiration=None, comment=None): """Create user account using tmsh Our policy is to create two accounts when booting a BIG-IP instance. The first account is the one that the user specified when they did the instance creation. The second one is the admin account that is, or should be, built in to the system. :param username: The username that you want to add to the system :param expiration: The expiration date to use. We do not use this value. :param comment: description of the account. We do not use this value. """ if self.get_userentry(username): logger.info("User {0} already exists, skip useradd", username) return None cmd = "/usr/bin/tmsh create auth user %s partition-access add { all-partitions { role admin } } shell bash" % (username) retcode, out = shellutil.run_get_output(cmd, log_cmd=True, chk_err=True) if retcode != 0: raise OSUtilError( "Failed to create user account:{0}, retcode:{1}, output:{2}".format(username, retcode, out) ) self._save_sys_config() return retcode def chpasswd(self, username, password, crypt_id=6, salt_len=10): """Change a user's password with tmsh Since we are creating the user specified account and additionally changing the password of the built-in 'admin' account, both must be modified in this method. Note that the default method also checks for a "system level" of the user; based on the value of UID_MIN in /etc/login.defs. In our env, all user accounts have the UID 0. So we can't rely on this value. 
:param username: The username whose password to change :param password: The unencrypted password to set for the user :param crypt_id: If encrypting the password, the crypt_id that was used :param salt_len: If encrypting the password, the length of the salt value used to do it. """ # Start by setting the password of the user provided account cmd = "/usr/bin/tmsh modify auth user {0} password '{1}'".format(username, password) ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True) if ret != 0: raise OSUtilError( "Failed to set password for {0}: {1}".format(username, output) ) # Next, set the password of the built-in 'admin' account to be have # the same password as the user provided account userentry = self.get_userentry('admin') if userentry is None: raise OSUtilError("The 'admin' user account was not found!") cmd = "/usr/bin/tmsh modify auth user 'admin' password '{0}'".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True) if ret != 0: raise OSUtilError( "Failed to set password for 'admin': {0}".format(output) ) self._save_sys_config() return ret def del_account(self, username): """Deletes a user account. Note that the default method also checks for a "system level" of the user; based on the value of UID_MIN in /etc/login.defs. In our env, all user accounts have the UID 0. So we can't rely on this value. We also don't use sudo, so we remove that method call as well. :param username: :return: """ shellutil.run("> /var/run/utmp") shellutil.run("/usr/bin/tmsh delete auth user " + username) def get_dvd_device(self, dev_dir='/dev'): """Find BIG-IP's CD/DVD device This device is almost certainly /dev/cdrom so I added the ? to this pattern. Note that this method will return upon the first device found, but in my tests with 12.1.1 it will also find /dev/sr0 on occasion. This is NOT the correct CD/DVD device though. 
:todo: Consider just always returning "/dev/cdrom" here if that device device exists on all platforms that are supported on Azure(Stack) :param dev_dir: The root directory from which to look for devices """ patten = r'(sr[0-9]|hd[c-z]|cdrom[0-9]?)' for dvd in [re.match(patten, dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get dvd device") def mount_dvd(self, **kwargs): """Mount the DVD containing the provisioningiso.iso file This is the _first_ hook that WAAgent provides for us, so this is the point where we should wait for mcpd to load. I am just overloading this method to add the mcpd wait. Then I proceed with the stock code. :param max_retry: Maximum number of retries waagent will make when mounting the provisioningiso.iso DVD :param chk_err: Whether to check for errors or not in the mounting commands """ self._wait_until_mcpd_is_initialized() return super(BigIpOSUtil, self).mount_dvd(**kwargs) def eject_dvd(self, chk_err=True): """Runs the eject command to eject the provisioning DVD BIG-IP does not include an eject command. It is sufficient to just umount the DVD disk. But I will log that we do not support this for future reference. :param chk_err: Whether or not to check for errors raised by the eject command """ logger.warn("Eject is not supported on this platform") def get_first_if(self): """Return the interface name, and ip addr of the management interface. We need to add a struct_size check here because, curiously, our 64bit platform is identified by python in Azure(Stack) as 32 bit and without adjusting the struct_size, we can't get the information we need. I believe this may be caused by only python i686 being shipped with BIG-IP instead of python x86_64?? """ iface = '' expected = 16 # how many devices should I expect... 
python_arc = platform.architecture()[0] if python_arc == '64bit': struct_size = 40 # for 64bit the size is 40 bytes else: struct_size = 32 # for 32bit the size is 32 bytes sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) buff = array.array('B', b'\0' * (expected * struct_size)) param = struct.pack('iL', expected*struct_size, buff.buffer_info()[0]) ret = fcntl.ioctl(sock.fileno(), 0x8912, param) retsize = (struct.unpack('iL', ret)[0]) if retsize == (expected * struct_size): logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) sock = buff.tostring() for i in range(0, struct_size * expected, struct_size): iface = self._format_single_interface_name(sock, i) # Azure public was returning "lo:1" when deploying WAF if b'lo' in iface: continue else: break return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) def _format_single_interface_name(self, sock, offset): return sock[offset:offset+16].split(b'\0', 1)[0] def route_add(self, net, mask, gateway): """Add specified route using tmsh. :param net: :param mask: :param gateway: :return: """ cmd = ("/usr/bin/tmsh create net route " "{0}/{1} gw {2}").format(net, mask, gateway) return shellutil.run(cmd, chk_err=False) def device_for_ide_port(self, port_id): """Return device name attached to ide port 'n'. Include a wait in here because BIG-IP may not have yet initialized this list of devices. :param port_id: :return: """ for retries in range(1, 100): # Retry until devices are ready if os.path.exists("/sys/bus/vmbus/devices/"): break else: time.sleep(10) return super(BigIpOSUtil, self).device_for_ide_port(port_id) WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/clearlinux.py000066400000000000000000000061701335416306700246760ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class ClearLinuxUtil(DefaultOSUtil): def __init__(self): super(ClearLinuxUtil, self).__init__() self.agent_conf_file_path = '/usr/share/defaults/waagent/waagent.conf' self.jit_enabled = True def is_dhcp_enabled(self): return True def start_network(self) : return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, iface): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated. No need to restart it. 
pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret= shellutil.run_get_output("pidof systemd-networkd") return ret[1] if ret[0] == 0 else None def conf_sshd(self, disable_password): # Don't whack the system default sshd conf pass def del_root_password(self): try: passwd_file_path = conf.get_passwd_file_path() try: passwd_content = fileutil.read_file(passwd_file_path) if not passwd_content: # Empty file is no better than no file raise FileNotFoundError except FileNotFoundError: new_passwd = ["root:*LOCK*:14600::::::"] else: passwd = passwd_content.split('\n') new_passwd = [x for x in passwd if not x.startswith("root:")] new_passwd.insert(0, "root:*LOCK*:14600::::::") fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) pass WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/coreos.py000066400000000000000000000056111335416306700240210ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class CoreOSUtil(DefaultOSUtil): def __init__(self): super(CoreOSUtil, self).__init__() self.agent_conf_file_path = '/usr/share/oem/waagent.conf' self.waagent_path = '/usr/share/oem/bin/waagent' self.python_path = '/usr/share/oem/python/bin' self.jit_enabled = True if 'PATH' in os.environ: path = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: path = self.python_path os.environ['PATH'] = path if 'PYTHONPATH' in os.environ: py_path = os.environ['PYTHONPATH'] py_path = "{0}:{1}".format(py_path, self.waagent_path) else: py_path = self.waagent_path os.environ['PYTHONPATH'] = py_path def is_sys_user(self, username): # User 'core' is not a sysuser. if username == 'core': return False return super(CoreOSUtil, self).is_sys_user(username) def is_dhcp_enabled(self): return True def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, *dummy, **_): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated on CoreOS. No need to restart it. pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start waagent", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop waagent", chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("systemctl show -p MainPID " "systemd-networkd", chk_err=False) pid = ret[1].split('=', 1)[-1].strip() if ret[0] == 0 else None return pid if pid != '0' else None def conf_sshd(self, disable_password): # In CoreOS, /etc/sshd_config is mount readonly. Skip the setting. 
pass WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/debian.py000066400000000000000000000034721335416306700237540ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class DebianOSUtil(DefaultOSUtil): def __init__(self): super(DebianOSUtil, self).__init__() self.jit_enabled = True def restart_ssh_service(self): return shellutil.run("systemctl --job-mode=ignore-dependencies try-reload-or-restart ssh", chk_err=False) def stop_agent_service(self): return shellutil.run("service azurelinuxagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("service azurelinuxagent start", chk_err=False) def start_network(self): pass def remove_rules_files(self, rules_files=""): pass def restore_rules_files(self, rules_files=""): pass def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') 
WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/default.py000066400000000000000000001506241335416306700241600ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import array import base64 import datetime import errno import fcntl import glob import multiprocessing import os import platform import pwd import re import shutil import socket import struct import sys import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard from pwd import getpwall __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] """ Define distro specific behavior. OSUtil class defines default behavior for all distros. Each concrete distro classes could overwrite default behavior if needed. 
""" IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") IPTABLES_VERSION = "iptables --version" IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21') FIREWALL_ACCEPT = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m owner --uid-owner {3} -j ACCEPT" # Note: # -- Initially "flight" the change to ACCEPT packets and develop a metric baseline # A subsequent release will convert the ACCEPT to DROP # FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" FIREWALL_LIST = "iptables {0} -t security -L -nxv" FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv" FIREWALL_FLUSH = "iptables {0} -t security --flush" # Precisely delete the rules created by the agent. # this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact. FIREWALL_DELETE_CONNTRACK_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" FIREWALL_DELETE_OWNER_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m owner --uid-owner {2} -j ACCEPT" FIREWALL_DELETE_CONNTRACK_DROP = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" ALL_CPUS_REGEX = re.compile('^cpu .*') _enable_firewall = True DMIDECODE_CMD = 'dmidecode --string system-uuid' PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid' UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) IOCTL_SIOCGIFCONF = 0x8912 IOCTL_SIOCGIFFLAGS = 0x8913 IOCTL_SIOCGIFHWADDR = 0x8927 IFNAMSIZ = 16 IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') BASE_CGROUPS = '/sys/fs/cgroup' class DefaultOSUtil(object): def __init__(self): self.agent_conf_file_path = '/etc/waagent.conf' self.selinux = None self.disable_route_warning = False 
self.jit_enabled = False def get_firewall_dropped_packets(self, dst_ip=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return 0 try: wait = self.get_firewall_will_wait() rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait), log_cmd=False) if rc == 3: # Transient error that we ignore. This code fires every loop # of the daemon (60m), so we will get the value eventually. return 0 if rc != 0: return -1 pattern = re.compile(PACKET_PATTERN.format(dst_ip)) for line in output.split('\n'): m = pattern.match(line) if m is not None: return int(m.group(1)) return 0 except Exception as e: _enable_firewall = False logger.warn("Unable to retrieve firewall packets dropped" "{0}".format(ustr(e))) return -1 def get_firewall_will_wait(self): # Determine if iptables will serialize access rc, output = shellutil.run_get_output(IPTABLES_VERSION) if rc != 0: msg = "Unable to determine version of iptables" logger.warn(msg) raise Exception(msg) m = IPTABLES_VERSION_PATTERN.match(output) if m is None: msg = "iptables did not return version information" logger.warn(msg) raise Exception(msg) wait = "-w" \ if FlexibleVersion(m.group(1)) >= IPTABLES_LOCKING_VERSION \ else "" return wait def _delete_rule(self, rule): """ Continually execute the delete operation until the return code is non-zero or the limit has been reached. """ for i in range(1, 100): rc = shellutil.run(rule, chk_err=False) if rc == 1: return elif rc == 2: raise Exception("invalid firewall deletion rule '{0}'".format(rule)) def remove_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: if dst_ip is None or uid is None: msg = "Missing arguments to enable_firewall" logger.warn(msg) raise Exception(msg) wait = self.get_firewall_will_wait() # This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25 # has aged out, keep this cleanup in place. 
self._delete_rule(FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip)) self._delete_rule(FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst_ip, uid)) self._delete_rule(FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst_ip)) return True except Exception as e: _enable_firewall = False logger.info("Unable to remove firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False def enable_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: if dst_ip is None or uid is None: msg = "Missing arguments to enable_firewall" logger.warn(msg) raise Exception(msg) wait = self.get_firewall_will_wait() # If the DROP rule exists, make no changes drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip) rc = shellutil.run(drop_rule, chk_err=False) if rc == 0: logger.verbose("Firewall appears established") return True elif rc == 2: self.remove_firewall(dst_ip, uid) msg = "please upgrade iptables to a version that supports the -C option" logger.warn(msg) raise Exception(msg) # Otherwise, append both rules accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid) drop_rule = FIREWALL_DROP.format(wait, "A", dst_ip) if shellutil.run(accept_rule) != 0: msg = "Unable to add ACCEPT firewall rule '{0}'".format( accept_rule) logger.warn(msg) raise Exception(msg) if shellutil.run(drop_rule) != 0: msg = "Unable to add DROP firewall rule '{0}'".format( drop_rule) logger.warn(msg) raise Exception(msg) logger.info("Successfully added Azure fabric firewall rules") rc, output = shellutil.run_get_output(FIREWALL_LIST.format(wait)) if rc == 0: logger.info("Firewall rules:\n{0}".format(output)) else: logger.warn("Listing firewall rules failed: {0}".format(output)) return True except Exception as e: _enable_firewall = False logger.info("Unable to establish firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False def _correct_instance_id(self, id): ''' Azure 
stores the instance ID with an incorrect byte ordering for the first parts. For example, the ID returned by the metadata service: D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8 will be found as: 544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8 This code corrects the byte order such that it is consistent with that returned by the metadata service. ''' if not UUID_PATTERN.match(id): return id parts = id.split('-') return '-'.join([ textutil.swap_hexstring(parts[0], width=2), textutil.swap_hexstring(parts[1], width=2), textutil.swap_hexstring(parts[2], width=2), parts[3], parts[4] ]) def is_current_instance_id(self, id_that): ''' Compare two instance IDs for equality, but allow that some IDs may have been persisted using the incorrect byte ordering. ''' id_this = self.get_instance_id() return id_that == id_this or \ id_that == self._correct_instance_id(id_this) @staticmethod def is_cgroups_supported(): """ Enabled by default; disabled in WSL/Travis """ is_wsl = '-Microsoft-' in platform.platform() is_travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' base_fs_exists = os.path.exists(BASE_CGROUPS) return not is_wsl and not is_travis and base_fs_exists @staticmethod def _cgroup_path(tail=""): return os.path.join(BASE_CGROUPS, tail).rstrip(os.path.sep) def mount_cgroups(self): try: path = self._cgroup_path() if not os.path.exists(path): fileutil.mkdir(path) self.mount(device='cgroup_root', mount_point=path, option="-t tmpfs", chk_err=False) elif not os.path.isdir(self._cgroup_path()): logger.error("Could not mount cgroups: ordinary file at {0}".format(path)) return for metric_hierarchy in ['cpu,cpuacct', 'memory']: target_path = self._cgroup_path(metric_hierarchy) if not os.path.exists(target_path): fileutil.mkdir(target_path) self.mount(device=metric_hierarchy, mount_point=target_path, option="-t cgroup -o {0}".format(metric_hierarchy), chk_err=False) for metric_hierarchy in ['cpu', 'cpuacct']: target_path = self._cgroup_path(metric_hierarchy) if not 
os.path.exists(target_path): os.symlink(self._cgroup_path('cpu,cpuacct'), target_path) except OSError as oe: # log a warning for read-only file systems logger.warn("Could not mount cgroups: {0}", ustr(oe)) raise except Exception as e: logger.error("Could not mount cgroups: {0}", ustr(e)) raise def get_agent_conf_file_path(self): return self.agent_conf_file_path def get_instance_id(self): ''' Azure records a UUID as the instance ID First check /sys/class/dmi/id/product_uuid. If that is missing, then extracts from dmidecode If nothing works (for old VMs), return the empty string ''' if os.path.isfile(PRODUCT_ID_FILE): s = fileutil.read_file(PRODUCT_ID_FILE).strip() else: rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" return self._correct_instance_id(s.strip()) def get_userentry(self, username): try: return pwd.getpwnam(username) except KeyError: return None def is_sys_user(self, username): """ Check whether use is a system user. If reset sys user is allowed in conf, return False Otherwise, check whether UID is less than UID_MIN """ if conf.get_allow_reset_sys_user(): return False userentry = self.get_userentry(username) uidmin = None try: uidmin_def = fileutil.get_line_startingwith("UID_MIN", "/etc/login.defs") if uidmin_def is not None: uidmin = int(uidmin_def.split()[1]) except IOError as e: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: return True else: return False def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.info("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "useradd -m {0} -e {1}".format(username, expiration) else: cmd = "useradd -m {0}".format(username) if comment is not None: cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise 
OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def get_users(self): return getpwall() def conf_sudoer(self, username, nopasswd=False, remove=False): sudoers_dir = conf.get_sudoers_dir() sudoers_wagent = os.path.join(sudoers_dir, 'waagent') if not remove: # for older distros create sudoers.d if not os.path.isdir(sudoers_dir): # create the sudoers.d directory fileutil.mkdir(sudoers_dir) # add the include of sudoers.d to the /etc/sudoers sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers') include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir) fileutil.append_file(sudoers_file, include_sudoers_dir) sudoer = None if nopasswd: sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username) else: sudoer = "{0} ALL=(ALL) ALL".format(username) if not os.path.isfile(sudoers_wagent) or \ fileutil.findstr_in_file(sudoers_wagent, sudoer) is False: fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer)) fileutil.chmod(sudoers_wagent, 0o440) else: # remove user from sudoers if os.path.isfile(sudoers_wagent): try: content = fileutil.read_file(sudoers_wagent) sudoers = content.split("\n") sudoers = [x for x in sudoers if username not in x] fileutil.write_file(sudoers_wagent, "\n".join(sudoers)) except IOError as e: raise OSUtilError("Failed to remove sudoer: {0}".format(e)) def del_root_password(self): try: passwd_file_path = conf.get_passwd_file_path() passwd_content = fileutil.read_file(passwd_file_path) passwd = 
passwd_content.split('\n') new_passwd = [x for x in passwd if not x.startswith("root:")] new_passwd.insert(0, "root:*LOCK*:14600::::::") fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) def _norm_path(self, filepath): home = conf.get_home_dir() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) return path def deploy_ssh_keypair(self, username, keypair): """ Deploy id_rsa and id_rsa.pub """ path, thumbprint = keypair path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) lib_dir = conf.get_lib_dir() prv_path = os.path.join(lib_dir, thumbprint + '.prv') if not os.path.isfile(prv_path): raise OSUtilError("Can't find {0}.prv".format(thumbprint)) shutil.copyfile(prv_path, path) pub_path = path + '.pub' crytputil = CryptUtil(conf.get_openssl_cmd()) pub = crytputil.get_pubkey_from_prv(prv_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') os.chmod(path, 0o644) os.chmod(pub_path, 0o600) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.crt_to_ssh(input_file, output_file) def deploy_ssh_pubkey(self, username, pubkey): """ Deploy authorized_key """ path, thumbprint, value = pubkey if path is None: raise OSUtilError("Public key path is None") crytputil = CryptUtil(conf.get_openssl_cmd()) path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) if value is not None: if not value.startswith("ssh-"): raise OSUtilError("Bad public key: {0}".format(value)) fileutil.write_file(path, value) elif thumbprint is not None: lib_dir = conf.get_lib_dir() crt_path = os.path.join(lib_dir, thumbprint + '.crt') if not 
os.path.isfile(crt_path): raise OSUtilError("Can't find {0}.crt".format(thumbprint)) pub_path = os.path.join(lib_dir, thumbprint + '.pub') pub = crytputil.get_pubkey_from_crt(crt_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.openssl_to_openssh(pub_path, path) fileutil.chmod(pub_path, 0o600) else: raise OSUtilError("SSH public key Fingerprint and Value are None") self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') fileutil.chowner(path, username) fileutil.chmod(path, 0o644) def is_selinux_system(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if shellutil.run("which getenforce", chk_err=False) == 0: self.selinux = True else: self.selinux = False return self.selinux def is_selinux_enforcing(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.is_selinux_system(): output = shellutil.run_get_output("getenforce")[1] return output.startswith("Enforcing") else: return False def set_selinux_context(self, path, con): """ Calls shell 'chcon' with 'path' and 'con' context. Returns exit result. """ if self.is_selinux_system(): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) return 1 return shellutil.run('chcon ' + con + ' ' + path) def conf_sshd(self, disable_password): option = "no" if disable_password else "yes" conf_file_path = conf.get_sshd_conf_file_path() conf_file = fileutil.read_file(conf_file_path).split("\n") textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option) textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval())) fileutil.write_file(conf_file_path, "\n".join(conf_file)) logger.info("{0} SSH password-based authentication methods." 
.format("Disabled" if disable_password else "Enabled")) logger.info("Configured SSH client probing to keep connections alive.") def get_dvd_device(self, dev_dir='/dev'): pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])' device_list = os.listdir(dev_dir) for dvd in [re.match(pattern, dev) for dev in device_list]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) inner_detail = "The following devices were found, but none matched " \ "the pattern [{0}]: {1}\n".format(pattern, device_list) raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir), inner=inner_detail) def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() mount_list = shellutil.run_get_output("mount")[1] existing = self.get_mount_point(mount_list, dvd_device) if existing is not None: # already mounted logger.info("{0} is already mounted at {1}", dvd_device, existing) return if not os.path.isdir(mount_point): os.makedirs(mount_point) err = '' for retry in range(1, max_retry): return_code, err = self.mount(dvd_device, mount_point, option="-o ro -t udf,iso9660", chk_err=False) if return_code == 0: logger.info("Successfully mounted dvd") return else: logger.warn( "Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]", retry, max_retry - 1, sleep_time) if retry < max_retry: time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount dvd device", inner=err) def umount_dvd(self, chk_err=True, mount_point=None): if mount_point is None: mount_point = conf.get_dvd_mount_point() return_code = self.umount(mount_point, chk_err=chk_err) if chk_err and return_code != 0: raise OSUtilError("Failed to unmount dvd device at {0}", mount_point) def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: 
ret={0}".format(retcode)) def try_load_atapiix_mod(self): try: self.load_atapiix_mod() except Exception as e: logger.warn("Could not load ATAPI driver: {0}".format(e)) def load_atapiix_mod(self): if self.is_atapiix_mod_loaded(): return ret, kern_version = shellutil.run_get_output("uname -r") if ret != 0: raise Exception("Failed to call uname -r") mod_path = os.path.join('/lib/modules', kern_version.strip('\n'), 'kernel/drivers/ata/ata_piix.ko') if not os.path.isfile(mod_path): raise Exception("Can't find module file:{0}".format(mod_path)) ret, output = shellutil.run_get_output("insmod " + mod_path) if ret != 0: raise Exception("Error calling insmod for ATAPI CD-ROM driver") if not self.is_atapiix_mod_loaded(max_retry=3): raise Exception("Failed to load ATAPI CD-ROM driver") def is_atapiix_mod_loaded(self, max_retry=1): for retry in range(0, max_retry): ret = shellutil.run("lsmod | grep ata_piix", chk_err=False) if ret == 0: logger.info("Module driver for ATAPI CD-ROM is already present.") return True if retry < max_retry - 1: time.sleep(1) return False def mount(self, device, mount_point, option="", chk_err=True): cmd = "mount {0} {1} {2}".format(option, device, mount_point) retcode, err = shellutil.run_get_output(cmd, chk_err) if retcode != 0: detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err) err = detail return retcode, err def umount(self, mount_point, chk_err=True): return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) def allow_dhcp_broadcast(self): # Open DHCP port if iptables is enabled. # We supress error logging on error. 
shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) def remove_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for src in rules_files: file_name = fileutil.base_name(src) dest = os.path.join(lib_dir, file_name) if os.path.isfile(dest): os.remove(dest) if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", file_name, dest) shutil.move(src, dest) def restore_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for dest in rules_files: filename = fileutil.base_name(dest) src = os.path.join(lib_dir, filename) if os.path.isfile(dest): continue if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", filename, dest) shutil.move(src, dest) def get_mac_addr(self): """ Convenience function, returns mac addr bound to first non-loopback interface. """ ifname = self.get_if_name() addr = self.get_if_mac(ifname) return textutil.hexstr_to_bytearray(addr) def get_if_mac(self, ifname): """ Return the mac-address bound to the socket. """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param) sock.close() return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) @staticmethod def _get_struct_ifconf_size(): """ Return the sizeof struct ifinfo. On 64-bit platforms the size is 40 bytes; on 32-bit platforms the size is 32 bytes. """ python_arc = platform.architecture()[0] struct_size = 32 if python_arc == '32bit' else 40 return struct_size def _get_all_interfaces(self): """ Return a dictionary mapping from interface name to IPv4 address. Interfaces without a name are ignored. """ expected=16 # how many devices should I expect... 
struct_size = DefaultOSUtil._get_struct_ifconf_size() array_size = expected * struct_size buff = array.array('B', b'\0' * array_size) param = struct.pack('iL', array_size, buff.buffer_info()[0]) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param) retsize = (struct.unpack('iL', ret)[0]) sock.close() if retsize == array_size: logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) ifconf_buff = buff.tostring() ifaces = {} for i in range(0, array_size, struct_size): iface = ifconf_buff[i:i+IFNAMSIZ].split(b'\0', 1)[0] if len(iface) > 0: iface_name = iface.decode('latin-1') if iface_name not in ifaces: ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i+20:i+24]) return ifaces def get_first_if(self): """ Return the interface name, and IPv4 addr of the "primary" interface or, failing that, any active non-loopback interface. """ primary = self.get_primary_interface() ifaces = self._get_all_interfaces() if primary in ifaces: return primary, ifaces[primary] for iface_name in ifaces.keys(): if not self.is_loopback(iface_name): logger.info("Choosing non-primary [{0}]".format(iface_name)) return iface_name, ifaces[iface_name] return '', '' @staticmethod def _build_route_list(proc_net_route): """ Construct a list of network route entries :param list(str) proc_net_route: Route table lines, including headers, containing at least one route :return: List of network route objects :rtype: list(RouteEntry) """ idx = 0 column_index = {} header_line = proc_net_route[0] for header in filter(lambda h: len(h) > 0, header_line.split("\t")): column_index[header.strip()] = idx idx += 1 try: idx_iface = column_index["Iface"] idx_dest = column_index["Destination"] idx_gw = column_index["Gateway"] idx_flags = column_index["Flags"] idx_metric = column_index["Metric"] idx_mask = column_index["Mask"] except KeyError: msg = "/proc/net/route is missing key information; headers are 
[{0}]".format(header_line) logger.error(msg) return [] route_list = [] for entry in proc_net_route[1:]: route = entry.split("\t") if len(route) > 0: route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask], route[idx_flags], route[idx_metric]) route_list.append(route_obj) return route_list def read_route_table(self): """ Return a list of strings comprising the route table, including column headers. Each line is stripped of leading or trailing whitespace but is otherwise unmolested. :return: Entries in the text route table :rtype: list(str) """ try: with open('/proc/net/route') as routing_table: return list(map(str.strip, routing_table.readlines())) except Exception as e: logger.error("Cannot read route table [{0}]", ustr(e)) return [] def get_list_of_routes(self, route_table): """ Construct a list of all network routes known to this system. :param list(str) route_table: List of text entries from route table, including headers :return: a list of network routes :rtype: list(RouteEntry) """ route_list = [] count = len(route_table) if count < 1: logger.error("/proc/net/route is missing headers") elif count == 1: logger.error("/proc/net/route contains no routes") else: route_list = DefaultOSUtil._build_route_list(route_table) return route_list def get_primary_interface(self): """ Get the name of the primary interface, which is the one with the default route attached to it; if there are multiple default routes, the primary has the lowest Metric. 
:return: the interface which has the default route """ # from linux/route.h RTF_GATEWAY = 0x02 DEFAULT_DEST = "00000000" hdr_iface = "Iface" hdr_dest = "Destination" hdr_flags = "Flags" hdr_metric = "Metric" idx_iface = -1 idx_dest = -1 idx_flags = -1 idx_metric = -1 primary = None primary_metric = None if not self.disable_route_warning: logger.info("Examine /proc/net/route for primary interface") with open('/proc/net/route') as routing_table: idx = 0 for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): if header == hdr_iface: idx_iface = idx elif header == hdr_dest: idx_dest = idx elif header == hdr_flags: idx_flags = idx elif header == hdr_metric: idx_metric = idx idx = idx + 1 for entry in routing_table.readlines(): route = entry.strip(" \n").split("\t") if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY: metric = int(route[idx_metric]) iface = route[idx_iface] if primary is None or metric < primary_metric: primary = iface primary_metric = metric if primary is None: primary = '' if not self.disable_route_warning: with open('/proc/net/route') as routing_table_fh: routing_table_text = routing_table_fh.read() logger.warn('Could not determine primary interface, ' 'please ensure /proc/net/route is correct') logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text)) logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: logger.info('Primary interface is [{0}]'.format(primary)) self.disable_route_warning = False return primary def is_primary_interface(self, ifname): """ Indicate whether the specified interface is the primary. :param ifname: the name of the interface - eth0, lo, etc. :return: True if this interface binds the default route """ return self.get_primary_interface() == ifname def is_loopback(self, ifname): """ Determine if a named interface is loopback. 
""" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) ifname_buff = ifname + ('\0'*256) result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff) flags, = struct.unpack('H', result[16:18]) isloopback = flags & 8 == 8 if not self.disable_route_warning: logger.info('interface [{0}] has flags [{1}], ' 'is loopback [{2}]'.format(ifname, flags, isloopback)) s.close() return isloopback def get_dhcp_lease_endpoint(self): """ OS specific, this should return the decoded endpoint of the wireserver from option 245 in the dhcp leases file if it exists on disk. :return: The endpoint if available, or None """ return None @staticmethod def get_endpoint_from_leases_path(pathglob): """ Try to discover and decode the wireserver endpoint in the specified dhcp leases path. :param pathglob: The path containing dhcp lease files :return: The endpoint if available, otherwise None """ endpoint = None HEADER_LEASE = "lease" HEADER_OPTION_245 = "option unknown-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" option_245_re = re.compile(r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);') logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION_245 in leases: cached_endpoint = None option_245_match = None expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None expired = True elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split(" ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except: logger.error("could not parse expiry token '{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( cached_endpoint, option_245_match is not 
None, expired)) if not expired and cached_endpoint is not None: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching else: option_245_match = option_245_re.match(line) if option_245_match is not None: cached_endpoint = '{0}.{1}.{2}.{3}'.format( int(option_245_match.group(1), 16), int(option_245_match.group(2), 16), int(option_245_match.group(3), 16), int(option_245_match.group(4), 16)) if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def is_missing_default_route(self): route_cmd = "ip route show" routes = shellutil.run_get_output(route_cmd)[1] for route in routes.split("\n"): if route.startswith("0.0.0.0 ") or route.startswith("default "): return False return True def get_if_name(self): if_name = '' if_found = False while not if_found: if_name = self.get_first_if()[0] if_found = len(if_name) >= 2 if not if_found: time.sleep(2) return if_name def get_ip4_addr(self): return self.get_first_if()[1] def set_route_for_dhcp_broadcast(self, ifname): route_cmd = "ip route add" return shellutil.run("{0} 255.255.255.255 dev {1}".format( route_cmd, ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): route_cmd = "ip route del" shellutil.run("{0} 255.255.255.255 dev {1}".format(route_cmd, ifname), chk_err=False) def is_dhcp_available(self): return (True, '') def is_dhcp_enabled(self): return False def stop_dhcp_service(self): pass def start_dhcp_service(self): pass def start_network(self): pass def start_agent_service(self): pass def stop_agent_service(self): pass def register_agent_service(self): pass def unregister_agent_service(self): pass def restart_ssh_service(self): pass def route_add(self, net, mask, gateway): """ Add specified route """ cmd = "ip route add {0} via {1}".format(net, gateway) return shellutil.run(cmd, chk_err=False) def get_dhcp_pid(self): ret = 
shellutil.run_get_output("pidof dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): fileutil.write_file('/etc/hostname', hostname) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): autosend = r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])' dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf'] for conf_file in dhclient_files: if not os.path.isfile(conf_file): continue if fileutil.findre_in_file(conf_file, autosend): #Return if auto send host-name is configured return fileutil.update_conf_file(conf_file, 'send host-name', 'send host-name "{0}";'.format(hostname)) def restart_if(self, ifname, retries=3, wait=5): retry_limit=retries+1 for attempt in range(1, retry_limit): return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname)) if return_code == 0: return logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) if attempt < retry_limit: logger.info("retrying in {0} seconds".format(wait)) time.sleep(wait) else: logger.warn("exceeded restart retries") def publish_hostname(self, hostname): self.set_dhcp_hostname(hostname) self.set_hostname_record(hostname) ifname = self.get_if_name() self.restart_if(ifname) def set_scsi_disks_timeout(self, timeout): for dev in os.listdir("/sys/block"): if dev.startswith('sd'): self.set_block_device_timeout(dev, timeout) def set_block_device_timeout(self, dev, timeout): if dev is not None and timeout is not None: file_path = "/sys/block/{0}/device/timeout".format(dev) content = fileutil.read_file(file_path) original = content.splitlines()[0].rstrip() if original != timeout: fileutil.write_file(file_path, timeout) logger.info("Set block dev timeout: {0} with timeout: {1}", dev, timeout) def get_mount_point(self, mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type 
devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 device = None path = "/sys/bus/vmbus/devices/" if os.path.exists(path): try: for vmbus in os.listdir(path): deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) guid = deviceid.lstrip('{').split('-') if guid[0] == g0 and guid[1] == "000" + ustr(port_id): for root, dirs, files in os.walk(path + vmbus): if root.endswith("/block"): device = dirs[0] break else: # older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] break break except OSError as oe: logger.warn('Could not obtain device for IDE port {0}: {1}', port_id, ustr(oe)) return device def set_hostname_record(self, hostname): fileutil.write_file(conf.get_published_hostname(), contents=hostname) def get_hostname_record(self): hostname_record = conf.get_published_hostname() if not os.path.exists(hostname_record): # this file is created at provisioning time with agents >= 2.2.3 hostname = socket.gethostname() logger.info('Hostname record does not exist, ' 'creating [{0}] with hostname [{1}]', hostname_record, hostname) self.set_hostname_record(hostname) record = fileutil.read_file(hostname_record) return record def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. 
Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -f -r " + username) self.conf_sudoer(username, remove=True) def decode_customdata(self, data): return base64.b64decode(data).decode('utf-8') def get_total_mem(self): # Get total memory in bytes and divide by 1024**2 to get the value in MB. return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024**2) def get_processor_cores(self): return multiprocessing.cpu_count() def check_pid_alive(self, pid): try: pid = int(pid) os.kill(pid, 0) except (ValueError, TypeError): return False except OSError as e: if e.errno == errno.EPERM: return True return False return True @property def is_64bit(self): return sys.maxsize > 2**32 @staticmethod def _get_proc_stat(): """ Get the contents of /proc/stat. # cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0 # cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0 # cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0 :return: A single string with the contents of /proc/stat :rtype: str """ results = None try: results = fileutil.read_file('/proc/stat') except (OSError, IOError) as ex: logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror)) return results @staticmethod def get_total_cpu_ticks_since_boot(): """ Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot. :return: int """ system_cpu = 0 proc_stat = DefaultOSUtil._get_proc_stat() if proc_stat is not None: for line in proc_stat.splitlines(): if ALL_CPUS_REGEX.match(line): system_cpu = sum(int(i) for i in line.split()[1:7]) break return system_cpu def get_nic_state(self): """ Capture NIC state (IPv4 and IPv6 addresses plus link state). 
:return: Dictionary of NIC state objects, with the NIC name as key :rtype: dict(str,NetworkInformationCard) """ state = {} status, output = shellutil.run_get_output("ip -a -d -o link", chk_err=False, log_cmd=False) """ 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 2: eth0: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 3: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64 """ if status != 0: logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) return {} for entry in output.splitlines(): result = IP_COMMAND_OUTPUT.match(entry) if result: name = result.group(1) state[name] = NetworkInterfaceCard(name, result.group(2)) self._update_nic_state(state, "ip -4 -a -d -o address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") """ 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever """ self._update_nic_state(state, "ip -6 -a -d -o address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") """ 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever """ return state def _update_nic_state(self, state, ip_command, handler, description): """ Update the state of NICs based on the output of a specified ip subcommand. 
:param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects :param str ip_command: The ip command to run :param handler: A method on the NetworkInterfaceCard class :param str description: Description of the particular information being added to the state """ status, output = shellutil.run_get_output(ip_command, chk_err=True) if status != 0: return for entry in output.splitlines(): result = IP_COMMAND_OUTPUT.match(entry) if result: interface_name = result.group(1) if interface_name in state: handler(state[interface_name], result.group(2)) else: logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/factory.py000066400000000000000000000076211335416306700242010ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import * from .default import DefaultOSUtil from .arch import ArchUtil from .clearlinux import ClearLinuxUtil from .coreos import CoreOSUtil from .debian import DebianOSUtil from .freebsd import FreeBSDOSUtil from .openbsd import OpenBSDOSUtil from .redhat import RedhatOSUtil, Redhat6xOSUtil from .suse import SUSEOSUtil, SUSE11OSUtil from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil from .alpine import AlpineOSUtil from .bigip import BigIpOSUtil from .gaia import GaiaOSUtil from .iosxe import IosxeOSUtil from .nsbsd import NSBSDOSUtil from distutils.version import LooseVersion as Version def get_osutil(distro_name=DISTRO_NAME, distro_code_name=DISTRO_CODE_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "arch": return ArchUtil() if distro_name == "clear linux os for intel architecture" \ or distro_name == "clear linux software for intel architecture": return ClearLinuxUtil() if distro_name == "ubuntu": if Version(distro_version) in [Version("12.04"), Version("12.10")]: return Ubuntu12OSUtil() elif Version(distro_version) in [Version("14.04"), Version("14.10")]: return Ubuntu14OSUtil() elif Version(distro_version) in [Version('16.04'), Version('16.10'), Version('17.04')]: return Ubuntu16OSUtil() elif Version(distro_version) in [Version('18.04')]: return Ubuntu18OSUtil() elif distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() else: return UbuntuOSUtil() if distro_name == "alpine": return AlpineOSUtil() if distro_name == "kali": return DebianOSUtil() if distro_name == "coreos" or distro_code_name == "coreos": return CoreOSUtil() if distro_name in ("suse", "sles", "opensuse"): if distro_full_name == 'SUSE Linux Enterprise Server' \ and Version(distro_version) < Version('12') \ or distro_full_name == 'openSUSE' and 
Version(distro_version) < Version('13.2'): return SUSE11OSUtil() else: return SUSEOSUtil() elif distro_name == "debian": return DebianOSUtil() elif distro_name == "redhat" \ or distro_name == "centos" \ or distro_name == "oracle": if Version(distro_version) < Version("7"): return Redhat6xOSUtil() else: return RedhatOSUtil() elif distro_name == "euleros": return RedhatOSUtil() elif distro_name == "freebsd": return FreeBSDOSUtil() elif distro_name == "openbsd": return OpenBSDOSUtil() elif distro_name == "bigip": return BigIpOSUtil() elif distro_name == "gaia": return GaiaOSUtil() elif distro_name == "iosxe": return IosxeOSUtil() elif distro_name == "nsbsd": return NSBSDOSUtil() else: logger.warn("Unable to load distro implementation for {0}. Using " "default distro implementation instead.", distro_name) return DefaultOSUtil() WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/freebsd.py000066400000000000000000000240601335416306700241400ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.future import ustr class FreeBSDOSUtil(DefaultOSUtil): def __init__(self): super(FreeBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False self.jit_enabled = True def set_hostname(self, hostname): rc_file_path = '/etc/rc.conf' conf_file = fileutil.read_file(rc_file_path).split("\n") textutil.set_ini_config(conf_file, "hostname", hostname) fileutil.write_file(rc_file_path, "\n".join(conf_file)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('service sshd restart', chk_err=False) def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.warn("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "pw useradd {0} -e {1} -m".format(username, expiration) else: cmd = "pw useradd {0} -m".format(username) if comment is not None: cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. 
Will not delete it.", username) shellutil.run('> /var/run/utx.active') shellutil.run('rmuser -y ' + username) self.conf_sudoer(username, remove=True) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "echo '{0}'|pw usermod {1} -H 0 ".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): err = shellutil.run('pw usermod root -h -') if err: raise OSUtilError("Failed to delete root password: Failed to update password database.") def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): """ For FreeBSD, the default broadcast goes to current default gw, not a all-ones broadcast address, need to specify the route manually to get it work in a VNET environment. 
SEE ALSO: man ip(4) IP_ONESBCAST, """ return True def is_dhcp_enabled(self): return True def start_dhcp_service(self): shellutil.run("/etc/rc.d/dhclient start {0}".format(self.get_if_name()), chk_err=False) def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def get_dhcp_pid(self): ret = shellutil.run_get_output("pgrep -n dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdcontrol -f {0} eject".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) def restart_if(self, ifname): # Restart dhclient only to publish hostname shellutil.run("/etc/rc.d/dhclient restart {0}".format(ifname), chk_err=False) def get_total_mem(self): cmd = "sysctl hw.physmem |awk '{print $2}'" ret, output = shellutil.run_get_output(cmd) if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl hw.ncpu |awk '{print $2}'") if ret: raise OSUtilError("Failed to get processor cores.") try: return int(output) except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): if self._scsi_disks_timeout_set: return ret, output = shellutil.run_get_output('sysctl kern.cam.da.default_timeout={0}'.format(timeout)) if ret: raise OSUtilError("Failed set SCSI disks timeout: {0}".format(output)) self._scsi_disks_timeout_set = True def check_pid_alive(self, pid): return shellutil.run('ps -p {0}'.format(pid), chk_err=False) 
== 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' err, output = shellutil.run_get_output('ifconfig -l ether', chk_err=False) if err: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] err, output = shellutil.run_get_output('ifconfig ' + iface, chk_err=False) if err: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('ether ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 err, output = shellutil.run_get_output('sysctl dev.storvsc | grep pnpinfo | grep deviceid=') if err: return None g1 = "000" + ustr(port_id) g0g1 = "{0}-{1}".format(g0, g1) """ search 'X' from 'dev.storvsc.X.%pnpinfo: classid=32412632-86cb-44a2-9b5c-50d1417354f5 deviceid=00000000-0001-8899-0000-000000000000' """ cmd_search_ide = "sysctl dev.storvsc | grep pnpinfo | grep deviceid={0}".format(g0g1) err, output = shellutil.run_get_output(cmd_search_ide) if err: return None cmd_extract_id = cmd_search_ide + "|awk -F . 
'{print $3}'" err, output = shellutil.run_get_output(cmd_extract_id) """ try to search 'blkvscX' and 'storvscX' to find device name """ output = output.rstrip() cmd_search_blkvsc = "camcontrol devlist -b | grep blkvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_blkvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible cmd_search_storvsc = "camcontrol devlist -b | grep storvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_storvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible return None @staticmethod def get_total_cpu_ticks_since_boot(): return 0 WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/gaia.py000066400000000000000000000160441335416306700234320ustar00rootroot00000000000000# # Copyright 2017 Check Point Software Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import socket import struct import time import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr, bytebuffer import azurelinuxagent.common.logger as logger from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.utils.cryptutil import CryptUtil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil class GaiaOSUtil(DefaultOSUtil): def __init__(self): super(GaiaOSUtil, self).__init__() def _run_clish(self, cmd, log_cmd=True): for i in xrange(10): ret, out = shellutil.run_get_output( "/bin/clish -s -c '" + cmd + "'", log_cmd=log_cmd) if not ret: break if 'NMSHST0025' in out: # Entry for [hostname] already present ret = 0 break time.sleep(2) return ret, out def useradd(self, username, expiration=None): logger.warn('useradd is not supported on GAiA') def chpasswd(self, username, password, crypt_id=6, salt_len=10): logger.info('chpasswd') passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) ret, out = self._run_clish( 'set user admin password-hash ' + passwd_hash, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format('admin', out)) def conf_sudoer(self, username, nopasswd=False, remove=False): logger.info('conf_sudoer is not supported on GAiA') def del_root_password(self): logger.info('del_root_password') ret, out = self._run_clish('set user admin password-hash *LOCK*') if ret != 0: raise OSUtilError("Failed to delete root password") def _replace_user(self, path, username): if path.startswith('$HOME'): path = '/home' + path[5:] parts = path.split('/') parts[2] = username return '/'.join(parts) def deploy_ssh_keypair(self, username, keypair): logger.info('deploy_ssh_keypair') username = 'admin' path, thumbprint = 
keypair path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_keypair( username, (path, thumbprint)) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) ret, out = shellutil.run_get_output( conf.get_openssl_cmd() + " rsa -pubin -noout -text -in '" + input_file + "'") if ret != 0: raise OSUtilError('openssl failed with {0}'.format(ret)) modulus = [] exponent = [] buf = None for line in out.split('\n'): if line.startswith('Modulus:'): buf = modulus buf.append(line) continue if line.startswith('Exponent:'): buf = exponent buf.append(line) continue if buf and line: buf.append(line.strip().replace(':', '')) def text_to_num(buf): if len(buf) == 1: return int(buf[0].split()[1]) return long(''.join(buf[1:]), 16) n = text_to_num(modulus) e = text_to_num(exponent) keydata = bytearray() keydata.extend(struct.pack('>I', len('ssh-rsa'))) keydata.extend(b'ssh-rsa') keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(e)))) keydata.extend(cryptutil.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(n)) + 1)) keydata.extend(b'\0') keydata.extend(cryptutil.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) fileutil.write_file(output_file, ustr(b'ssh-rsa ' + keydata_base64 + b'\n', encoding='utf-8')) def deploy_ssh_pubkey(self, username, pubkey): logger.info('deploy_ssh_pubkey') username = 'admin' path, thumbprint, value = pubkey path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_pubkey( username, (path, thumbprint, value)) def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on GAiA') def mount(self, device, mount_point, option="", chk_err=True): logger.info('mount {0} {1} {2}', device, mount_point, option) if 'udf,iso9660' in option: ret, out = super(GaiaOSUtil, self).mount( device, mount_point, option=option.replace('udf,iso9660', 'udf'), chk_err=chk_err) if not ret: return ret, out return super(GaiaOSUtil, 
self).mount( device, mount_point, option=option, chk_err=chk_err) def allow_dhcp_broadcast(self): logger.info('allow_dhcp_broadcast is ignored on GAiA') def remove_rules_files(self, rules_files=''): pass def restore_rules_files(self, rules_files=''): logger.info('restore_rules_files is ignored on GAiA') def restart_ssh_service(self): return shellutil.run('/sbin/service sshd condrestart', chk_err=False) def _address_to_string(self, addr): return socket.inet_ntoa(struct.pack("!I", addr)) def _get_prefix(self, mask): return str(sum([bin(int(x)).count('1') for x in mask.split('.')])) def route_add(self, net, mask, gateway): logger.info('route_add {0} {1} {2}', net, mask, gateway) if net == 0 and mask == 0: cidr = 'default' else: cidr = self._address_to_string(net) + '/' + self._get_prefix( self._address_to_string(mask)) ret, out = self._run_clish( 'set static-route ' + cidr + ' nexthop gateway address ' + self._address_to_string(gateway) + ' on') return ret def set_hostname(self, hostname): logger.warn('set_hostname is ignored on GAiA') def set_dhcp_hostname(self, hostname): logger.warn('set_dhcp_hostname is ignored on GAiA') def publish_hostname(self, hostname): logger.warn('publish_hostname is ignored on GAiA') def del_account(self, username): logger.warn('del_account is ignored on GAiA') WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/iosxe.py000066400000000000000000000061521335416306700236570ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil ''' The IOSXE distribution is a variant of the Centos distribution, version 7.1. The primary difference is that IOSXE makes some assumptions about the waagent environment: - only the waagent daemon is executed - no provisioning is performed - no DHCP-based services are available ''' class IosxeOSUtil(DefaultOSUtil): def __init__(self): super(IosxeOSUtil, self).__init__() def set_hostname(self, hostname): """ Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl Due to a bug in systemd in Centos-7.0, if this call fails, fallback to hostname. """ hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) DefaultOSUtil.set_hostname(self, hostname) def publish_hostname(self, hostname): """ Restart NetworkManager first before publishing hostname """ shellutil.run("service NetworkManager restart") super(RedhatOSUtil, self).publish_hostname(hostname) def register_agent_service(self): return shellutil.run("systemctl enable waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) def is_dhcp_available(self): return (False, '168.63.129.16') def get_instance_id(self): ''' Azure records a UUID as the instance ID First check /sys/class/dmi/id/product_uuid. 
If that is missing, then extracts from dmidecode If nothing works (for old VMs), return the empty string ''' if os.path.isfile(PRODUCT_ID_FILE): try: s = fileutil.read_file(PRODUCT_ID_FILE).strip() return self._correct_instance_id(s.strip()) except IOError: pass rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" return self._correct_instance_id(s.strip()) WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/nsbsd.py000066400000000000000000000130111335416306700236310ustar00rootroot00000000000000# # Copyright 2018 Stormshield # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil from azurelinuxagent.common.future import ustr import azurelinuxagent.common.conf as conf import os import time class NSBSDOSUtil(FreeBSDOSUtil): resolver = None def __init__(self): super(NSBSDOSUtil, self).__init__() if self.resolver is None: # NSBSD doesn't have a system resolver, configure a python one try: import dns.resolver except ImportError: raise OSUtilError("Python DNS resolver not available. 
Cannot proceed!") self.resolver = dns.resolver.Resolver() servers = [] cmd = "getconf /usr/Firewall/ConfigFiles/dns Servers | tail -n +2" ret, output = shellutil.run_get_output(cmd) for server in output.split("\n"): if server == '': break server = server[:-1] # remove last '=' cmd = "grep '{}' /etc/hosts".format(server) + " | awk '{print $1}'" ret, ip = shellutil.run_get_output(cmd) servers.append(ip) self.resolver.nameservers = servers dns.resolver.override_system_resolver(self.resolver) def set_hostname(self, hostname): shellutil.run("/usr/Firewall/sbin/setconf /usr/Firewall/System/global SystemName {0}".format(hostname)) shellutil.run("/usr/Firewall/sbin/enlog") shellutil.run("/usr/Firewall/sbin/enproxy -u") shellutil.run("/usr/Firewall/sbin/ensl -u") shellutil.run("/usr/Firewall/sbin/ennetwork -f") def restart_ssh_service(self): return shellutil.run('/usr/Firewall/sbin/enservice', chk_err=False) def conf_sshd(self, disable_password): option = "0" if disable_password else "1" shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH State 1', chk_err=False) shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH Password {}'.format(option), chk_err=False) shellutil.run('enservice', chk_err=False) logger.info("{0} SSH password-based authentication methods." 
.format("Disabled" if disable_password else "Enabled")) def useradd(self, username, expiration=None): """ Create user account with 'username' """ logger.warn("User creation disabled") return def del_account(self, username): logger.warn("User deletion disabled") def conf_sudoer(self, username, nopasswd=False, remove=False): logger.warn("Sudo is not enabled") def chpasswd(self, username, password, crypt_id=6, salt_len=10): cmd = "/usr/Firewall/sbin/fwpasswd -p {0}".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for admin: {0}" "").format(output)) # password set, activate webadmin and ssh access shellutil.run('setconf /usr/Firewall/ConfigFiles/webadmin ACL any && ensl', chk_err=False) def deploy_ssh_pubkey(self, username, pubkey): """ Deploy authorized_key """ path, thumbprint, value = pubkey #overide parameters super(NSBSDOSUtil, self).deploy_ssh_pubkey('admin', ["/usr/Firewall/.ssh/authorized_keys", thumbprint, value]) def del_root_password(self): logger.warn("Root password deletion disabled") def start_dhcp_service(self): shellutil.run("/usr/Firewall/sbin/nstart dhclient", chk_err=False) def stop_dhcp_service(self): shellutil.run("/usr/Firewall/sbin/nstop dhclient", chk_err=False) def get_dhcp_pid(self): ret = None pidfile = "/var/run/dhclient.pid" if os.path.isfile(pidfile): ret = fileutil.read_file(pidfile, encoding='ascii') return ret def eject_dvd(self, chk_err=True): pass def restart_if(self, ifname): # Restart dhclient only to publish hostname shellutil.run("ennetwork", chk_err=False) def set_dhcp_hostname(self, hostname): #already done by the dhcp client pass def get_firewall_dropped_packets(self, dst_ip=None): # disable iptables methods return 0 def get_firewall_will_wait(self): # disable iptables methods return "" def _delete_rule(self, rule): # disable iptables methods return def remove_firewall(self, dst_ip=None, uid=None): # disable iptables methods return True def 
enable_firewall(self, dst_ip=None, uid=None): # disable iptables methods return True WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/openbsd.py000066400000000000000000000325041335416306700241620ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and OpenSSL 1.0+ import os import re import time import glob import datetime import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) class OpenBSDOSUtil(DefaultOSUtil): def __init__(self): super(OpenBSDOSUtil, self).__init__() self.jit_enabled = True self._scsi_disks_timeout_set = False def get_instance_id(self): ret, output = shellutil.run_get_output("sysctl -n hw.uuid") if ret != 0 or UUID_PATTERN.match(output) is None: return "" return output.strip() def set_hostname(self, hostname): fileutil.write_file("/etc/myname", "{}\n".format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('rcctl restart sshd', chk_err=False) def start_agent_service(self): 
return shellutil.run('rcctl start waagent', chk_err=False) def stop_agent_service(self): return shellutil.run('rcctl stop waagent', chk_err=False) def register_agent_service(self): shellutil.run('chmod 0555 /etc/rc.d/waagent', chk_err=False) return shellutil.run('rcctl enable waagent', chk_err=False) def unregister_agent_service(self): return shellutil.run('rcctl disable waagent', chk_err=False) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -r " + username) self.conf_sudoer(username, remove=True) def conf_sudoer(self, username, nopasswd=False, remove=False): doas_conf = "/etc/doas.conf" doas = None if not remove: if not os.path.isfile(doas_conf): # always allow root to become root doas = "permit keepenv nopass root\n" fileutil.append_file(doas_conf, doas) if nopasswd: doas = "permit keepenv nopass {0}\n".format(username) else: doas = "permit keepenv persist {0}\n".format(username) fileutil.append_file(doas_conf, doas) fileutil.chmod(doas_conf, 0o644) else: # Remove user from doas.conf if os.path.isfile(doas_conf): try: content = fileutil.read_file(doas_conf) doas = content.split("\n") doas = [x for x in doas if username not in x] fileutil.write_file(doas_conf, "\n".join(doas)) except IOError as err: raise OSUtilError("Failed to remove sudoer: " "{0}".format(err)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user. 
" "Will not set passwd.").format(username)) cmd = "echo -n {0}|encrypt".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to encrypt password for {0}: {1}" "").format(username, output)) passwd_hash = output.strip() cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): ret, output = shellutil.run_get_output('usermod -p "*" root') if ret: raise OSUtilError("Failed to delete root password: " "{0}".format(output)) def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): ret = shellutil.run("route -n get default", chk_err=False) if ret == 0: return False return True def is_dhcp_enabled(self): pass def start_dhcp_service(self): pass def stop_dhcp_service(self): pass def get_dhcp_lease_endpoint(self): """ OpenBSD has a sligthly different lease file format. 
""" endpoint = None pathglob = '/var/db/dhclient.leases.{}'.format(self.get_first_if()[0]) HEADER_LEASE = "lease" HEADER_OPTION = "option option-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S %Z" logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION in leases: cached_endpoint = None has_option_245 = False expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None has_option_245 = False expired = True elif HEADER_OPTION in line: try: ipaddr = line.split(" ")[-1].strip(";").split(":") cached_endpoint = \ ".".join(str(int(d, 16)) for d in ipaddr) has_option_245 = True except ValueError: logger.error("could not parse '{0}'".format(line)) elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split( " ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime( expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except ValueError: logger.error("could not parse expiry token " "'{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired: {2}" .format(cached_endpoint, has_option_245, expired)) if not expired and cached_endpoint is not None and has_option_245: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) 
def get_dhcp_pid(self): ret, output = shellutil.run_get_output("pgrep -n dhclient", chk_err=False) return output if ret == 0 else None def get_dvd_device(self, dev_dir='/dev'): pattern = r'cd[0-9]c' for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get DVD device") def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() if not os.path.isdir(mount_point): os.makedirs(mount_point) for retry in range(0, max_retry): retcode = self.mount(dvd_device, mount_point, option="-o ro -t udf", chk_err=False) if retcode == 0: logger.info("Successfully mounted DVD") return if retry < max_retry - 1: mountlist = shellutil.run_get_output("/sbin/mount")[1] existing = self.get_mount_point(mountlist, dvd_device) if existing is not None: logger.info("{0} is mounted at {1}", dvd_device, existing) return logger.warn("Mount DVD failed: retry={0}, ret={1}", retry, retcode) time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount DVD.") def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdio eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject DVD: ret={0}".format(retcode)) def restart_if(self, ifname, retries=3, wait=5): # Restart dhclient only to publish hostname shellutil.run("/sbin/dhclient {0}".format(ifname), chk_err=False) def get_total_mem(self): ret, output = shellutil.run_get_output("sysctl -n hw.physmem") if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl -n hw.ncpu") if ret: raise OSUtilError("Failed to get 
processor cores.") try: return int(output) except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): pass def check_pid_alive(self, pid): if not pid: return return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on OpenBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' ret, output = shellutil.run_get_output( 'ifconfig hvn | grep -E "^hvn.:" | sed "s/:.*//g"', chk_err=False) if ret: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] ret, output = shellutil.run_get_output( 'ifconfig ' + iface, chk_err=False) if ret: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('lladdr ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ return "wd{0}".format(port_id) @staticmethod def get_total_cpu_ticks_since_boot(): return 0 WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/redhat.py000066400000000000000000000115521335416306700237770ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import OSUtilError, CryptError import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.osutil.default import DefaultOSUtil class Redhat6xOSUtil(DefaultOSUtil): def __init__(self): super(Redhat6xOSUtil, self).__init__() self.jit_enabled = True def start_network(self): return shellutil.run("/sbin/service networking start", chk_err=False) def restart_ssh_service(self): return shellutil.run("/sbin/service sshd condrestart", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service waagent stop", chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service waagent start", chk_err=False) def register_agent_service(self): return shellutil.run("chkconfig --add waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("chkconfig --del waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): pubkey = fileutil.read_file(input_file) try: cryptutil = CryptUtil(conf.get_openssl_cmd()) ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) except 
CryptError as e: raise OSUtilError(ustr(e)) fileutil.write_file(output_file, ssh_rsa_pubkey) # Override def get_dhcp_pid(self): ret = shellutil.run_get_output("pidof dhclient", chk_err=False) return ret[1] if ret[0] == 0 else None def set_hostname(self, hostname): """ Set /etc/sysconfig/network """ fileutil.update_conf_file('/etc/sysconfig/network', 'HOSTNAME', 'HOSTNAME={0}'.format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): ifname = self.get_if_name() filepath = "/etc/sysconfig/network-scripts/ifcfg-{0}".format(ifname) fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME={0}'.format(hostname)) def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.leases') class RedhatOSUtil(Redhat6xOSUtil): def __init__(self): super(RedhatOSUtil, self).__init__() def set_hostname(self, hostname): """ Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl Due to a bug in systemd in Centos-7.0, if this call fails, fallback to hostname. 
""" hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) DefaultOSUtil.set_hostname(self, hostname) def publish_hostname(self, hostname): """ Restart NetworkManager first before publishing hostname """ shellutil.run("service NetworkManager restart") super(RedhatOSUtil, self).publish_hostname(hostname) def register_agent_service(self): return shellutil.run("systemctl enable waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) def get_dhcp_lease_endpoint(self): # dhclient endpoint = self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.lease') if endpoint is None: # NetworkManager endpoint = self.get_endpoint_from_leases_path('/var/lib/NetworkManager/dhclient-*.lease') return endpoint WALinuxAgent-2.2.32/azurelinuxagent/common/osutil/suse.py000066400000000000000000000072541335416306700235130ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class SUSE11OSUtil(DefaultOSUtil):
    """
    OSUtil for SUSE Linux Enterprise 11: SysV-style service management
    (/sbin/service, /sbin/insserv) with dhcpcd as the DHCP client.
    """

    def __init__(self):
        super(SUSE11OSUtil, self).__init__()
        self.jit_enabled = True
        # SLES 11 ships dhcpcd; SUSEOSUtil overrides this for wicked.
        self.dhclient_name = 'dhcpcd'

    def set_hostname(self, hostname):
        # SLES keeps the static hostname in /etc/HOSTNAME.
        fileutil.write_file('/etc/HOSTNAME', hostname)
        shellutil.run("hostname {0}".format(hostname), chk_err=False)

    def get_dhcp_pid(self):
        # Return pidof's stdout (the pid list) or None when the DHCP
        # client is not running.
        ret = shellutil.run_get_output(
            "pidof {0}".format(self.dhclient_name), chk_err=False)
        return ret[1] if ret[0] == 0 else None

    def is_dhcp_enabled(self):
        return True

    def stop_dhcp_service(self):
        cmd = "/sbin/service {0} stop".format(self.dhclient_name)
        return shellutil.run(cmd, chk_err=False)

    def start_dhcp_service(self):
        cmd = "/sbin/service {0} start".format(self.dhclient_name)
        return shellutil.run(cmd, chk_err=False)

    def start_network(self):
        # BUG FIX: arguments were reversed ("/sbin/service start network").
        # The SysV syntax is `service <name> <action>`, consistent with the
        # start/stop_dhcp_service commands above.
        return shellutil.run("/sbin/service network start", chk_err=False)

    def restart_ssh_service(self):
        return shellutil.run("/sbin/service sshd restart", chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("/sbin/service waagent stop", chk_err=False)

    def start_agent_service(self):
        return shellutil.run("/sbin/service waagent start", chk_err=False)

    def register_agent_service(self):
        # insserv enables the SysV init script.
        return shellutil.run("/sbin/insserv waagent", chk_err=False)

    def unregister_agent_service(self):
        return shellutil.run("/sbin/insserv -r waagent", chk_err=False)


class SUSEOSUtil(SUSE11OSUtil):
    """
    OSUtil for SUSE 12 and later: systemd service management with the
    wicked DHCP client.
    """

    def __init__(self):
        super(SUSEOSUtil, self).__init__()
        self.dhclient_name = 'wickedd-dhcp4'

    def stop_dhcp_service(self):
        cmd = "systemctl stop {0}".format(self.dhclient_name)
        return shellutil.run(cmd, chk_err=False)

    def start_dhcp_service(self):
        cmd = "systemctl start {0}".format(self.dhclient_name)
        return shellutil.run(cmd, chk_err=False)

    def start_network(self):
        return shellutil.run("systemctl start network", chk_err=False)

    def restart_ssh_service(self):
        return shellutil.run("systemctl restart sshd", chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("systemctl stop waagent", chk_err=False)

    def start_agent_service(self):
        return shellutil.run("systemctl start waagent", chk_err=False)

    def register_agent_service(self):
        return shellutil.run("systemctl enable waagent", chk_err=False)

    def unregister_agent_service(self):
        return shellutil.run("systemctl disable waagent", chk_err=False)
class Ubuntu14OSUtil(DefaultOSUtil):
    """OSUtil for Ubuntu 14.04 (upstart-based service management)."""

    def __init__(self):
        super(Ubuntu14OSUtil, self).__init__()
        self.jit_enabled = True

    def start_network(self):
        return shellutil.run("service networking start", chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("service walinuxagent stop", chk_err=False)

    def start_agent_service(self):
        return shellutil.run("service walinuxagent start", chk_err=False)

    def remove_rules_files(self, rules_files=""):
        # udev rules handling is not required on Ubuntu.
        pass

    def restore_rules_files(self, rules_files=""):
        pass

    def get_dhcp_lease_endpoint(self):
        return self.get_endpoint_from_leases_path(
            '/var/lib/dhcp/dhclient.*.leases')


class Ubuntu12OSUtil(Ubuntu14OSUtil):
    """OSUtil for Ubuntu 12.04, which uses dhclient3."""

    def __init__(self):
        super(Ubuntu12OSUtil, self).__init__()

    # Override
    def get_dhcp_pid(self):
        ret = shellutil.run_get_output("pidof dhclient3", chk_err=False)
        return ret[1] if ret[0] == 0 else None

    def mount_cgroups(self):
        pass


class Ubuntu16OSUtil(Ubuntu14OSUtil):
    """
    Ubuntu 16.04, 16.10, and 17.04.
    """
    def __init__(self):
        super(Ubuntu16OSUtil, self).__init__()

    def register_agent_service(self):
        # The walinuxagent unit is enabled by the package; (un)masking is
        # how the service is switched on/off on these releases.
        return shellutil.run("systemctl unmask walinuxagent", chk_err=False)

    def unregister_agent_service(self):
        return shellutil.run("systemctl mask walinuxagent", chk_err=False)

    def mount_cgroups(self):
        """
        Mounted by default in Ubuntu 16.04
        """
        pass


class Ubuntu18OSUtil(Ubuntu16OSUtil):
    """
    Ubuntu 18.04: DHCP is handled by systemd-networkd.
    """
    def __init__(self):
        super(Ubuntu18OSUtil, self).__init__()

    def get_dhcp_pid(self):
        # FIX: pass chk_err=False for consistency with every other
        # get_dhcp_pid override in this module; an absent DHCP client
        # process is an expected condition, not an error to log.
        ret = shellutil.run_get_output("pidof systemd-networkd",
                                       chk_err=False)
        return ret[1] if ret[0] == 0 else None

    def start_network(self):
        return shellutil.run("systemctl start systemd-networkd", chk_err=False)

    def stop_network(self):
        return shellutil.run("systemctl stop systemd-networkd", chk_err=False)

    def start_dhcp_service(self):
        # systemd-networkd provides DHCP on 18.04.
        return self.start_network()

    def stop_dhcp_service(self):
        return self.stop_network()

    def start_agent_service(self):
        return shellutil.run("systemctl start walinuxagent", chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("systemctl stop walinuxagent", chk_err=False)


class UbuntuOSUtil(Ubuntu16OSUtil):
    def __init__(self):
        super(UbuntuOSUtil, self).__init__()

    def restart_if(self, ifname, retries=3, wait=5):
        """
        Restart an interface by bouncing the link. systemd-networkd observes
        this event, and forces a renew of DHCP.

        :param ifname: interface to bounce
        :param retries: number of attempts before giving up
        :param wait: seconds to sleep between attempts
        """
        retry_limit = retries + 1
        for attempt in range(1, retry_limit):
            return_code = shellutil.run(
                "ip link set {0} down && ip link set {0} up".format(ifname))
            if return_code == 0:
                return
            logger.warn("failed to restart {0}: return code {1}".format(
                ifname, return_code))
            # FIX: the original compared `attempt < retry_limit`, which is
            # always true for values produced by range(1, retry_limit); the
            # "exceeded" branch was unreachable and the function slept
            # uselessly after the final attempt.
            if attempt < retry_limit - 1:
                logger.info("retrying in {0} seconds".format(wait))
                time.sleep(wait)
            else:
                logger.warn("exceeded restart retries")


class UbuntuSnappyOSUtil(Ubuntu14OSUtil):
    """OSUtil for Ubuntu Snappy/Core, which relocates waagent.conf."""

    def __init__(self):
        super(UbuntuSnappyOSUtil, self).__init__()
        self.conf_file_path = '/apps/walinuxagent/current/waagent.conf'
class Observation(object):
    """A single named health measurement reported to the Host GA plugin."""

    def __init__(self, name, is_healthy, description='', value=''):
        # Name and health are mandatory; description/value default to ''.
        if name is None:
            raise ValueError("Observation name must be provided")
        if is_healthy is None:
            raise ValueError("Observation health must be provided")

        self.name = name
        self.is_healthy = is_healthy
        self.description = '' if description is None else description
        self.value = '' if value is None else value

    @property
    def as_obj(self):
        # Field lengths are truncated to the limits accepted by the
        # health service (64 for the name, 128 for the rest).
        return {
            "ObservationName": self.name[:64],
            "IsHealthy": self.is_healthy,
            "Description": self.description[:128],
            "Value": self.value[:128]
        }


class HealthService(object):
    """
    Collects health observations and posts them to the wire server's
    HealthService endpoint; failures are surfaced via telemetry.
    """

    ENDPOINT = 'http://{0}:80/HealthService'
    API = 'reporttargethealth'
    VERSION = "1.0"
    OBSERVER_NAME = 'WALinuxAgent'
    HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME = 'GuestAgentPluginHeartbeat'
    HOST_PLUGIN_STATUS_OBSERVATION_NAME = 'GuestAgentPluginStatus'
    HOST_PLUGIN_VERSIONS_OBSERVATION_NAME = 'GuestAgentPluginVersions'
    HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME = 'GuestAgentPluginArtifact'
    IMDS_OBSERVATION_NAME = 'InstanceMetadataHeartbeat'
    MAX_OBSERVATIONS = 10

    def __init__(self, endpoint):
        self.endpoint = HealthService.ENDPOINT.format(endpoint)
        self.api = HealthService.API
        self.version = HealthService.VERSION
        self.source = HealthService.OBSERVER_NAME
        self.observations = list()

    @property
    def as_json(self):
        # Serialized request body expected by the health service.
        payload = {
            "Api": self.api,
            "Version": self.version,
            "Source": self.source,
            "Observations": [entry.as_obj for entry in self.observations]
        }
        return json.dumps(payload)

    def report_host_plugin_heartbeat(self, is_healthy):
        """
        Reports a signal for /health
        :param is_healthy: whether the call succeeded
        """
        self._observe(name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME,
                      is_healthy=is_healthy)
        self._report()

    def report_host_plugin_versions(self, is_healthy, response):
        """
        Reports a signal for /versions
        :param is_healthy: whether the api call succeeded
        :param response: debugging information for failures
        """
        self._observe(name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME,
                      is_healthy=is_healthy,
                      value=response)
        self._report()

    def report_host_plugin_extension_artifact(self, is_healthy, source, response):
        """
        Reports a signal for /extensionArtifact
        :param is_healthy: whether the api call succeeded
        :param source: specifies the api caller for debugging failures
        :param response: debugging information for failures
        """
        self._observe(name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME,
                      is_healthy=is_healthy,
                      description=source,
                      value=response)
        self._report()

    def report_host_plugin_status(self, is_healthy, response):
        """
        Reports a signal for /status
        :param is_healthy: whether the api call succeeded
        :param response: debugging information for failures
        """
        self._observe(name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME,
                      is_healthy=is_healthy,
                      value=response)
        self._report()

    def report_imds_status(self, is_healthy, response):
        """
        Reports a signal for /metadata/instance
        :param is_healthy: whether the api call succeeded and returned valid data
        :param response: debugging information for failures
        """
        self._observe(name=HealthService.IMDS_OBSERVATION_NAME,
                      is_healthy=is_healthy,
                      value=response)
        self._report()

    def _observe(self, name, is_healthy, value='', description=''):
        # Bound the buffer: once the cap is reached, drop all but the most
        # recent entry before appending the new observation.
        if len(self.observations) >= HealthService.MAX_OBSERVATIONS:
            del self.observations[:HealthService.MAX_OBSERVATIONS - 1]
        self.observations.append(Observation(name=name,
                                             is_healthy=is_healthy,
                                             value=value,
                                             description=description))

    def _report(self):
        """POST accumulated observations, then flush them unconditionally."""
        logger.verbose('HealthService: report observations')
        try:
            restutil.http_post(self.endpoint,
                               self.as_json,
                               headers={'Content-Type': 'application/json'})
            logger.verbose('HealthService: Reported observations to {0}: {1}',
                           self.endpoint,
                           self.as_json)
        except HttpError as e:
            logger.warn("HealthService: could not report observations: {0}", ustr(e))
        finally:
            # report any failures via telemetry
            self._report_failures()
            # these signals are not timestamped, so there is no value in
            # persisting data
            del self.observations[:]

    def _report_failures(self):
        """Emit a telemetry event for every unhealthy observation."""
        try:
            logger.verbose("HealthService: report failures as telemetry")
            from azurelinuxagent.common.event import add_event, WALAEventOperation
            for entry in self.observations:
                if not entry.is_healthy:
                    add_event(AGENT_NAME,
                              version=CURRENT_VERSION,
                              op=WALAEventOperation.HealthObservation,
                              is_success=False,
                              message=json.dumps(entry.as_obj))
        except Exception as e:
            logger.verbose("HealthService: could not report failures: {0}".format(ustr(e)))
# Port and URI templates for the Host GA plugin (the wire-server-hosted
# fallback channel for artifact fetch and status upload).
HOST_PLUGIN_PORT = 32526

URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions"
URI_FORMAT_GET_EXTENSION_ARTIFACT = "http://{0}:{1}/extensionArtifact"
URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status"
URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog"
URI_FORMAT_HEALTH = "http://{0}:{1}/health"

API_VERSION = "2015-09-01"

HEADER_CONTAINER_ID = "x-ms-containerid"
HEADER_VERSION = "x-ms-version"
HEADER_HOST_CONFIG_NAME = "x-ms-host-config-name"
HEADER_ARTIFACT_LOCATION = "x-ms-artifact-location"
HEADER_ARTIFACT_MANIFEST_LOCATION = "x-ms-artifact-manifest-location"

MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024  # Max page size: 4MB


class HostPluginProtocol(object):
    """
    Client for the Host GA plugin endpoint (port 32526 on the wire server).
    Used as a fallback channel to fetch extension artifacts and upload VM
    status blobs, and reports channel health to the HealthService.
    """

    # Process-wide flag: whether the host plugin is the default channel
    # for artifact fetch/status upload (shared across instances).
    _is_default_channel = False

    # Throttle periods for health reporting of fetch/status operations.
    FETCH_REPORTING_PERIOD = datetime.timedelta(minutes=1)
    STATUS_REPORTING_PERIOD = datetime.timedelta(minutes=1)

    def __init__(self, endpoint, container_id, role_config_name):
        """
        :param endpoint: wire server address; required
        :param container_id: goal-state container id sent in request headers
        :param role_config_name: role config name sent in request headers
        :raises ProtocolError: when endpoint is None
        """
        if endpoint is None:
            raise ProtocolError("HostGAPlugin: Endpoint not provided")
        self.is_initialized = False
        self.is_available = False
        self.api_versions = None
        self.endpoint = endpoint
        self.container_id = container_id
        self.deployment_id = None
        self.role_config_name = role_config_name
        self.manifest_uri = None
        self.health_service = HealthService(endpoint)
        # Error states track elapsed time since first failure so that
        # unhealthy signals are reported only after a sustained outage.
        self.fetch_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE)
        self.status_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE)
        self.fetch_last_timestamp = None
        self.status_last_timestamp = None

    @staticmethod
    def is_default_channel():
        return HostPluginProtocol._is_default_channel

    @staticmethod
    def set_default_channel(is_default):
        HostPluginProtocol._is_default_channel = is_default

    def ensure_initialized(self):
        # Lazily probe the plugin once; availability requires the supported
        # API_VERSION to appear in the /versions response.
        if not self.is_initialized:
            self.api_versions = self.get_api_versions()
            self.is_available = API_VERSION in self.api_versions
            self.is_initialized = self.is_available
            # local import avoids a circular dependency with the event module
            from azurelinuxagent.common.event import WALAEventOperation, report_event
            report_event(WALAEventOperation.InitializeHostPlugin,
                         is_success=self.is_available)
        return self.is_available

    def get_health(self):
        """
        Call the /health endpoint
        :return: True if 200 received, False otherwise
        """
        url = URI_FORMAT_HEALTH.format(self.endpoint,
                                       HOST_PLUGIN_PORT)
        logger.verbose("HostGAPlugin: Getting health from [{0}]", url)
        response = restutil.http_get(url, max_retry=1)
        return restutil.request_succeeded(response)

    def get_api_versions(self):
        """
        Fetch /versions and report the outcome to the health service.

        NOTE: on success this returns the raw (BOM-stripped) response body
        as a string, not a parsed list; ensure_initialized relies on
        substring membership of API_VERSION. On failure it returns [].
        """
        url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint,
                                                 HOST_PLUGIN_PORT)
        logger.verbose("HostGAPlugin: Getting API versions at [{0}]"
                       .format(url))
        return_val = []
        error_response = ''
        is_healthy = False
        try:
            headers = {HEADER_CONTAINER_ID: self.container_id}
            response = restutil.http_get(url, headers)

            if restutil.request_failed(response):
                error_response = restutil.read_response_error(response)
                logger.error("HostGAPlugin: Failed Get API versions: {0}".format(error_response))
                # Failures upstream of the host plugin do not count against
                # the plugin's own health.
                is_healthy = not restutil.request_failed_at_hostplugin(response)
            else:
                return_val = ustr(remove_bom(response.read()), encoding='utf-8')
                is_healthy = True
        except HttpError as e:
            logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e))

        self.health_service.report_host_plugin_versions(is_healthy=is_healthy,
                                                        response=error_response)

        return return_val

    def get_artifact_request(self, artifact_url, artifact_manifest_url=None):
        """
        Build the (url, headers) pair for fetching an extension artifact
        through the /extensionArtifact pass-through endpoint.
        :raises ProtocolError: when the channel is unavailable or no
            artifact url was provided
        """
        if not self.ensure_initialized():
            raise ProtocolError("HostGAPlugin: Host plugin channel is not available")

        if textutil.is_str_none_or_whitespace(artifact_url):
            raise ProtocolError("HostGAPlugin: No extension artifact url was provided")

        url = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint,
                                                       HOST_PLUGIN_PORT)
        headers = {HEADER_VERSION: API_VERSION,
                   HEADER_CONTAINER_ID: self.container_id,
                   HEADER_HOST_CONFIG_NAME: self.role_config_name,
                   HEADER_ARTIFACT_LOCATION: artifact_url}

        if artifact_manifest_url is not None:
            headers[HEADER_ARTIFACT_MANIFEST_LOCATION] = artifact_manifest_url

        return url, headers

    def report_fetch_health(self, uri, is_healthy=True, source='', response=''):
        # Only fetches that went through the host plugin artifact endpoint
        # are tracked here.
        if uri != URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT):
            return

        if self.should_report(is_healthy,
                              self.fetch_error_state,
                              self.fetch_last_timestamp,
                              HostPluginProtocol.FETCH_REPORTING_PERIOD):
            self.fetch_last_timestamp = datetime.datetime.utcnow()
            # unhealthy only once the error state's time threshold triggers
            health_signal = self.fetch_error_state.is_triggered() is False
            self.health_service.report_host_plugin_extension_artifact(is_healthy=health_signal,
                                                                      source=source,
                                                                      response=response)

    def report_status_health(self, is_healthy, response=''):
        if self.should_report(is_healthy,
                              self.status_error_state,
                              self.status_last_timestamp,
                              HostPluginProtocol.STATUS_REPORTING_PERIOD):
            self.status_last_timestamp = datetime.datetime.utcnow()
            health_signal = self.status_error_state.is_triggered() is False
            self.health_service.report_host_plugin_status(is_healthy=health_signal,
                                                          response=response)

    @staticmethod
    def should_report(is_healthy, error_state, last_timestamp, period):
        """
        Determine whether a health signal should be reported
        :param is_healthy: whether the current measurement is healthy
        :param error_state: the error state which is tracking time since failure
        :param last_timestamp: the last measurement time stamp
        :param period: the reporting period
        :return: True if the signal should be reported, False otherwise
        """

        if is_healthy:
            # we only reset the error state upon success, since we want to keep
            # reporting the failure; this is different to other uses of error states
            # which do not have a separate periodicity
            error_state.reset()
        else:
            error_state.incr()

        if last_timestamp is None:
            last_timestamp = datetime.datetime.utcnow() - period

        return datetime.datetime.utcnow() >= (last_timestamp + period)

    def put_vm_log(self, content):
        # Not supported on this channel.
        raise NotImplementedError("Unimplemented")

    def put_vm_status(self, status_blob, sas_url, config_blob_type=None):
        """
        Try to upload the VM status via the host plugin /status channel
        :param sas_url: the blob SAS url to pass to the host plugin
        :param config_blob_type: the blob type from the extension config
        :type status_blob: StatusBlob
        """
        if not self.ensure_initialized():
            raise ProtocolError("HostGAPlugin: HostGAPlugin is not available")

        if status_blob is None or status_blob.vm_status is None:
            raise ProtocolError("HostGAPlugin: Status blob was not provided")

        logger.verbose("HostGAPlugin: Posting VM status")

        # The blob type on the status blob wins; fall back to the type
        # declared in the extension config.
        blob_type = status_blob.type if status_blob.type else config_blob_type

        if blob_type == "BlockBlob":
            self._put_block_blob_status(sas_url, status_blob)
        else:
            self._put_page_blob_status(sas_url, status_blob)

    def _put_block_blob_status(self, sas_url, status_blob):
        # Single PUT with the full status payload, base64-wrapped in the
        # host plugin's pass-through envelope.
        url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT)

        response = restutil.http_put(url,
                                     data=self._build_status_data(
                                         sas_url,
                                         status_blob.get_block_blob_headers(len(status_blob.data)),
                                         bytearray(status_blob.data, encoding='utf-8')),
                                     headers=self._build_status_headers())

        if restutil.request_failed(response):
            error_response = restutil.read_response_error(response)
            is_healthy = not restutil.request_failed_at_hostplugin(response)
            self.report_status_health(is_healthy=is_healthy, response=error_response)
            raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}"
                            .format(error_response))
        else:
            self.report_status_health(is_healthy=True)
            logger.verbose("HostGAPlugin: Put BlockBlob status succeeded")

    def _put_page_blob_status(self, sas_url, status_blob):
        # Page blobs require 512-byte-aligned writes: pad the payload,
        # create an empty blob of the final size, then upload page ranges.
        url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT)

        # Convert the status into a blank-padded string whose length is modulo 512
        status = bytearray(status_blob.data, encoding='utf-8')
        status_size = int((len(status) + 511) / 512) * 512
        status = bytearray(status_blob.data.ljust(status_size), encoding='utf-8')

        # First, initialize an empty blob
        response = restutil.http_put(url,
                                     data=self._build_status_data(
                                         sas_url,
                                         status_blob.get_page_blob_create_headers(status_size)),
                                     headers=self._build_status_headers())

        if restutil.request_failed(response):
            error_response = restutil.read_response_error(response)
            is_healthy = not restutil.request_failed_at_hostplugin(response)
            self.report_status_health(is_healthy=is_healthy, response=error_response)
            raise HttpError("HostGAPlugin: Failed PageBlob clean-up: {0}"
                            .format(error_response))
        else:
            self.report_status_health(is_healthy=True)
            logger.verbose("HostGAPlugin: PageBlob clean-up succeeded")

        # Then, upload the blob in pages
        if sas_url.count("?") <= 0:
            sas_url = "{0}?comp=page".format(sas_url)
        else:
            sas_url = "{0}&comp=page".format(sas_url)

        start = 0
        end = 0
        while start < len(status):
            # Create the next page
            end = start + min(len(status) - start,
                              MAXIMUM_PAGEBLOB_PAGE_SIZE)
            page_size = int((end - start + 511) / 512) * 512
            buf = bytearray(page_size)
            buf[0: end - start] = status[start: end]

            # Send the page
            response = restutil.http_put(url,
                                         data=self._build_status_data(
                                             sas_url,
                                             status_blob.get_page_blob_page_headers(start, end),
                                             buf),
                                         headers=self._build_status_headers())

            if restutil.request_failed(response):
                error_response = restutil.read_response_error(response)
                is_healthy = not restutil.request_failed_at_hostplugin(response)
                self.report_status_health(is_healthy=is_healthy, response=error_response)
                raise HttpError(
                    "HostGAPlugin Error: Put PageBlob bytes "
                    "[{0},{1}]: {2}".format(start, end, error_response))

            # Advance to the next page (if any)
            start = end

    def _build_status_data(self, sas_url, blob_headers, content=None):
        # Envelope format expected by the /status pass-through endpoint:
        # target uri, the blob headers to forward, and (optionally) the
        # base64-encoded body.
        headers = []
        for name in iter(blob_headers.keys()):
            headers.append({
                'headerName': name,
                'headerValue': blob_headers[name]
            })
        data = {
            'requestUri': sas_url,
            'headers': headers
        }
        if not content is None:
            data['content'] = self._base64_encode(content)
        return json.dumps(data, sort_keys=True)

    def _build_status_headers(self):
        return {
            HEADER_VERSION: API_VERSION,
            "Content-type": "application/json",
            HEADER_CONTAINER_ID: self.container_id,
            HEADER_HOST_CONFIG_NAME: self.role_config_name
        }

    def _base64_encode(self, data):
        s = base64.b64encode(bytes(data))
        if PY_VERSION_MAJOR > 2:
            # b64encode returns bytes on Python 3; the JSON envelope needs str.
            return s.decode('utf-8')
        return s


# ==== file boundary: azurelinuxagent/common/protocol/imds.py ====
# (Apache-2.0 license header in the original archive)

IMDS_ENDPOINT = '169.254.169.254'
APIVERSION = '2018-02-01'
BASE_URI = "http://{0}/metadata/instance/{1}?api-version={2}"

# Classification of the VM's source image, derived from IMDS compute data.
IMDS_IMAGE_ORIGIN_UNKNOWN = 0
IMDS_IMAGE_ORIGIN_CUSTOM = 1
IMDS_IMAGE_ORIGIN_ENDORSED = 2
IMDS_IMAGE_ORIGIN_PLATFORM = 3


def get_imds_client():
    # Factory kept for symmetry with the other protocol clients.
    return ImdsClient()


# A *slightly* future proof list of endorsed distros.
#  -> e.g. I have predicted the future and said that 20.04-LTS will exist
#     and is endorsed.
#
# See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros for
# more details.
#
# This is not an exhaustive list. This is a best attempt to mark images as
# endorsed or not.
# Image publishers do not encode all of the requisite information in their
# publisher, offer, sku, and version to definitively mark something as
# endorsed or not. This is not perfect, but it is approximately 98% perfect.
ENDORSED_IMAGE_INFO_MATCHER_JSON = """{
    "CANONICAL": {
        "UBUNTUSERVER": {
            "List": [
                "14.04.0-LTS",
                "14.04.1-LTS",
                "14.04.2-LTS",
                "14.04.3-LTS",
                "14.04.4-LTS",
                "14.04.5-LTS",
                "14.04.6-LTS",
                "14.04.7-LTS",
                "14.04.8-LTS",
                "16.04-LTS",
                "16.04.0-LTS",
                "18.04-LTS",
                "20.04-LTS",
                "22.04-LTS"
            ]
        }
    },
    "COREOS": {
        "COREOS": {
            "STABLE": {
                "Minimum": "494.4.0"
            }
        }
    },
    "CREDATIV": {
        "DEBIAN": {
            "Minimum": "7"
        }
    },
    "OPENLOGIC": {
        "CENTOS": {
            "Minimum": "6.3",
            "List": [
                "7-LVM",
                "7-RAW"
            ]
        },
        "CENTOS-HPC": {
            "Minimum": "6.3"
        }
    },
    "REDHAT": {
        "RHEL": {
            "Minimum": "6.7",
            "List": [
                "7-LVM",
                "7-RAW"
            ]
        },
        "RHEL-HANA": {
            "Minimum": "6.7"
        },
        "RHEL-SAP": {
            "Minimum": "6.7"
        },
        "RHEL-SAP-APPS": {
            "Minimum": "6.7"
        },
        "RHEL-SAP-HANA": {
            "Minimum": "6.7"
        }
    },
    "SUSE": {
        "SLES": {
            "List": [
                "11-SP4",
                "11-SP5",
                "11-SP6",
                "12-SP1",
                "12-SP2",
                "12-SP3",
                "12-SP4",
                "12-SP5",
                "12-SP6"
            ]
        },
        "SLES-BYOS": {
            "List": [
                "11-SP4",
                "11-SP5",
                "11-SP6",
                "12-SP1",
                "12-SP2",
                "12-SP3",
                "12-SP4",
                "12-SP5",
                "12-SP6"
            ]
        },
        "SLES-SAP": {
            "List": [
                "11-SP4",
                "11-SP5",
                "11-SP6",
                "12-SP1",
                "12-SP2",
                "12-SP3",
                "12-SP4",
                "12-SP5",
                "12-SP6"
            ]
        }
    }
}"""


class ImageInfoMatcher(object):
    """
    Matches a (publisher, offer, sku, version) tuple against a nested JSON
    document of endorsed images (exact "List" entries, "Match" regexes, or
    "Minimum" version thresholds).
    """

    def __init__(self, doc):
        self.doc = json.loads(doc)

    def is_match(self, publisher, offer, sku, version):
        """
        :return: True when the image is listed as endorsed, False otherwise.
        """
        def _is_match_walk(doci, keys):
            # Walk one level per key (publisher -> offer -> sku -> version),
            # matching case-insensitively.
            # BUG FIX: the None check previously ran AFTER .upper() was
            # called on the popped key, so it was dead code and a None key
            # raised AttributeError instead of returning False.
            key = keys.pop(0)
            if key is None:
                return False
            key = key.upper()

            if key not in doci:
                return False

            if 'List' in doci[key] and keys[0] in doci[key]['List']:
                return True

            if 'Match' in doci[key] and re.match(doci[key]['Match'], keys[0]):
                return True

            if 'Minimum' in doci[key]:
                try:
                    return FlexibleVersion(keys[0]) >= FlexibleVersion(doci[key]['Minimum'])
                except ValueError:
                    # fall through to a deeper walk on unparsable versions
                    pass

            return _is_match_walk(doci[key], keys)

        return _is_match_walk(self.doc, [publisher, offer, sku, version])


class ComputeInfo(DataContract):
    """IMDS /metadata/instance/compute payload, one attribute per field."""

    __matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON)

    def __init__(self,
                 location=None,
                 name=None,
                 offer=None,
                 osType=None,
                 placementGroupId=None,
                 platformFaultDomain=None,
                 placementUpdateDomain=None,
                 publisher=None,
                 resourceGroupName=None,
                 sku=None,
                 subscriptionId=None,
                 tags=None,
                 version=None,
                 vmId=None,
                 vmSize=None,
                 vmScaleSetName=None,
                 zone=None):
        self.location = location
        self.name = name
        self.offer = offer
        self.osType = osType
        self.placementGroupId = placementGroupId
        self.platformFaultDomain = platformFaultDomain
        self.platformUpdateDomain = placementUpdateDomain
        self.publisher = publisher
        self.resourceGroupName = resourceGroupName
        self.sku = sku
        self.subscriptionId = subscriptionId
        self.tags = tags
        self.version = version
        self.vmId = vmId
        self.vmSize = vmSize
        self.vmScaleSetName = vmScaleSetName
        self.zone = zone

    @property
    def image_info(self):
        # Canonical "publisher:offer:sku:version" identifier.
        return "{0}:{1}:{2}:{3}".format(self.publisher, self.offer, self.sku, self.version)

    @property
    def image_origin(self):
        """
        An integer value describing the origin of the image.

          0 -> unknown
          1 -> custom - user created image
          2 -> endorsed - See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros
          3 -> platform - non-endorsed image that is available in the Azure Marketplace.
        """
        try:
            # An empty publisher means the image did not come from the
            # marketplace at all.
            if self.publisher == "":
                return IMDS_IMAGE_ORIGIN_CUSTOM

            if ComputeInfo.__matcher.is_match(self.publisher, self.offer, self.sku, self.version):
                return IMDS_IMAGE_ORIGIN_ENDORSED
            else:
                return IMDS_IMAGE_ORIGIN_PLATFORM

        except Exception as e:
            logger.warn("Could not determine the image origin from IMDS: {0}", str(e))
            return IMDS_IMAGE_ORIGIN_UNKNOWN


class ImdsClient(object):
    """Minimal client for the Instance Metadata Service (169.254.169.254)."""

    def __init__(self, version=APIVERSION):
        self._api_version = version
        self._headers = {
            'User-Agent': restutil.HTTP_USER_AGENT,
            'Metadata': True,
        }
        # Health probes use a distinct user agent so they can be
        # distinguished server-side.
        self._health_headers = {
            'User-Agent': restutil.HTTP_USER_AGENT_HEALTH,
            'Metadata': True,
        }

    @property
    def compute_url(self):
        return BASE_URI.format(IMDS_ENDPOINT, 'compute', self._api_version)

    @property
    def instance_url(self):
        return BASE_URI.format(IMDS_ENDPOINT, '', self._api_version)

    def get_compute(self):
        """
        Fetch compute information.

        :return: instance of a ComputeInfo
        :rtype: ComputeInfo
        :raises HttpError: on a failed HTTP request
        """
        resp = restutil.http_get(self.compute_url, headers=self._headers)

        if restutil.request_failed(resp):
            raise HttpError("{0} - GET: {1}".format(resp.status, self.compute_url))

        data = resp.read()
        data = json.loads(ustr(data, encoding="utf-8"))

        compute_info = ComputeInfo()
        set_properties('compute', compute_info, data)

        return compute_info

    def validate(self):
        """
        Determines whether the metadata instance api returns 200, and the
        response is valid: compute should contain location, name, subscription
        id, and vm size and network should contain mac address and private
        ip address.

        :return: Tuple<is_healthy:bool, error_response:str>
            is_healthy: True when validation succeeds, False otherwise
            error_response: validation failure details to assist with debugging
        """

        # ensure we get a 200
        resp = restutil.http_get(self.instance_url, headers=self._health_headers)
        if restutil.request_failed(resp):
            return False, "{0}".format(restutil.read_response_error(resp))

        # ensure the response is valid json
        data = resp.read()
        try:
            json_data = json.loads(ustr(data, encoding="utf-8"))
        except Exception as e:
            return False, "JSON parsing failed: {0}".format(ustr(e))

        # ensure all expected fields are present and have a value
        try:
            # TODO: compute fields cannot be verified yet since we need to
            # exclude rdfe vms (#1249)
            self.check_field(json_data, 'network')
            self.check_field(json_data['network'], 'interface')
            self.check_field(json_data['network']['interface'][0], 'macAddress')
            self.check_field(json_data['network']['interface'][0], 'ipv4')
            self.check_field(json_data['network']['interface'][0]['ipv4'], 'ipAddress')
            self.check_field(json_data['network']['interface'][0]['ipv4']['ipAddress'][0], 'privateIpAddress')
        except ValueError as v:
            return False, ustr(v)

        return True, ''

    @staticmethod
    def check_field(dict_obj, field):
        # Raise ValueError when a required field is absent, None, or empty.
        if field not in dict_obj or dict_obj[field] is None:
            raise ValueError('Missing field: [{0}]'.format(field))

        if len(dict_obj[field]) == 0:
            raise ValueError('Empty field: [{0}]'.format(field))
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+

import base64
import json
import os
import shutil
import re

import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.protocol.restapi import *
from azurelinuxagent.common.utils.cryptutil import CryptUtil

METADATA_ENDPOINT = '169.254.169.254'
APIVERSION = '2015-05-01-preview'
BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}"

TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem"
TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem"
P7M_FILE_NAME = "Certificates.p7m"
P7B_FILE_NAME = "Certificates.p7b"
PEM_FILE_NAME = "Certificates.pem"

KEY_AGENT_VERSION_URIS = "versionsManifestUris"
KEY_URI = "uri"

# TODO remote workaround for azure stack
MAX_PING = 30
RETRY_PING_INTERVAL = 10


def _add_content_type(headers):
    """Ensure a JSON content-type header is present (creating the dict if needed)."""
    if headers is None:
        headers = {}
    headers["content-type"] = "application/json"
    return headers


class MetadataProtocol(Protocol):
    """Goal-state protocol backed by the metadata endpoint (used on Azure Stack)."""

    def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT):
        self.apiversion = apiversion
        self.endpoint = endpoint
        self.identity_uri = BASE_URI.format(self.endpoint, "identity",
                                            self.apiversion)
        self.cert_uri = BASE_URI.format(self.endpoint, "certificates",
                                        self.apiversion)
        self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers",
                                       self.apiversion)
        self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions",
                                           self.apiversion)
        self.provision_status_uri = BASE_URI.format(self.endpoint,
                                                    "provisioningStatus",
                                                    self.apiversion, "")
        self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent",
                                             self.apiversion, "")
        self.ext_status_uri = BASE_URI.format(self.endpoint,
                                              "status/extensions/{0}",
                                              self.apiversion, "")
        self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry",
                                         self.apiversion, "")
        self.certs = None
        self.agent_manifests = None
        self.agent_etag = None

    def _get_data(self, url, headers=None):
        """GET *url*, returning (parsed-json-or-None, ETag header)."""
        try:
            resp = restutil.http_get(url, headers=headers)
        except HttpError as e:
            raise ProtocolError(ustr(e))

        if restutil.request_failed(resp):
            raise ProtocolError("{0} - GET: {1}".format(resp.status, url))

        data = resp.read()
        etag = resp.getheader('ETag')
        if data is not None:
            data = json.loads(ustr(data, encoding="utf-8"))
        return data, etag

    def _put_data(self, url, data, headers=None):
        """PUT *data* as JSON to *url*; raise ProtocolError on failure."""
        headers = _add_content_type(headers)
        try:
            resp = restutil.http_put(url, json.dumps(data), headers=headers)
        except HttpError as e:
            raise ProtocolError(ustr(e))
        if restutil.request_failed(resp):
            raise ProtocolError("{0} - PUT: {1}".format(resp.status, url))

    def _post_data(self, url, data, headers=None):
        """POST *data* as JSON to *url*; log (but do not raise) on non-201."""
        headers = _add_content_type(headers)
        try:
            resp = restutil.http_post(url, json.dumps(data), headers=headers)
        except HttpError as e:
            raise ProtocolError(ustr(e))
        if resp.status != httpclient.CREATED:
            logger.warn("{0} for POST {1}".format(resp.status, url))

    def _get_trans_cert(self):
        """Read the transport certificate; raise ProtocolError when missing."""
        trans_crt_file = os.path.join(conf.get_lib_dir(),
                                      TRANSPORT_CERT_FILE_NAME)
        if not os.path.isfile(trans_crt_file):
            raise ProtocolError("{0} is missing.".format(trans_crt_file))
        content = fileutil.read_file(trans_crt_file)
        return textutil.get_bytes_from_pem(content)

    def detect(self):
        """Probe the endpoint and provision the transport certificate pair."""
        self.get_vminfo()
        trans_prv_file = os.path.join(conf.get_lib_dir(),
                                      TRANSPORT_PRV_FILE_NAME)
        trans_cert_file = os.path.join(conf.get_lib_dir(),
                                       TRANSPORT_CERT_FILE_NAME)
        cryptutil = CryptUtil(conf.get_openssl_cmd())
        cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file)

        # "Install" the cert and private key to /var/lib/waagent
        thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file)
        prv_file = os.path.join(conf.get_lib_dir(),
                                "{0}.prv".format(thumbprint))
        crt_file = os.path.join(conf.get_lib_dir(),
                                "{0}.crt".format(thumbprint))
        shutil.copyfile(trans_prv_file, prv_file)
        shutil.copyfile(trans_cert_file, crt_file)

        self.update_goal_state(forced=True)

    def get_vminfo(self):
        vminfo = VMInfo()
        data, etag = self._get_data(self.identity_uri)
        set_properties("vminfo", vminfo, data)
        return vminfo

    def get_certs(self):
        """Download and parse the VM's certificates; returns a Certificates object."""
        certlist = CertList()
        certificatedata = CertificateData()
        data, etag = self._get_data(self.cert_uri)
        set_properties("certlist", certlist, data)
        cert_list = get_properties(certlist)

        headers = {
            "x-ms-vmagent-public-x509-cert": self._get_trans_cert()
        }

        for cert_i in cert_list["certificates"]:
            certificate_data_uri = cert_i['certificateDataUri']
            data, etag = self._get_data(certificate_data_uri, headers=headers)
            set_properties("certificatedata", certificatedata, data)
            json_certificate_data = get_properties(certificatedata)

            self.certs = Certificates(self, json_certificate_data)

        if self.certs is None:
            return None
        return self.certs

    def get_incarnation(self):
        # Always return 0 since Azure Stack does not maintain goal state
        # incarnation identifiers
        return 0

    def get_vmagent_manifests(self):
        self.update_goal_state()

        data, etag = self._get_data(self.vmagent_uri)
        if self.agent_etag is None or self.agent_etag < etag:
            self.agent_etag = etag

            # Create a list with a single manifest
            # -- The protocol lacks "family," use the configured family
            self.agent_manifests = VMAgentManifestList()

            manifest = VMAgentManifest()
            # BUGFIX: was `manifest.family = family=...` — a pointless chained
            # assignment to an unused local.
            manifest.family = conf.get_autoupdate_gafamily()

            if KEY_AGENT_VERSION_URIS not in data:
                raise ProtocolError(
                    "Agent versions missing '{0}': {1}".format(
                        KEY_AGENT_VERSION_URIS, data))

            for version in data[KEY_AGENT_VERSION_URIS]:
                if KEY_URI not in version:
                    # BUGFIX: format string was "...'{0': {1}" — the
                    # unterminated field raised ValueError instead of
                    # producing the intended ProtocolError message.
                    raise ProtocolError(
                        "Agent versions missing '{0}': {1}".format(
                            KEY_URI, data))
                manifest_uri = VMAgentManifestUri(uri=version[KEY_URI])
                manifest.versionsManifestUris.append(manifest_uri)

            self.agent_manifests.vmAgentManifests.append(manifest)

        return self.agent_manifests, self.agent_etag

    def get_vmagent_pkgs(self, vmagent_manifest):
        """Fetch the agent package list from the first reachable manifest URI."""
        data = None
        etag = None
        for manifest_uri in vmagent_manifest.versionsManifestUris:
            try:
                data, etag = self._get_data(manifest_uri.uri)
                break
            except ProtocolError as e:
                logger.verbose(
                    "Error retrieving agent package from {0}: {1}".format(
                        manifest_uri, e))

        if data is None:
            raise ProtocolError(
                "Failed retrieving agent package from all URIs")

        vmagent_pkgs = ExtHandlerPackageList()
        set_properties("vmAgentVersions", vmagent_pkgs, data)
        return vmagent_pkgs

    def get_ext_handlers(self, last_etag=None):
        self.update_goal_state()
        headers = {
            "x-ms-vmagent-public-x509-cert": self._get_trans_cert()
        }
        ext_list = ExtHandlerList()
        data, etag = self._get_data(self.ext_uri, headers=headers)
        if last_etag is None or last_etag < etag:
            set_properties("extensionHandlers", ext_list.extHandlers, data)
        return ext_list, etag

    def get_ext_handler_pkgs(self, ext_handler):
        logger.verbose("Get extension handler packages")
        pkg_list = ExtHandlerPackageList()

        manifest = None
        for version_uri in ext_handler.versionUris:
            try:
                manifest, etag = self._get_data(version_uri.uri)
                logger.verbose("Successfully downloaded manifest")
                break
            except ProtocolError as e:
                logger.warn("Failed to fetch manifest: {0}", e)

        if manifest is None:
            raise ValueError("Extension manifest is empty")

        set_properties("extensionPackages", pkg_list, manifest)

        return pkg_list

    def report_provision_status(self, provision_status):
        validate_param('provisionStatus', provision_status, ProvisionStatus)
        data = get_properties(provision_status)
        self._put_data(self.provision_status_uri, data)

    def report_vm_status(self, vm_status):
        validate_param('vmStatus', vm_status, VMStatus)
        data = get_properties(vm_status)

        # TODO code field is not implemented for metadata protocol yet.
        # Remove it
        handler_statuses = data['vmAgent']['extensionHandlers']
        for handler_status in handler_statuses:
            # dict.pop with a default never raises KeyError; the old
            # try/except around this call was dead code.
            handler_status.pop('code', None)

        self._put_data(self.vm_status_uri, data)

    def report_ext_status(self, ext_handler_name, ext_name, ext_status):
        validate_param('extensionStatus', ext_status, ExtensionStatus)
        data = get_properties(ext_status)
        uri = self.ext_status_uri.format(ext_name)
        self._put_data(uri, data)

    def report_event(self, events):
        validate_param('events', events, TelemetryEventList)
        data = get_properties(events)
        self._post_data(self.event_uri, data)

    def update_certs(self):
        certificates = self.get_certs()
        return certificates.cert_list

    def update_goal_state(self, forced=False, max_retry=3):
        # Start updating goalstate, retry on 410
        for retry in range(0, max_retry):
            try:
                self.update_certs()
                return
            except Exception:
                # BUGFIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.
                logger.verbose("Incarnation is out of date. Update goalstate.")
        raise ProtocolError("Exceeded max retry updating goal state")

    def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True):
        """Best-effort download of an extension package; returns True on success."""
        success = False
        try:
            resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy)
            if restutil.request_succeeded(resp):
                fileutil.write_file(destination, bytearray(resp.read()),
                                    asbin=True)
                success = True
        except Exception as e:
            logger.warn("Failed to download from: {0}".format(uri), e)
        return success


class Certificates(object):
    """
    Object containing certificates of host and provisioned user.
    """

    def __init__(self, client, json_text):
        self.cert_list = CertList()
        self.parse(json_text)

    def parse(self, json_text):
        """
        Parse multiple certificates into seperate files.
        """
        data = json_text["certificateData"]
        if data is None:
            logger.verbose("No data in json_text received!")
            return

        cryptutil = CryptUtil(conf.get_openssl_cmd())
        p7b_file = os.path.join(conf.get_lib_dir(), P7B_FILE_NAME)

        # Wrapping the certificate lines.
        # decode and save the result into p7b_file
        fileutil.write_file(p7b_file, base64.b64decode(data), asbin=True)

        ssl_cmd = "openssl pkcs7 -text -in {0} -inform der | grep -v '^-----' "
        ret, data = shellutil.run_get_output(ssl_cmd.format(p7b_file))

        p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)
        p7m = ("MIME-Version:1.0\n"
               "Content-Disposition: attachment; filename=\"{0}\"\n"
               "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n"
               "Content-Transfer-Encoding: base64\n"
               "\n"
               "{2}").format(p7m_file, p7m_file, data)

        self.save_cache(p7m_file, p7m)

        trans_prv_file = os.path.join(conf.get_lib_dir(),
                                      TRANSPORT_PRV_FILE_NAME)
        trans_cert_file = os.path.join(conf.get_lib_dir(),
                                       TRANSPORT_CERT_FILE_NAME)
        pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)
        # decrypt certificates
        cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,
                              pem_file)

        # The parsing process use public key to match prv and crt.
        buf = []
        begin_crt = False
        begin_prv = False
        prvs = {}
        thumbprints = {}
        index = 0
        v1_cert_list = []
        with open(pem_file) as pem:
            for line in pem.readlines():
                buf.append(line)
                if re.match(r'[-]+BEGIN.*KEY[-]+', line):
                    begin_prv = True
                elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):
                    begin_crt = True
                elif re.match(r'[-]+END.*KEY[-]+', line):
                    tmp_file = self.write_to_tmp_file(index, 'prv', buf)
                    pub = cryptutil.get_pubkey_from_prv(tmp_file)
                    prvs[pub] = tmp_file
                    buf = []
                    index += 1
                    begin_prv = False
                elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):
                    tmp_file = self.write_to_tmp_file(index, 'crt', buf)
                    pub = cryptutil.get_pubkey_from_crt(tmp_file)
                    thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)
                    thumbprints[pub] = thumbprint
                    # Rename crt with thumbprint as the file name
                    crt = "{0}.crt".format(thumbprint)
                    v1_cert_list.append({
                        "name": None,
                        "thumbprint": thumbprint
                    })
                    os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))
                    buf = []
                    index += 1
                    begin_crt = False

        # Rename prv key with thumbprint as the file name
        for pubkey in prvs:
            thumbprint = thumbprints[pubkey]
            if thumbprint:
                tmp_file = prvs[pubkey]
                prv = "{0}.prv".format(thumbprint)
                os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))

        for v1_cert in v1_cert_list:
            cert = Cert()
            set_properties("certs", cert, v1_cert)
            self.cert_list.certificates.append(cert)

    def save_cache(self, local_file, data):
        try:
            fileutil.write_file(local_file, data)
        except IOError as e:
            raise ProtocolError("Failed to write cache: {0}".format(e))

    def write_to_tmp_file(self, index, suffix, buf):
        file_name = os.path.join(conf.get_lib_dir(),
                                 "{0}.{1}".format(index, suffix))
        self.save_cache(file_name, "".join(buf))
        return file_name
OVF_VERSION = "1.0"
OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1"
WA_NAME_SPACE = "http://schemas.microsoft.com/windowsazure"


def _validate_ovf(val, msg):
    """Fail with ProtocolError carrying *msg* when a required value is absent."""
    if val is None:
        raise ProtocolError("Failed to validate OVF: {0}".format(msg))


class OvfEnv(object):
    """
    Read, and process provisioning info from provisioning file OvfEnv.xml
    """

    def __init__(self, xml_text):
        if xml_text is None:
            raise ValueError("ovf-env is None")
        logger.verbose("Load ovf-env.xml")
        self.hostname = None
        self.username = None
        self.user_password = None
        self.customdata = None
        self.disable_ssh_password_auth = True
        self.ssh_pubkeys = []
        self.ssh_keypairs = []
        self.provision_guest_agent = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """
        Parse the ovf-env XML tree and populate user/ssh-key attributes.
        """
        wa = WA_NAME_SPACE
        ovf = OVF_NAME_SPACE

        xml_doc = parse_doc(xml_text)

        environment = find(xml_doc, "Environment", namespace=ovf)
        _validate_ovf(environment, "Environment not found")

        section = find(environment, "ProvisioningSection", namespace=wa)
        _validate_ovf(section, "ProvisioningSection not found")

        version = findtext(environment, "Version", namespace=wa)
        _validate_ovf(version, "Version not found")
        # NOTE: lexicographic string comparison — adequate while versions
        # stay single-digit ("1.0" vs "1.1").
        if version > OVF_VERSION:
            logger.warn("Newer provisioning configuration detected. "
                        "Please consider updating waagent")

        conf_set = find(section, "LinuxProvisioningConfigurationSet",
                        namespace=wa)
        _validate_ovf(conf_set, "LinuxProvisioningConfigurationSet not found")

        self.hostname = findtext(conf_set, "HostName", namespace=wa)
        _validate_ovf(self.hostname, "HostName not found")

        self.username = findtext(conf_set, "UserName", namespace=wa)
        _validate_ovf(self.username, "UserName not found")

        self.user_password = findtext(conf_set, "UserPassword", namespace=wa)
        self.customdata = findtext(conf_set, "CustomData", namespace=wa)

        # Password auth is disabled only when the flag is explicitly "true".
        auth_option = findtext(conf_set, "DisableSshPasswordAuthentication",
                               namespace=wa)
        self.disable_ssh_password_auth = (
            auth_option is not None and auth_option.lower() == "true")

        # Collect (path, fingerprint, value) triples for each public key.
        self.ssh_pubkeys.extend(
            (findtext(public_key, "Path", namespace=wa),
             findtext(public_key, "Fingerprint", namespace=wa),
             findtext(public_key, "Value", namespace=wa))
            for public_key in findall(conf_set, "PublicKey", namespace=wa))

        # Collect (path, fingerprint) pairs for each key pair.
        self.ssh_keypairs.extend(
            (findtext(keypair, "Path", namespace=wa),
             findtext(keypair, "Fingerprint", namespace=wa))
            for keypair in findall(conf_set, "KeyPair", namespace=wa))

        platform_settings_section = find(environment,
                                         "PlatformSettingsSection",
                                         namespace=wa)
        _validate_ovf(platform_settings_section,
                      "PlatformSettingsSection not found")

        platform_settings = find(platform_settings_section,
                                 "PlatformSettings", namespace=wa)
        _validate_ovf(platform_settings, "PlatformSettings not found")

        self.provision_guest_agent = findtext(platform_settings,
                                              "ProvisionGuestAgent",
                                              namespace=wa)
        _validate_ovf(self.provision_guest_agent,
                      "ProvisionGuestAgent not found")
def validate_param(name, val, expected_type):
    """Raise ProtocolError when *val* is None or not an *expected_type*."""
    if val is None:
        raise ProtocolError("{0} is None".format(name))
    if not isinstance(val, expected_type):
        raise ProtocolError(
            "{0} type should be {1} not {2}".format(name, expected_type,
                                                    type(val)))


def set_properties(name, obj, data):
    """Recursively copy *data* (parsed JSON) onto *obj* and return it."""
    if isinstance(obj, DataContract):
        validate_param("Property '{0}'".format(name), data, dict)
        for prop_name, prop_val in data.items():
            full_name = "{0}.{1}".format(name, prop_name)
            try:
                prop = getattr(obj, prop_name)
            except AttributeError:
                # Unknown keys in the payload are logged and skipped.
                logger.warn("Unknown property: {0}", full_name)
                continue
            setattr(obj, prop_name, set_properties(full_name, prop, prop_val))
        return obj

    if isinstance(obj, DataContractList):
        validate_param("List '{0}'".format(name), data, list)
        for entry in data:
            obj.append(set_properties(name, obj.item_cls(), entry))
        return obj

    # Scalars pass through unchanged.
    return data


def get_properties(obj):
    """Recursively convert a data contract back into plain dict/list/scalars."""
    if isinstance(obj, DataContract):
        return {key: get_properties(val) for key, val in vars(obj).items()}
    if isinstance(obj, DataContractList):
        return [get_properties(item) for item in obj]
    return obj


class DataContract(object):
    """Marker base class for JSON-mapped objects."""
    pass


class DataContractList(list):
    """A list whose elements are built from *item_cls* during deserialization."""

    def __init__(self, item_cls):
        self.item_cls = item_cls


"""
Data contract between guest and host
"""


class VMInfo(DataContract):
    def __init__(self,
                 subscriptionId=None,
                 vmName=None,
                 containerId=None,
                 roleName=None,
                 roleInstanceName=None,
                 tenantName=None):
        self.subscriptionId = subscriptionId
        self.vmName = vmName
        self.containerId = containerId
        self.roleName = roleName
        self.roleInstanceName = roleInstanceName
        self.tenantName = tenantName


class CertificateData(DataContract):
    def __init__(self, certificateData=None):
        self.certificateData = certificateData


class Cert(DataContract):
    def __init__(self,
                 name=None,
                 thumbprint=None,
                 certificateDataUri=None,
                 storeName=None,
                 storeLocation=None):
        self.name = name
        self.thumbprint = thumbprint
        self.certificateDataUri = certificateDataUri
        self.storeLocation = storeLocation
        self.storeName = storeName


class CertList(DataContract):
    def __init__(self):
        self.certificates = DataContractList(Cert)


# TODO: confirm vmagent manifest schema
class VMAgentManifestUri(DataContract):
    def __init__(self, uri=None):
        self.uri = uri


class VMAgentManifest(DataContract):
    def __init__(self, family=None):
        self.family = family
        self.versionsManifestUris = DataContractList(VMAgentManifestUri)


class VMAgentManifestList(DataContract):
    def __init__(self):
        self.vmAgentManifests = DataContractList(VMAgentManifest)


class Extension(DataContract):
    def __init__(self,
                 name=None,
                 sequenceNumber=None,
                 publicSettings=None,
                 protectedSettings=None,
                 certificateThumbprint=None):
        self.name = name
        self.sequenceNumber = sequenceNumber
        self.publicSettings = publicSettings
        self.protectedSettings = protectedSettings
        self.certificateThumbprint = certificateThumbprint
class ExtHandlerProperties(DataContract):
    def __init__(self):
        self.version = None
        self.dependencyLevel = None
        self.state = None
        self.extensions = DataContractList(Extension)


class ExtHandlerVersionUri(DataContract):
    def __init__(self):
        self.uri = None


class ExtHandler(DataContract):
    def __init__(self, name=None):
        self.name = name
        self.properties = ExtHandlerProperties()
        self.versionUris = DataContractList(ExtHandlerVersionUri)

    def sort_key(self):
        """Ordering key: enabled handlers by level; others first, reversed."""
        level = self.properties.dependencyLevel
        if level is None:
            level = 0
        # Process uninstall or disabled before enabled, in reverse order:
        # remap 0 to -1, 1 to -2, 2 to -3, etc.
        if self.properties.state != u"enabled":
            level = -(level + 1)
        return level


class ExtHandlerList(DataContract):
    def __init__(self):
        self.extHandlers = DataContractList(ExtHandler)


class ExtHandlerPackageUri(DataContract):
    def __init__(self, uri=None):
        self.uri = uri


class ExtHandlerPackage(DataContract):
    def __init__(self, version=None):
        self.version = version
        self.uris = DataContractList(ExtHandlerPackageUri)
        # TODO update the naming to align with metadata protocol
        self.isinternal = False
        self.disallow_major_upgrade = False


class ExtHandlerPackageList(DataContract):
    def __init__(self):
        self.versions = DataContractList(ExtHandlerPackage)


class VMProperties(DataContract):
    def __init__(self, certificateThumbprint=None):
        # TODO need to confirm the property name
        self.certificateThumbprint = certificateThumbprint


class ProvisionStatus(DataContract):
    def __init__(self, status=None, subStatus=None, description=None):
        self.status = status
        self.subStatus = subStatus
        self.description = description
        self.properties = VMProperties()


class ExtensionSubStatus(DataContract):
    def __init__(self, name=None, status=None, code=None, message=None):
        self.name = name
        self.status = status
        self.code = code
        self.message = message


class ExtensionStatus(DataContract):
    def __init__(self,
                 configurationAppliedTime=None,
                 operation=None,
                 status=None,
                 seq_no=None,
                 code=None,
                 message=None):
        self.configurationAppliedTime = configurationAppliedTime
        self.operation = operation
        self.status = status
        self.sequenceNumber = seq_no
        self.code = code
        self.message = message
        self.substatusList = DataContractList(ExtensionSubStatus)


class ExtHandlerStatus(DataContract):
    def __init__(self,
                 name=None,
                 version=None,
                 status=None,
                 code=0,
                 message=None):
        self.name = name
        self.version = version
        self.status = status
        self.code = code
        self.message = message
        self.extensions = DataContractList(ustr)


class VMAgentStatus(DataContract):
    def __init__(self, status=None, message=None):
        self.status = status
        self.message = message
        self.hostname = socket.gethostname()
        self.version = str(CURRENT_VERSION)
        self.osname = DISTRO_NAME
        self.osversion = DISTRO_VERSION
        self.extensionHandlers = DataContractList(ExtHandlerStatus)


class VMStatus(DataContract):
    def __init__(self, status, message):
        self.vmAgent = VMAgentStatus(status=status, message=message)


class TelemetryEventParam(DataContract):
    def __init__(self, name=None, value=None):
        self.name = name
        self.value = value


class TelemetryEvent(DataContract):
    def __init__(self, eventId=None, providerId=None):
        self.eventId = eventId
        self.providerId = providerId
        self.parameters = DataContractList(TelemetryEventParam)


class TelemetryEventList(DataContract):
    def __init__(self):
        self.events = DataContractList(TelemetryEvent)


class RemoteAccessUser(DataContract):
    def __init__(self, name, encrypted_password, expiration):
        self.name = name
        self.encrypted_password = encrypted_password
        self.expiration = expiration


class RemoteAccessUsersList(DataContract):
    def __init__(self):
        self.users = DataContractList(RemoteAccessUser)


class Protocol(DataContract):
    """Abstract interface implemented by the wire and metadata protocols."""

    def detect(self):
        raise NotImplementedError()

    def get_vminfo(self):
        raise NotImplementedError()

    def get_certs(self):
        raise NotImplementedError()

    def get_incarnation(self):
        raise NotImplementedError()

    def get_vmagent_manifests(self):
        raise NotImplementedError()

    def get_vmagent_pkgs(self, manifest):
        raise NotImplementedError()

    def get_ext_handlers(self):
        raise NotImplementedError()

    def get_ext_handler_pkgs(self, extension):
        raise NotImplementedError()

    def get_artifacts_profile(self):
        raise NotImplementedError()

    def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True):
        raise NotImplementedError()

    def report_provision_status(self, provision_status):
        raise NotImplementedError()

    def report_vm_status(self, vm_status):
        raise NotImplementedError()

    def report_ext_status(self, ext_handler_name, ext_name, ext_status):
        raise NotImplementedError()

    def report_event(self, event):
        raise NotImplementedError()
# # Requires Python 2.6+ and Openssl 1.0+ # import errno import os import re import shutil import threading import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import ProtocolError, OSUtilError, \ ProtocolNotFoundError, DhcpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.protocol.metadata import MetadataProtocol, \ METADATA_ENDPOINT from azurelinuxagent.common.utils.restutil import IOErrorCounter OVF_FILE_NAME = "ovf-env.xml" TAG_FILE_NAME = "useMetadataEndpoint.tag" PROTOCOL_FILE_NAME = "Protocol" MAX_RETRY = 360 PROBE_INTERVAL = 10 ENDPOINT_FILE_NAME = "WireServerEndpoint" PASSWORD_PATTERN = ".*?<" PASSWORD_REPLACEMENT = "*<" class _nameset(set): def __getattr__(self, name): if name in self: return name raise AttributeError("%s not a valid value" % name) prots = _nameset(("WireProtocol", "MetadataProtocol")) def get_protocol_util(): return ProtocolUtil() class ProtocolUtil(object): """ ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ def __init__(self): self.lock = threading.Lock() self.protocol = None self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() def copy_ovf_env(self): """ Copy ovf env file from dvd to hard disk. 
Remove password before save it to the disk """ dvd_mount_point = conf.get_dvd_mount_point() ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) tag_file_path = self._get_tag_file_path() try: self.osutil.mount_dvd() except OSUtilError as e: raise ProtocolError("[CopyOvfEnv] Error mounting dvd: " "{0}".format(ustr(e))) try: ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) ovfenv = OvfEnv(ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error reading file " "{0}: {1}".format(ovf_file_path_on_dvd, ustr(e))) try: ovfxml = re.sub(PASSWORD_PATTERN, PASSWORD_REPLACEMENT, ovfxml) fileutil.write_file(ovf_file_path, ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error writing file " "{0}: {1}".format(ovf_file_path, ustr(e))) try: if os.path.isfile(tag_file_path_on_dvd): logger.info("Found {0} in provisioning ISO", TAG_FILE_NAME) shutil.copyfile(tag_file_path_on_dvd, tag_file_path) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error copying file " "{0} to {1}: {2}".format(tag_file_path, tag_file_path, ustr(e))) self._cleanup_ovf_dvd() return ovfenv def _cleanup_ovf_dvd(self): try: self.osutil.umount_dvd() self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) def get_ovf_env(self): """ Load saved ovf-env.xml """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) if os.path.isfile(ovf_file_path): xml_text = fileutil.read_file(ovf_file_path) return OvfEnv(xml_text) else: raise ProtocolError( "ovf-env.xml is missing from {0}".format(ovf_file_path)) def _get_protocol_file_path(self): return os.path.join( conf.get_lib_dir(), PROTOCOL_FILE_NAME) def _get_tag_file_path(self): return os.path.join( conf.get_lib_dir(), TAG_FILE_NAME) def _get_wireserver_endpoint(self): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) return 
fileutil.read_file(file_path) except IOError as e: raise OSUtilError(ustr(e)) def _set_wireserver_endpoint(self, endpoint): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) fileutil.write_file(file_path, endpoint) except IOError as e: raise OSUtilError(ustr(e)) def _detect_wire_protocol(self): endpoint = self.dhcp_handler.endpoint if endpoint is None: ''' Check if DHCP can be used to get the wire protocol endpoint ''' (dhcp_available, conf_endpoint) = self.osutil.is_dhcp_available() if dhcp_available: logger.info("WireServer endpoint is not found. Rerun dhcp handler") try: self.dhcp_handler.run() except DhcpError as e: raise ProtocolError(ustr(e)) endpoint = self.dhcp_handler.endpoint else: logger.info("_detect_wire_protocol: DHCP not available") endpoint = self._get_wireserver_endpoint() if endpoint == None: endpoint = conf_endpoint logger.info("Using hardcoded WireServer endpoint {0}", endpoint) else: logger.info("WireServer endpoint {0} read from file", endpoint) try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) return protocol except ProtocolError as e: logger.info("WireServer is not responding. Reset endpoint") self.dhcp_handler.endpoint = None self.dhcp_handler.skip_cache = True raise e def _detect_metadata_protocol(self): protocol = MetadataProtocol() protocol.detect() return protocol def _detect_protocol(self, protocols): """ Probe protocol endpoints in turn. 
""" self.clear_protocol() for retry in range(0, MAX_RETRY): for protocol_name in protocols: try: protocol = self._detect_wire_protocol() \ if protocol_name == prots.WireProtocol \ else self._detect_metadata_protocol() return (protocol_name, protocol) except ProtocolError as e: logger.info("Protocol endpoint not found: {0}, {1}", protocol_name, e) if retry < MAX_RETRY - 1: logger.info("Retry detect protocols: retry={0}", retry) time.sleep(PROBE_INTERVAL) raise ProtocolNotFoundError("No protocol found.") def _get_protocol(self): """ Get protocol instance based on previous detecting result. """ protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) if protocol_name == prots.WireProtocol: endpoint = self._get_wireserver_endpoint() return WireProtocol(endpoint) elif protocol_name == prots.MetadataProtocol: return MetadataProtocol() else: raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) def _save_protocol(self, protocol_name): """ Save protocol endpoint """ protocol_file_path = self._get_protocol_file_path() try: fileutil.write_file(protocol_file_path, protocol_name) except IOError as e: logger.error("Failed to save protocol endpoint: {0}", e) def clear_protocol(self): """ Cleanup previous saved endpoint. """ logger.info("Clean protocol") self.protocol = None protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): return try: os.remove(protocol_file_path) except IOError as e: # Ignore file-not-found errors (since the file is being removed) if e.errno == errno.ENOENT: return logger.error("Failed to clear protocol endpoint: {0}", e) def get_protocol(self, by_file=False): """ Detect protocol by endpoints, if by_file is True, detect MetadataProtocol in priority. 
:returns: protocol instance """ self.lock.acquire() try: if self.protocol is not None: return self.protocol try: self.protocol = self._get_protocol() return self.protocol except ProtocolNotFoundError: pass logger.info("Detect protocol endpoints") protocols = [prots.WireProtocol] if by_file: tag_file_path = self._get_tag_file_path() if os.path.isfile(tag_file_path): protocols.insert(0, prots.MetadataProtocol) else: protocols.append(prots.MetadataProtocol) protocol_name, protocol = self._detect_protocol(protocols) IOErrorCounter.set_protocol_endpoint(endpoint=protocol.endpoint) self._save_protocol(protocol_name) self.protocol = protocol return self.protocol finally: self.lock.release() WALinuxAgent-2.2.32/azurelinuxagent/common/protocol/wire.py000066400000000000000000002036251335416306700240240ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ import datetime import json import os import random import re import sys import time import traceback import xml.sax.saxutils as saxutils from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import ProtocolNotFoundError, \ ResourceGoneError from azurelinuxagent.common.future import httpclient, bytebuffer from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol, URI_FORMAT_GET_EXTENSION_ARTIFACT, \ HOST_PLUGIN_PORT from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.utils.archive import StateFlusher from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \ findtext, getattrib, gettext, remove_bom, get_bytes_from_pem, parse_json from azurelinuxagent.common.version import AGENT_NAME from azurelinuxagent.common.osutil import get_osutil VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" HEALTH_REPORT_URI = "http://{0}/machine?comp=health" ROLE_PROP_URI = "http://{0}/machine?comp=roleProperties" TELEMETRY_URI = "http://{0}/machine?comp=telemetrydata" WIRE_SERVER_ADDR_FILE_NAME = "WireServer" INCARNATION_FILE_NAME = "Incarnation" GOAL_STATE_FILE_NAME = "GoalState.{0}.xml" HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" SHARED_CONF_FILE_NAME = "SharedConfig.xml" CERTS_FILE_NAME = "Certificates.xml" REMOTE_ACCESS_FILE_NAME = "RemoteAccess.{0}.xml" P7M_FILE_NAME = "Certificates.p7m" PEM_FILE_NAME = "Certificates.pem" EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" MANIFEST_FILE_NAME = "{0}.{1}.manifest.xml" AGENTS_MANIFEST_FILE_NAME = "{0}.{1}.agentsManifest" TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" PROTOCOL_VERSION = 
"2012-11-30" ENDPOINT_FINE_NAME = "WireServer" SHORT_WAITING_INTERVAL = 1 # 1 second class UploadError(HttpError): pass class WireProtocol(Protocol): """Slim layer to adapt wire protocol data to metadata protocol interface""" # TODO: Clean-up goal state processing # At present, some methods magically update GoalState (e.g., # get_vmagent_manifests), others (e.g., get_vmagent_pkgs) # assume its presence. A better approach would make an explicit update # call that returns the incarnation number and # establishes that number the "context" for all other calls (either by # updating the internal state of the protocol or # by having callers pass the incarnation number to the method). def __init__(self, endpoint): if endpoint is None: raise ProtocolError("WireProtocol endpoint is None") self.endpoint = endpoint self.client = WireClient(self.endpoint) def detect(self): self.client.check_wire_protocol_version() trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) self.update_goal_state(forced=True) def update_goal_state(self, forced=False, max_retry=3): self.client.update_goal_state(forced=forced, max_retry=max_retry) def get_vminfo(self): goal_state = self.client.get_goal_state() hosting_env = self.client.get_hosting_env() vminfo = VMInfo() vminfo.subscriptionId = None vminfo.vmName = hosting_env.vm_name vminfo.tenantName = hosting_env.deployment_name vminfo.roleName = hosting_env.role_name vminfo.roleInstanceName = goal_state.role_instance_id vminfo.containerId = goal_state.container_id return vminfo def get_certs(self): certificates = self.client.get_certs() return certificates.cert_list def get_incarnation(self): path = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) if os.path.exists(path): return fileutil.read_file(path) else: return 0 def 
get_vmagent_manifests(self): # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() return ext_conf.vmagent_manifests, goal_state.incarnation def get_vmagent_pkgs(self, vmagent_manifest): goal_state = self.client.get_goal_state() ga_manifest = self.client.get_gafamily_manifest(vmagent_manifest, goal_state) valid_pkg_list = self.client.filter_package_list(vmagent_manifest.family, ga_manifest, goal_state) return valid_pkg_list def get_ext_handlers(self): logger.verbose("Get extension handler config") # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() # In wire protocol, incarnation is equivalent to ETag return ext_conf.ext_handlers, goal_state.incarnation def get_ext_handler_pkgs(self, ext_handler): logger.verbose("Get extension handler package") goal_state = self.client.get_goal_state() man = self.client.get_ext_manifest(ext_handler, goal_state) return man.pkg_list def get_artifacts_profile(self): logger.verbose("Get In-VM Artifacts Profile") return self.client.get_artifacts_profile() def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): success = self.client.stream(uri, destination, headers=headers, use_proxy=use_proxy) if not success: logger.verbose("Download did not succeed, falling back to host plugin") host = self.client.get_host_plugin() uri, headers = host.get_artifact_request(uri, host.manifest_uri) success = self.client.stream(uri, destination, headers=headers, use_proxy=False) return success def report_provision_status(self, provision_status): validate_param("provision_status", provision_status, ProvisionStatus) if provision_status.status is not None: self.client.report_health(provision_status.status, provision_status.subStatus, provision_status.description) if provision_status.properties.certificateThumbprint is not None: 
thumbprint = provision_status.properties.certificateThumbprint self.client.report_role_prop(thumbprint) def report_vm_status(self, vm_status): validate_param("vm_status", vm_status, VMStatus) self.client.status_blob.set_vm_status(vm_status) self.client.upload_status_blob() def report_ext_status(self, ext_handler_name, ext_name, ext_status): validate_param("ext_status", ext_status, ExtensionStatus) self.client.status_blob.set_ext_status(ext_handler_name, ext_status) def report_event(self, events): validate_param("events", events, TelemetryEventList) self.client.report_event(events) def _build_role_properties(container_id, role_instance_id, thumbprint): xml = (u"" u"" u"" u"{0}" u"" u"" u"{1}" u"" u"" u"" u"" u"" u"" u"" u"").format(container_id, role_instance_id, thumbprint) return xml def _build_health_report(incarnation, container_id, role_instance_id, status, substatus, description): # Escape '&', '<' and '>' description = saxutils.escape(ustr(description)) detail = u'' if substatus is not None: substatus = saxutils.escape(ustr(substatus)) detail = (u"
" u"{0}" u"{1}" u"
").format(substatus, description) xml = (u"" u"" u"{0}" u"" u"{1}" u"" u"" u"{2}" u"" u"{3}" u"{4}" u"" u"" u"" u"" u"" u"").format(incarnation, container_id, role_instance_id, status, detail) return xml def ga_status_to_guest_info(ga_status): """ Convert VMStatus object to status blob format """ v1_ga_guest_info = { "computerName" : ga_status.hostname, "osName" : ga_status.osname, "osVersion" : ga_status.osversion, "version" : ga_status.version, } return v1_ga_guest_info def ga_status_to_v1(ga_status): formatted_msg = { 'lang': 'en-US', 'message': ga_status.message } v1_ga_status = { "version" : ga_status.version, "status" : ga_status.status, "formattedMessage" : formatted_msg } return v1_ga_status def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: status = { "name": substatus.name, "status": substatus.status, "code": substatus.code, "formattedMessage": { "lang": "en-US", "message": substatus.message } } status_list.append(status) return status_list def ext_status_to_v1(ext_name, ext_status): if ext_status is None: return None timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) v1_ext_status = { "status": { "name": ext_name, "configurationAppliedTime": ext_status.configurationAppliedTime, "operation": ext_status.operation, "status": ext_status.status, "code": ext_status.code, "formattedMessage": { "lang": "en-US", "message": ext_status.message } }, "version": 1.0, "timestampUTC": timestamp } if len(v1_sub_status) != 0: v1_ext_status['status']['substatus'] = v1_sub_status return v1_ext_status def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): v1_handler_status = { 'handlerVersion': handler_status.version, 'handlerName': handler_status.name, 'status': handler_status.status, 'code': handler_status.code, 'useExactVersion': True } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { "lang": "en-US", 
"message": handler_status.message } if len(handler_status.extensions) > 0: # Currently, no more than one extension per handler ext_name = handler_status.extensions[0] ext_status = ext_statuses.get(ext_name) v1_ext_status = ext_status_to_v1(ext_name, ext_status) if ext_status is not None and v1_ext_status is not None: v1_handler_status["runtimeSettingsStatus"] = { 'settingsStatus': v1_ext_status, 'sequenceNumber': ext_status.sequenceNumber } return v1_handler_status def vm_status_to_v1(vm_status, ext_statuses): timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_ga_guest_info = ga_status_to_guest_info(vm_status.vmAgent) v1_ga_status = ga_status_to_v1(vm_status.vmAgent) v1_handler_status_list = [] for handler_status in vm_status.vmAgent.extensionHandlers: v1_handler_status = ext_handler_status_to_v1(handler_status, ext_statuses, timestamp) if v1_handler_status is not None: v1_handler_status_list.append(v1_handler_status) v1_agg_status = { 'guestAgentStatus': v1_ga_status, 'handlerAggregateStatus': v1_handler_status_list } v1_vm_status = { 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, 'guestOSInfo' : v1_ga_guest_info } return v1_vm_status class StatusBlob(object): def __init__(self, client): self.vm_status = None self.ext_statuses = {} self.client = client self.type = None self.data = None def set_vm_status(self, vm_status): validate_param("vmAgent", vm_status, VMStatus) self.vm_status = vm_status def set_ext_status(self, ext_handler_name, ext_status): validate_param("extensionStatus", ext_status, ExtensionStatus) self.ext_statuses[ext_handler_name] = ext_status def to_json(self): report = vm_status_to_v1(self.vm_status, self.ext_statuses) return json.dumps(report) __storage_version__ = "2014-02-14" def prepare(self, blob_type): logger.verbose("Prepare status blob") self.data = self.to_json() self.type = blob_type def upload(self, url): try: if not self.type in ["BlockBlob", "PageBlob"]: raise ProtocolError("Illegal blob 
type: {0}".format(self.type)) if self.type == "BlockBlob": self.put_block_blob(url, self.data) else: self.put_page_blob(url, self.data) return True except Exception as e: logger.verbose("Initial status upload failed: {0}", e) return False def get_block_blob_headers(self, blob_size): return { "Content-Length": ustr(blob_size), "x-ms-blob-type": "BlockBlob", "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-version": self.__class__.__storage_version__ } def put_block_blob(self, url, data): logger.verbose("Put block blob") headers = self.get_block_blob_headers(len(data)) resp = self.client.call_storage_service(restutil.http_put, url, data, headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to upload block blob: {0}".format(resp.status)) def get_page_blob_create_headers(self, blob_size): return { "Content-Length": "0", "x-ms-blob-content-length": ustr(blob_size), "x-ms-blob-type": "PageBlob", "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-version": self.__class__.__storage_version__ } def get_page_blob_page_headers(self, start, end): return { "Content-Length": ustr(end - start), "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "x-ms-range": "bytes={0}-{1}".format(start, end - 1), "x-ms-page-write": "update", "x-ms-version": self.__class__.__storage_version__ } def put_page_blob(self, url, data): logger.verbose("Put page blob") # Convert string into bytes and align to 512 bytes data = bytearray(data, encoding='utf-8') page_blob_size = int((len(data) + 511) / 512) * 512 headers = self.get_page_blob_create_headers(page_blob_size) resp = self.client.call_storage_service(restutil.http_put, url, "", headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to clean up page blob: {0}".format(resp.status)) if url.count("?") <= 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) logger.verbose("Upload page blob") page_max = 4 * 1024 * 1024 # Max page 
size: 4MB start = 0 end = 0 while end < len(data): end = min(len(data), start + page_max) content_size = end - start # Align to 512 bytes page_end = int((end + 511) / 512) * 512 buf_size = page_end - start buf = bytearray(buf_size) buf[0: content_size] = data[start: end] headers = self.get_page_blob_page_headers(start, page_end) resp = self.client.call_storage_service( restutil.http_put, url, bytebuffer(buf), headers) if resp is None or resp.status != httpclient.CREATED: raise UploadError( "Failed to upload page blob: {0}".format(resp.status)) start = end def event_param_to_v1(param): param_format = '' param_type = type(param.value) attr_type = "" if param_type is int: attr_type = 'mt:uint64' elif param_type is str: attr_type = 'mt:wstr' elif ustr(param_type).count("'unicode'") > 0: attr_type = 'mt:wstr' elif param_type is bool: attr_type = 'mt:bool' elif param_type is float: attr_type = 'mt:float64' return param_format.format(param.name, saxutils.quoteattr(ustr(param.value)), attr_type) def event_to_v1(event): params = "" for param in event.parameters: params += event_param_to_v1(param) event_str = ('' '' '').format(event.eventId, params) return event_str class WireClient(object): def __init__(self, endpoint): logger.info("Wire server endpoint:{0}", endpoint) self.endpoint = endpoint self.goal_state = None self.updated = None self.hosting_env = None self.shared_conf = None self.remote_access = None self.certs = None self.ext_conf = None self.host_plugin = None self.status_blob = StatusBlob(self) self.goal_state_flusher = StateFlusher(conf.get_lib_dir()) def call_wireserver(self, http_req, *args, **kwargs): try: # Never use the HTTP proxy for wireserver kwargs['use_proxy'] = False resp = http_req(*args, **kwargs) if restutil.request_failed(resp): msg = "[Wireserver Failed] URI {0} ".format(args[0]) if resp is not None: msg += " [HTTP Failed] Status Code {0}".format(resp.status) raise ProtocolError(msg) # If the GoalState is stale, pass along the exception to the 
caller except ResourceGoneError: raise except Exception as e: raise ProtocolError("[Wireserver Exception] {0}".format( ustr(e))) return resp def decode_config(self, data): if data is None: return None data = remove_bom(data) xml_text = ustr(data, encoding='utf-8') return xml_text def fetch_config(self, uri, headers): resp = self.call_wireserver(restutil.http_get, uri, headers=headers) return self.decode_config(resp.read()) def fetch_cache(self, local_file): if not os.path.isfile(local_file): raise ProtocolError("{0} is missing.".format(local_file)) try: return fileutil.read_file(local_file) except IOError as e: raise ProtocolError("Failed to read cache: {0}".format(e)) def save_cache(self, local_file, data): try: fileutil.write_file(local_file, data) except IOError as e: fileutil.clean_ioerror(e, paths=[local_file]) raise ProtocolError("Failed to write cache: {0}".format(e)) @staticmethod def call_storage_service(http_req, *args, **kwargs): # Default to use the configured HTTP proxy if not 'use_proxy' in kwargs or kwargs['use_proxy'] is None: kwargs['use_proxy'] = True return http_req(*args, **kwargs) def fetch_manifest(self, version_uris): logger.verbose("Fetch manifest") version_uris_shuffled = version_uris random.shuffle(version_uris_shuffled) for version in version_uris_shuffled: # GA expects a location and failoverLocation in ExtensionsConfig, but # this is not always the case. See #1147. 
if version.uri is None: logger.verbose('The specified manifest URL is empty, ignored.') continue response = None if not HostPluginProtocol.is_default_channel(): response = self.fetch(version.uri) if not response: if HostPluginProtocol.is_default_channel(): logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download manifest, " "switching to host plugin") try: host = self.get_host_plugin() uri, headers = host.get_artifact_request(version.uri) response = self.fetch(uri, headers, use_proxy=False) # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin except ResourceGoneError: HostPluginProtocol.set_default_channel(True) raise host.manifest_uri = version.uri logger.verbose("Manifest downloaded successfully from host plugin") if not HostPluginProtocol.is_default_channel(): logger.info("Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) if response: return response raise ProtocolError("Failed to fetch manifest from all sources") def stream(self, uri, destination, headers=None, use_proxy=None): success = False logger.verbose("Fetch [{0}] with headers [{1}] to file [{2}]", uri, headers, destination) response = self._fetch_response(uri, headers, use_proxy) if response is not None: chunk_size = 1024 * 1024 # 1MB buffer try: with open(destination, 'wb', chunk_size) as destination_fh: complete = False while not complete: chunk = response.read(chunk_size) destination_fh.write(chunk) complete = len(chunk) < chunk_size success = True except Exception as e: logger.error('Error streaming {0} to {1}: {2}'.format(uri, destination, ustr(e))) return success def fetch(self, uri, headers=None, use_proxy=None, decode=True): logger.verbose("Fetch [{0}] with headers [{1}]", uri, headers) content = None response = self._fetch_response(uri, headers, use_proxy) if response is not None: response_content = response.read() content = self.decode_config(response_content) if decode else 
response_content return content def _fetch_response(self, uri, headers=None, use_proxy=None): resp = None try: resp = self.call_storage_service( restutil.http_get, uri, headers=headers, use_proxy=use_proxy) if restutil.request_failed(resp): error_response = restutil.read_response_error(resp) msg = "Fetch failed from [{0}]: {1}".format(uri, error_response) logger.warn(msg) if self.host_plugin is not None: self.host_plugin.report_fetch_health(uri, is_healthy=not restutil.request_failed_at_hostplugin(resp), source='WireClient', response=error_response) raise ProtocolError(msg) else: if self.host_plugin is not None: self.host_plugin.report_fetch_health(uri, source='WireClient') except (HttpError, ProtocolError, IOError) as e: logger.verbose("Fetch failed from [{0}]: {1}", uri, e) if isinstance(e, ResourceGoneError): raise return resp def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: raise ProtocolError("HostingEnvironmentConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_config(goal_state.hosting_env_uri, self.get_header()) self.save_cache(local_file, xml_text) self.hosting_env = HostingEnv(xml_text) def update_shared_conf(self, goal_state): if goal_state.shared_conf_uri is None: raise ProtocolError("SharedConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_config(goal_state.shared_conf_uri, self.get_header()) self.save_cache(local_file, xml_text) self.shared_conf = SharedConfig(xml_text) def update_certs(self, goal_state): if goal_state.certs_uri is None: return local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_config(goal_state.certs_uri, self.get_header_for_cert()) self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) def update_remote_access_conf(self, goal_state): if goal_state.remote_access_uri is None: # Nothing in accounts data. 
Just return, nothing to do. return xml_text = self.fetch_config(goal_state.remote_access_uri, self.get_header_for_cert()) self.remote_access = RemoteAccess(xml_text) local_file = os.path.join(conf.get_lib_dir(), REMOTE_ACCESS_FILE_NAME.format(self.remote_access.incarnation)) self.save_cache(local_file, xml_text) def get_remote_access(self): incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) incarnation = self.fetch_cache(incarnation_file) file_name = REMOTE_ACCESS_FILE_NAME.format(incarnation) remote_access_file = os.path.join(conf.get_lib_dir(), file_name) if not os.path.isfile(remote_access_file): # no remote access data. return None xml_text = self.fetch_cache(remote_access_file) remote_access = RemoteAccess(xml_text) return remote_access def update_ext_conf(self, goal_state): if goal_state.ext_uri is None: logger.info("ExtensionsConfig.xml uri is empty") self.ext_conf = ExtensionsConfig(None) return incarnation = goal_state.incarnation local_file = os.path.join(conf.get_lib_dir(), EXT_CONF_FILE_NAME.format(incarnation)) xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) self.save_cache(local_file, xml_text) self.ext_conf = ExtensionsConfig(xml_text) def update_goal_state(self, forced=False, max_retry=3): incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) uri = GOAL_STATE_URI.format(self.endpoint) goal_state = None for retry in range(0, max_retry): try: if goal_state is None: xml_text = self.fetch_config(uri, self.get_header()) goal_state = GoalState(xml_text) if not forced: last_incarnation = None if os.path.isfile(incarnation_file): last_incarnation = fileutil.read_file( incarnation_file) new_incarnation = goal_state.incarnation if last_incarnation is not None and \ last_incarnation == new_incarnation: # Goalstate is not updated. 
return self.goal_state_flusher.flush(datetime.utcnow()) self.goal_state = goal_state file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) goal_state_file = os.path.join(conf.get_lib_dir(), file_name) self.save_cache(goal_state_file, xml_text) self.update_hosting_env(goal_state) self.update_shared_conf(goal_state) self.update_certs(goal_state) self.update_ext_conf(goal_state) self.update_remote_access_conf(goal_state) self.save_cache(incarnation_file, goal_state.incarnation) if self.host_plugin is not None: self.host_plugin.container_id = goal_state.container_id self.host_plugin.role_config_name = goal_state.role_config_name return except IOError as e: logger.warn("IOError processing goal state, retrying [{0}]", ustr(e)) except ResourceGoneError: logger.info("Goal state is stale, re-fetching") goal_state = None except ProtocolError as e: if retry < max_retry - 1: logger.verbose("ProtocolError processing goal state, retrying [{0}]", ustr(e)) else: logger.error("ProtocolError processing goal state, giving up [{0}]", ustr(e)) except Exception as e: if retry < max_retry-1: logger.verbose("Exception processing goal state, retrying: [{0}]", ustr(e)) else: logger.error("Exception processing goal state, giving up: [{0}]", ustr(e)) raise ProtocolError("Exceeded max retry updating goal state") def get_goal_state(self): if self.goal_state is None: incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) incarnation = self.fetch_cache(incarnation_file) file_name = GOAL_STATE_FILE_NAME.format(incarnation) goal_state_file = os.path.join(conf.get_lib_dir(), file_name) xml_text = self.fetch_cache(goal_state_file) self.goal_state = GoalState(xml_text) return self.goal_state def get_hosting_env(self): if self.hosting_env is None: local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_cache(local_file) self.hosting_env = HostingEnv(xml_text) return self.hosting_env def get_shared_conf(self): if self.shared_conf is None: 
local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_cache(local_file) self.shared_conf = SharedConfig(xml_text) return self.shared_conf def get_certs(self): if self.certs is None: local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_cache(local_file) self.certs = Certificates(self, xml_text) if self.certs is None: return None return self.certs def get_current_handlers(self): handler_list = list() try: incarnation = self.fetch_cache(os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME)) ext_conf = ExtensionsConfig(self.fetch_cache(os.path.join(conf.get_lib_dir(), EXT_CONF_FILE_NAME.format(incarnation)))) handler_list = ext_conf.ext_handlers.extHandlers except ProtocolError as pe: # cache file is missing, nothing to do logger.verbose(ustr(pe)) except Exception as e: logger.error("Could not obtain current handlers: {0}", ustr(e)) return handler_list def get_ext_conf(self): if self.ext_conf is None: goal_state = self.get_goal_state() if goal_state.ext_uri is None: self.ext_conf = ExtensionsConfig(None) else: local_file = EXT_CONF_FILE_NAME.format(goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_cache(local_file) self.ext_conf = ExtensionsConfig(xml_text) return self.ext_conf def get_ext_manifest(self, ext_handler, goal_state): for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) goal_state = self.get_goal_state() local_file = MANIFEST_FILE_NAME.format( ext_handler.name, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_manifest(ext_handler.versionUris) self.save_cache(local_file, xml_text) return ExtensionManifest(xml_text) except ResourceGoneError: continue raise ProtocolError("Failed to retrieve extension manifest") def filter_package_list(self, family, ga_manifest, goal_state): complete_list = ga_manifest.pkg_list agent_manifest = 
os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( family, goal_state.incarnation)) if not os.path.exists(agent_manifest): # clear memory cache ga_manifest.allowed_versions = None # create disk cache with open(agent_manifest, mode='w') as manifest_fh: for version in complete_list.versions: manifest_fh.write('{0}\n'.format(version.version)) fileutil.chmod(agent_manifest, 0o644) return complete_list else: # use allowed versions from cache, otherwise from disk if ga_manifest.allowed_versions is None: with open(agent_manifest, mode='r') as manifest_fh: ga_manifest.allowed_versions = [v.strip('\n') for v in manifest_fh.readlines()] # use the updated manifest urls for allowed versions allowed_list = ExtHandlerPackageList() allowed_list.versions = [version for version in complete_list.versions if version.version in ga_manifest.allowed_versions] return allowed_list def get_gafamily_manifest(self, vmagent_manifest, goal_state): for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) goal_state = self.get_goal_state() self._remove_stale_agent_manifest( vmagent_manifest.family, goal_state.incarnation) local_file = MANIFEST_FILE_NAME.format( vmagent_manifest.family, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_manifest( vmagent_manifest.versionsManifestUris) fileutil.write_file(local_file, xml_text) return ExtensionManifest(xml_text) except ResourceGoneError: continue raise ProtocolError("Failed to retrieve GAFamily manifest") def _remove_stale_agent_manifest(self, family, incarnation): """ The incarnation number can reset at any time, which means there could be a stale agentsManifest on disk. Stale files are cleaned on demand as new goal states arrive from WireServer. If the stale file is not removed agent upgrade may be delayed. :param family: GA family, e.g. 
Prod or Test :param incarnation: incarnation of the current goal state """ fn = AGENTS_MANIFEST_FILE_NAME.format( family, incarnation) agent_manifest = os.path.join(conf.get_lib_dir(), fn) if os.path.exists(agent_manifest): os.unlink(agent_manifest) def check_wire_protocol_version(self): uri = VERSION_INFO_URI.format(self.endpoint) version_info_xml = self.fetch_config(uri, None) version_info = VersionInfo(version_info_xml) preferred = version_info.get_preferred() if PROTOCOL_VERSION == preferred: logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) elif PROTOCOL_VERSION in version_info.get_supported(): logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) logger.info("Server preferred version:{0}", preferred) else: error = ("Agent supported wire protocol version: {0} was not " "advised by Fabric.").format(PROTOCOL_VERSION) raise ProtocolNotFoundError(error) def upload_status_blob(self): self.update_goal_state() ext_conf = self.get_ext_conf() if ext_conf.status_upload_blob is None: self.update_goal_state(forced=True) ext_conf = self.get_ext_conf() if ext_conf.status_upload_blob is None: raise ProtocolNotFoundError("Status upload uri is missing") blob_type = ext_conf.status_upload_blob_type if blob_type not in ["BlockBlob", "PageBlob"]: blob_type = "BlockBlob" logger.verbose("Status Blob type is unspecified, assuming BlockBlob") try: self.status_blob.prepare(blob_type) except Exception as e: raise ProtocolError("Exception creating status blob: {0}", ustr(e)) # Swap the order of use for the HostPlugin vs. the "direct" route. # Prefer the use of HostPlugin. If HostPlugin fails fall back to the # direct route. # # The code previously preferred the "direct" route always, and only fell back # to the HostPlugin *if* there was an error. We would like to move to # the HostPlugin for all traffic, but this is a big change. We would like # to see how this behaves at scale, and have a fallback should things go # wrong. This is why we try HostPlugin then direct. 
try: host = self.get_host_plugin() host.put_vm_status(self.status_blob, ext_conf.status_upload_blob, ext_conf.status_upload_blob_type) return except ResourceGoneError: # do not attempt direct, force goal state update and wait to try again self.update_goal_state(forced=True) return except Exception as e: # for all other errors, fall back to direct msg = "Falling back to direct upload: {0}".format(ustr(e)) self.report_status_event(msg, is_success=True) try: if self.status_blob.upload(ext_conf.status_upload_blob): return except Exception as e: msg = "Exception uploading status blob: {0}".format(ustr(e)) self.report_status_event(msg, is_success=False) raise ProtocolError("Failed to upload status blob via either channel") def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() role_prop = _build_role_properties(goal_state.container_id, goal_state.role_instance_id, thumbprint) role_prop = role_prop.encode("utf-8") role_prop_uri = ROLE_PROP_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: resp = self.call_wireserver(restutil.http_post, role_prop_uri, role_prop, headers=headers) except HttpError as e: raise ProtocolError((u"Failed to send role properties: " u"{0}").format(e)) if resp.status != httpclient.ACCEPTED: raise ProtocolError((u"Failed to send role properties: " u",{0}: {1}").format(resp.status, resp.read())) def report_health(self, status, substatus, description): goal_state = self.get_goal_state() health_report = _build_health_report(goal_state.incarnation, goal_state.container_id, goal_state.role_instance_id, status, substatus, description) health_report = health_report.encode("utf-8") health_report_uri = HEALTH_REPORT_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: # 30 retries with 10s sleep gives ~5min for wireserver updates; # this is retried 3 times with 15s sleep before throwing a # ProtocolError, for a total of ~15min. 
    def send_event(self, provider_id, event_str):
        """Post one batch of serialized telemetry events to the wire server.

        :param provider_id: telemetry provider GUID the events belong to
        :param event_str: pre-serialized event payload for this provider
        :raises ProtocolError: on transport failure or a failed HTTP status
        """
        uri = TELEMETRY_URI.format(self.endpoint)
        # NOTE(review): this template only references {1} (the event body);
        # provider_id ({0}) is never interpolated and the XML element markup
        # appears to have been lost from this copy of the source — verify
        # against source control before relying on the wire format here.
        data_format = ('' '' '{1}' '' '')
        data = data_format.format(provider_id, event_str)
        try:
            header = self.get_header_for_xml_content()
            resp = self.call_wireserver(restutil.http_post, uri, data, header)
        except HttpError as e:
            raise ProtocolError("Failed to send events:{0}".format(e))
        if restutil.request_failed(resp):
            # Log the response body for diagnosis before failing.
            logger.verbose(resp.read())
            raise ProtocolError(
                "Failed to send events:{0}".format(resp.status))
for provider_id in list(buf.keys()): if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) def report_status_event(self, message, is_success): from azurelinuxagent.common.event import report_event, \ WALAEventOperation report_event(op=WALAEventOperation.ReportStatus, is_success=is_success, message=message, log_event=not is_success) def get_header(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION } def get_header_for_xml_content(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "Content-Type": "text/xml;charset=utf-8" } def get_header_for_cert(self): trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) content = self.fetch_cache(trans_cert_file) cert = get_bytes_from_pem(content) return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "x-ms-cipher-name": "DES_EDE3_CBC", "x-ms-guest-agent-public-x509-cert": cert } def get_host_plugin(self): if self.host_plugin is None: goal_state = self.get_goal_state() self.host_plugin = HostPluginProtocol(self.endpoint, goal_state.container_id, goal_state.role_config_name) return self.host_plugin def has_artifacts_profile_blob(self): return self.ext_conf and not \ textutil.is_str_none_or_whitespace(self.ext_conf.artifacts_profile_blob) def get_artifacts_profile(self): artifacts_profile = None for update_goal_state in [False, True]: try: if update_goal_state: self.update_goal_state(forced=True) if self.has_artifacts_profile_blob(): blob = self.ext_conf.artifacts_profile_blob profile = None if not HostPluginProtocol.is_default_channel(): logger.verbose("Retrieving the artifacts profile") profile = self.fetch(blob) if profile is None: if HostPluginProtocol.is_default_channel(): logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download artifacts profile, " "switching to host plugin") host = self.get_host_plugin() uri, headers = host.get_artifact_request(blob) 
class VersionInfo(object):
    """Parsed view of the wire server's version document.

    Exposes the server's preferred protocol version and the full
    list of versions it supports.
    """

    def __init__(self, xml_text):
        """Parse the version document fetched from the endpoint."""
        logger.verbose("Load Version.xml")
        self.parse(xml_text)

    def parse(self, xml_text):
        """Extract preferred and supported versions from the XML."""
        doc = parse_doc(xml_text)
        self.preferred = findtext(find(doc, "Preferred"), "Version")
        logger.info("Fabric preferred wire protocol version:{0}",
                    self.preferred)
        self.supported = []
        for node in findall(find(doc, "Supported"), "Version"):
            ver = gettext(node)
            logger.verbose("Fabric supported wire protocol version:{0}", ver)
            self.supported.append(ver)

    def get_preferred(self):
        """Return the server's preferred protocol version string."""
        return self.preferred

    def get_supported(self):
        """Return the list of protocol versions the server supports."""
        return self.supported
class HostingEnv(object):
    """Parsed HostingEnvironmentConfig.xml (hosting environment config).

    Holds the VM name, role name, and deployment name read from the
    document.
    """

    def __init__(self, xml_text):
        """Parse the document; raises ValueError when xml_text is None."""
        if xml_text is None:
            raise ValueError("HostingEnvironmentConfig.xml is None")
        logger.verbose("Load HostingEnvironmentConfig.xml")
        self.vm_name = None
        self.role_name = None
        self.deployment_name = None
        self.xml_text = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """Extract VM/role/deployment names; returns self for chaining."""
        self.xml_text = xml_text
        doc = parse_doc(xml_text)
        self.vm_name = getattrib(find(doc, "Incarnation"), "instance")
        self.role_name = getattrib(find(doc, "Role"), "name")
        self.deployment_name = getattrib(find(doc, "Deployment"), "name")
        return self
""" def __init__(self, xml_text): logger.verbose("Load SharedConfig.xml") self.parse(xml_text) def parse(self, xml_text): """ parse and write configuration to file SharedConfig.xml. """ # Not used currently return self class RemoteAccess(object): """ Object containing information about user accounts """ # # # # # # # # # # # # # def __init__(self, xml_text): logger.verbose("Load RemoteAccess.xml") self.version = None self.incarnation = None self.user_list = RemoteAccessUsersList() self.xml_text = None self.parse(xml_text) def parse(self, xml_text): """ Parse xml document containing user account information """ if xml_text is None or len(xml_text) == 0: return None self.xml_text = xml_text xml_doc = parse_doc(xml_text) self.incarnation = findtext(xml_doc, "Incarnation") self.version = findtext(xml_doc, "Version") user_collection = find(xml_doc, "Users") users = findall(user_collection, "User") for user in users: remote_access_user = self.parse_user(user) self.user_list.users.append(remote_access_user) return self def parse_user(self, user): name = findtext(user, "Name") encrypted_password = findtext(user, "Password") expiration = findtext(user, "Expiration") remote_access_user = RemoteAccessUser(name, encrypted_password, expiration) return remote_access_user class UserAccount(object): """ Stores information about single user account """ def __init__(self): self.Name = None self.EncryptedPassword = None self.Password = None self.Expiration = None self.Groups = [] class Certificates(object): """ Object containing certificates of host and provisioned user. """ def __init__(self, client, xml_text): logger.verbose("Load Certificates.xml") self.client = client self.cert_list = CertList() self.parse(xml_text) def parse(self, xml_text): """ Parse multiple certificates into seperate files. 
""" xml_doc = parse_doc(xml_text) data = findtext(xml_doc, "Data") if data is None: return cryptutil = CryptUtil(conf.get_openssl_cmd()) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" "Content-Disposition: attachment; filename=\"{0}\"\n" "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" "Content-Transfer-Encoding: base64\n" "\n" "{2}").format(p7m_file, p7m_file, data) self.client.save_cache(p7m_file, p7m) trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) # decrypt certificates cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, pem_file) # The parsing process use public key to match prv and crt. buf = [] begin_crt = False begin_prv = False prvs = {} thumbprints = {} index = 0 v1_cert_list = [] with open(pem_file) as pem: for line in pem.readlines(): buf.append(line) if re.match(r'[-]+BEGIN.*KEY[-]+', line): begin_prv = True elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): begin_crt = True elif re.match(r'[-]+END.*KEY[-]+', line): tmp_file = self.write_to_tmp_file(index, 'prv', buf) pub = cryptutil.get_pubkey_from_prv(tmp_file) prvs[pub] = tmp_file buf = [] index += 1 begin_prv = False elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): tmp_file = self.write_to_tmp_file(index, 'crt', buf) pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ "name": None, "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = thumbprints[pubkey] if thumbprint: tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) 
os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) for v1_cert in v1_cert_list: cert = Cert() set_properties("certs", cert, v1_cert) self.cert_list.certificates.append(cert) def write_to_tmp_file(self, index, suffix, buf): file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.client.save_cache(file_name, "".join(buf)) return file_name class ExtensionsConfig(object): """ parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if true, remove if it is set to false. """ def __init__(self, xml_text): logger.verbose("Load ExtensionsConfig.xml") self.ext_handlers = ExtHandlerList() self.vmagent_manifests = VMAgentManifestList() self.status_upload_blob = None self.status_upload_blob_type = None self.artifacts_profile_blob = None if xml_text is not None: self.parse(xml_text) def parse(self, xml_text): """ Write configuration to file ExtensionsConfig.xml. """ xml_doc = parse_doc(xml_text) ga_families_list = find(xml_doc, "GAFamilies") ga_families = findall(ga_families_list, "GAFamily") for ga_family in ga_families: family = findtext(ga_family, "Name") uris_list = find(ga_family, "Uris") uris = findall(uris_list, "Uri") manifest = VMAgentManifest() manifest.family = family for uri in uris: manifestUri = VMAgentManifestUri(uri=gettext(uri)) manifest.versionsManifestUris.append(manifestUri) self.vmagent_manifests.vmAgentManifests.append(manifest) plugins_list = find(xml_doc, "Plugins") plugins = findall(plugins_list, "Plugin") plugin_settings_list = find(xml_doc, "PluginSettings") plugin_settings = findall(plugin_settings_list, "Plugin") for plugin in plugins: ext_handler = self.parse_plugin(plugin) self.ext_handlers.extHandlers.append(ext_handler) self.parse_plugin_settings(ext_handler, plugin_settings) self.status_upload_blob = findtext(xml_doc, "StatusUploadBlob") self.artifacts_profile_blob = findtext(xml_doc, "InVMArtifactsProfileBlob") status_upload_node = find(xml_doc, "StatusUploadBlob") 
self.status_upload_blob_type = getattrib(status_upload_node, "statusBlobType") logger.verbose("Extension config shows status blob type as [{0}]", self.status_upload_blob_type) def parse_plugin(self, plugin): ext_handler = ExtHandler() ext_handler.name = getattrib(plugin, "name") ext_handler.properties.version = getattrib(plugin, "version") ext_handler.properties.state = getattrib(plugin, "state") try: ext_handler.properties.dependencyLevel = int(getattrib(plugin, "dependencyLevel")) except ValueError: ext_handler.properties.dependencyLevel = 0 location = getattrib(plugin, "location") failover_location = getattrib(plugin, "failoverlocation") for uri in [location, failover_location]: version_uri = ExtHandlerVersionUri() version_uri.uri = uri ext_handler.versionUris.append(version_uri) return ext_handler def parse_plugin_settings(self, ext_handler, plugin_settings): if plugin_settings is None: return name = ext_handler.name version = ext_handler.properties.version settings = [x for x in plugin_settings \ if getattrib(x, "name") == name and \ getattrib(x, "version") == version] if settings is None or len(settings) == 0: return runtime_settings = None runtime_settings_node = find(settings[0], "RuntimeSettings") seqNo = getattrib(runtime_settings_node, "seqNo") runtime_settings_str = gettext(runtime_settings_node) try: runtime_settings = json.loads(runtime_settings_str) except ValueError as e: logger.error("Invalid extension settings") return for plugin_settings_list in runtime_settings["runtimeSettings"]: handler_settings = plugin_settings_list["handlerSettings"] ext = Extension() # There is no "extension name" in wire protocol. 
class ExtensionManifest(object):
    """Parsed extension manifest: the downloadable package versions.

    Packages from the Plugins element are public; those from
    InternalPlugins are flagged as internal.
    """

    def __init__(self, xml_text):
        if xml_text is None:
            raise ValueError("ExtensionManifest is None")
        logger.verbose("Load ExtensionManifest.xml")
        self.pkg_list = ExtHandlerPackageList()
        self.allowed_versions = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """Collect both public and internal package lists."""
        doc = parse_doc(xml_text)
        self._handle_packages(findall(find(doc, "Plugins"), "Plugin"),
                              False)
        self._handle_packages(findall(find(doc, "InternalPlugins"),
                                      "Plugin"),
                              True)

    def _handle_packages(self, packages, isinternal):
        """Append one ExtHandlerPackage per Plugin element."""
        for package in packages:
            pkg = ExtHandlerPackage()
            pkg.version = findtext(package, "Version")
            disallow = findtext(package, "DisallowMajorVersionUpgrade")
            # Absent flag means major-version upgrades are allowed.
            pkg.disallow_major_upgrade = (disallow or '').lower() == "true"
            for uri_text in [gettext(x)
                             for x in findall(find(package, "Uris"), "Uri")]:
                pkg_uri = ExtHandlerVersionUri()
                pkg_uri.uri = uri_text
                pkg.uris.append(pkg_uri)
            pkg.isinternal = isinternal
            self.pkg_list.versions.append(pkg)
It is expected to contain the following fields: * inVMArtifactsProfileBlobSeqNo * profileId (optional) * onHold (optional) * certificateThumbprint (optional) * encryptedHealthChecks (optional) * encryptedApplicationProfile (optional) """ def __init__(self, artifacts_profile): if not textutil.is_str_empty(artifacts_profile): self.__dict__.update(parse_json(artifacts_profile)) def is_on_hold(self): # hasattr() is not available in Python 2.6 if 'onHold' in self.__dict__: return self.onHold.lower() == 'true' return False WALinuxAgent-2.2.32/azurelinuxagent/common/rdma.py000066400000000000000000000317241335416306700221370ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ Handle packages and modules to enable RDMA for IB networking """ import os import re import time import threading import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf' ] def setup_rdma_device(): logger.verbose("Parsing SharedConfig XML contents for RDMA details") xml_doc = parse_doc( fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) if xml_doc is None: logger.error("Could not parse SharedConfig XML document") return instance_elem = find(xml_doc, "Instance") if not instance_elem: logger.error("Could not find in SharedConfig document") return rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address") if not rdma_ipv4_addr: logger.error( "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document") return rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress") if not rdma_mac_addr: logger.error( "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document") return # add colons to the MAC address (e.g. 00155D33FF1D -> # 00:15:5D:33:FF:1D) rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2] for i in range(0, len(rdma_mac_addr), 2)]) logger.info("Found RDMA details. IPv4={0} MAC={1}".format( rdma_ipv4_addr, rdma_mac_addr)) # Set up the RDMA device with collected informatino RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start() logger.info("RDMA: device is set up") return class RDMAHandler(object): driver_module_name = 'hv_network_direct' @staticmethod def get_rdma_version(): """Retrieve the firmware version information from the system. 
This depends on information provided by the Linux kernel.""" kvp_key_size = 512 kvp_value_size = 2048 driver_info_source = '/var/lib/hyperv/.kvp_pool_0' base_kernel_err_msg = 'Kernel does not provide the necessary ' base_kernel_err_msg += 'information or the kvp daemon is not running.' if not os.path.isfile(driver_info_source): error_msg = 'RDMA: Source file "%s" does not exist. ' error_msg += base_kernel_err_msg logger.error(error_msg % driver_info_source) return f = open(driver_info_source) while True : key = f.read(kvp_key_size) value = f.read(kvp_value_size) if key and value : key_0 = key.split("\x00")[0] value_0 = value.split("\x00")[0] if key_0 == "NdDriverVersion" : f.close() return value_0 else : break f.close() error_msg = 'RDMA: NdDriverVersion not found in "%s"' logger.error(error_msg % driver_info_source) return @staticmethod def is_kvp_daemon_running(): """Look for kvp daemon names in ps -ef output and return True/False """ # for centos, the hypervkvpd and the hv_kvp_daemon both are ok. # for suse, it uses hv_kvp_daemon kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon'] exitcode, ps_out = shellutil.run_get_output("ps -ef") if exitcode != 0: raise Exception('RDMA: ps -ef failed: %s' % ps_out) for n in kvp_daemon_names: if n in ps_out: logger.info('RDMA: kvp daemon (%s) is running' % n) return True else: logger.verbose('RDMA: kvp daemon (%s) is not running' % n) return False def load_driver_module(self): """Load the kernel driver, this depends on the proper driver to be installed with the install_driver() method""" logger.info("RDMA: probing module '%s'" % self.driver_module_name) result = shellutil.run('modprobe --first-time %s' % self.driver_module_name) if result != 0: error_msg = 'Could not load "%s" kernel module. 
' error_msg += 'Run "modprobe --first-time %s" as root for more details' logger.error( error_msg % (self.driver_module_name, self.driver_module_name) ) return False logger.info('RDMA: Loaded the kernel driver successfully.') return True def install_driver(self): """Install the driver. This is distribution specific and must be overwritten in the child implementation.""" logger.error('RDMAHandler.install_driver not implemented') def is_driver_loaded(self): """Check if the network module is loaded in kernel space""" cmd = 'lsmod | grep ^%s' % self.driver_module_name status, loaded_modules = shellutil.run_get_output(cmd) logger.info('RDMA: Checking if the module loaded.') if loaded_modules: logger.info('RDMA: module loaded.') return True logger.info('RDMA: module not loaded.') return False def reboot_system(self): """Reboot the system. This is required as the kernel module for the rdma driver cannot be unloaded with rmmod""" logger.info('RDMA: Rebooting system.') ret = shellutil.run('shutdown -r now') if ret != 0: logger.error('RDMA: Failed to reboot the system') dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf'] class RDMADeviceHandler(object): """ Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma interface. """ rdma_dev = '/dev/hvnd_rdma' device_check_timeout_sec = 120 device_check_interval_sec = 1 ipv4_addr = None mac_adr = None def __init__(self, ipv4_addr, mac_addr): self.ipv4_addr = ipv4_addr self.mac_addr = mac_addr def start(self): """ Start a thread in the background to process the RDMA tasks and returns. 
""" logger.info("RDMA: starting device processing in the background.") threading.Thread(target=self.process).start() def process(self): try: RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) skip_rdma_device = False module_name = "hv_network_direct" retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False) if retcode == 0: module_name = out.strip() else: logger.info("RDMA: failed to resolve module name. Use original name") retcode,out = shellutil.run_get_output("modprobe %s" % module_name) if retcode != 0: logger.error("RDMA: failed to load module %s" % module_name) return retcode,out = shellutil.run_get_output("modinfo %s" % module_name) if retcode == 0: version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) if version: v1 = int(version.groups(0)[0]) v2 = int(version.groups(0)[1]) if v1>4 or v1==4 and v2>0: logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later") skip_rdma_device = True else: logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.") else: logger.warn("RDMA: failed to get module info on hv_network_direct.") if not skip_rdma_device: RDMADeviceHandler.wait_rdma_device( self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) RDMADeviceHandler.write_rdma_config_to_device( self.rdma_dev, self.ipv4_addr, self.mac_addr) RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) except Exception as e: logger.error("RDMA: device processing failed: {0}".format(e)) @staticmethod def update_dat_conf(paths, ipv4_addr): """ Looks at paths for dat.conf file and updates the ip address for the infiniband interface. 
""" logger.info("Updating DAPL configuration file") for f in paths: logger.info("RDMA: trying {0}".format(f)) if not os.path.isfile(f): logger.info( "RDMA: DAPL config not found at {0}".format(f)) continue logger.info("RDMA: DAPL config is at: {0}".format(f)) cfg = fileutil.read_file(f) new_cfg = RDMADeviceHandler.replace_dat_conf_contents( cfg, ipv4_addr) fileutil.write_file(f, new_cfg) logger.info("RDMA: DAPL configuration is updated") return raise Exception("RDMA: DAPL configuration file not found at predefined paths") @staticmethod def replace_dat_conf_contents(cfg, ipv4_addr): old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( ipv4_addr) return re.sub(old, new, cfg) @staticmethod def write_rdma_config_to_device(path, ipv4_addr, mac_addr): data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr) logger.info( "RDMA: Updating device with configuration: {0}".format(data)) with open(path, "w") as f: logger.info("RDMA: Device opened for writing") f.write(data) logger.info("RDMA: Updated device with IPv4/MAC addr successfully") @staticmethod def generate_rdma_config(ipv4_addr, mac_addr): return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr) @staticmethod def wait_rdma_device(path, timeout_sec, check_interval_sec): logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec)) total_retries = timeout_sec/check_interval_sec n = 0 while n < total_retries: if os.path.exists(path): logger.info("RDMA: device ready") return logger.verbose( "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) time.sleep(check_interval_sec) n += 1 logger.error("RDMA device wait timed out") raise Exception("The device did not show up in {0} seconds ({1} retries)".format( timeout_sec, total_retries)) @staticmethod def update_network_interface(mac_addr, ipv4_addr): netmask=16 logger.info("RDMA: will update the 
network interface with IPv4/MAC") if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr) logger.info("RDMA: network interface found: {0}", if_name) logger.info("RDMA: bringing network interface up") if shellutil.run("ifconfig {0} up".format(if_name)) != 0: raise Exception("Could not bring up RMDA interface: {0}".format(if_name)) logger.info("RDMA: configuring IPv4 addr and netmask on interface") addr = '{0}/{1}'.format(ipv4_addr, netmask) if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0: raise Exception("Could set addr to {1} on {0}".format(if_name, addr)) logger.info("RDMA: network address and netmask configured on interface") @staticmethod def get_interface_by_mac(mac): ret, output = shellutil.run_get_output("ifconfig -a") if ret != 0: raise Exception("Failed to list network interfaces") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("ifname with mac: {0} not found".format(mac)) return eths[-1] WALinuxAgent-2.2.32/azurelinuxagent/common/utils/000077500000000000000000000000001335416306700217735ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/common/utils/__init__.py000066400000000000000000000011661335416306700241100ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/common/utils/archive.py000066400000000000000000000141531335416306700237720ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. import errno import os import re import shutil import zipfile from azurelinuxagent.common.utils import fileutil import azurelinuxagent.common.logger as logger """ archive.py The module supports the archiving of guest agent state. Guest agent state is flushed whenever there is a incarnation change. The flush is archived periodically (once a day). The process works as follows whenever a new incarnation arrives. 1. Flush - move all state files to a new directory under .../history/timestamp/. 2. Archive - enumerate all directories under .../history/timestamp and create a .zip file named timestamp.zip. Delete the archive directory 3. Purge - glob the list .zip files, sort by timestamp in descending order, keep the first 50 results, and delete the rest. ... is the directory where the agent's state resides, by default this is /var/lib/waagent. The timestamp is an ISO8601 formatted value. 
""" ARCHIVE_DIRECTORY_NAME = 'history' MAX_ARCHIVED_STATES = 50 CACHE_PATTERNS = [ re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] # 2018-04-06T08:21:37.142697 # 2018-04-06T08:21:37.142697.zip ARCHIVE_PATTERNS_DIRECTORY = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+$') ARCHIVE_PATTERNS_ZIP = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+\.zip$') class StateFlusher(object): def __init__(self, lib_dir): self._source = lib_dir d = os.path.join(self._source, ARCHIVE_DIRECTORY_NAME) if not os.path.exists(d): try: fileutil.mkdir(d) except OSError as e: if e.errno != errno.EEXIST: logger.error("{0} : {1}", self._source, e.strerror) def flush(self, timestamp): files = self._get_files_to_archive() if len(files) == 0: return if self._mkdir(timestamp): self._archive(files, timestamp) else: self._purge(files) def history_dir(self, timestamp): return os.path.join(self._source, ARCHIVE_DIRECTORY_NAME, timestamp.isoformat()) def _get_files_to_archive(self): files = [] for f in os.listdir(self._source): full_path = os.path.join(self._source, f) for pattern in CACHE_PATTERNS: m = pattern.match(f) if m is not None: files.append(full_path) break return files def _archive(self, files, timestamp): for f in files: dst = os.path.join(self.history_dir(timestamp), os.path.basename(f)) shutil.move(f, dst) def _purge(self, files): for f in files: os.remove(f) def _mkdir(self, timestamp): d = self.history_dir(timestamp) try: fileutil.mkdir(d, mode=0o700) return True except IOError as e: logger.error("{0} : {1}".format(d, e.strerror)) return False # TODO: use @total_ordering once RHEL/CentOS and SLES 11 are EOL. # @total_ordering first appeared in Python 2.7 and 3.2 # If there are more use cases for @total_ordering, I will # consider re-implementing it. 
# TODO: use @total_ordering once RHEL/CentOS and SLES 11 are EOL.
# @total_ordering first appeared in Python 2.7 and 3.2
# If there are more use cases for @total_ordering, I will
# consider re-implementing it.
class State(object):
    """
    One archived agent-state entry (a history directory or its .zip form),
    ordered and compared by timestamp. Subclasses implement delete/archive.
    """

    def __init__(self, path, timestamp):
        self._path = path
        self._timestamp = timestamp

    @property
    def timestamp(self):
        """The ISO8601 timestamp string identifying this entry."""
        return self._timestamp

    def delete(self):
        # No-op in the base class; subclasses remove their backing storage.
        pass

    def archive(self):
        # No-op in the base class; only directories need zipping.
        pass

    # Full comparison protocol, hand-rolled (see TODO above): every operator
    # delegates to the timestamps.
    def __eq__(self, other):
        return self._timestamp == other.timestamp

    def __ne__(self, other):
        return self._timestamp != other.timestamp

    def __lt__(self, other):
        return self._timestamp < other.timestamp

    def __gt__(self, other):
        return other.timestamp < self._timestamp

    def __le__(self, other):
        return not (other.timestamp < self._timestamp)

    def __ge__(self, other):
        return not (self._timestamp < other.timestamp)
""" states = self._get_archive_states() states.sort(reverse=True) for state in states[MAX_ARCHIVED_STATES:]: state.delete() def archive(self): states = self._get_archive_states() for state in states: state.archive() def _get_archive_states(self): states = [] for f in os.listdir(self._source): full_path = os.path.join(self._source, f) m = ARCHIVE_PATTERNS_DIRECTORY.match(f) if m is not None: states.append(StateDirectory(full_path, m.group(0))) m = ARCHIVE_PATTERNS_ZIP.match(f) if m is not None: states.append(StateZip(full_path, m.group(0))) return states WALinuxAgent-2.2.32/azurelinuxagent/common/utils/cryptutil.py000066400000000000000000000147411335416306700244130ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import errno import struct import sys import os.path import subprocess from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import CryptError import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil DECRYPT_SECRET_CMD = "{0} cms -decrypt -inform DER -inkey {1} -in /dev/stdin" class CryptUtil(object): def __init__(self, openssl_cmd): self.openssl_cmd = openssl_cmd def gen_transport_cert(self, prv_file, crt_file): """ Create ssl certificate for https communication with endpoint server. """ cmd = ("{0} req -x509 -nodes -subj /CN=LinuxTransport -days 730 " "-newkey rsa:2048 -keyout {1} " "-out {2}").format(self.openssl_cmd, prv_file, crt_file) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to create {0} and {1} certificates".format( prv_file, crt_file)) def get_pubkey_from_prv(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, file_name) pub = shellutil.run_get_output(cmd)[1] return pub def get_pubkey_from_crt(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, file_name) pub = shellutil.run_get_output(cmd)[1] return pub def get_thumbprint_from_crt(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = "{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, file_name) thumbprint = shellutil.run_get_output(cmd)[1] thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() return thumbprint def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): if not os.path.exists(p7m_file): raise 
IOError(errno.ENOENT, "File not found", p7m_file) elif not os.path.exists(trans_prv_file): raise IOError(errno.ENOENT, "File not found", trans_prv_file) else: cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " "| {4} pkcs12 -nodes -password pass: -out {5}" "").format(self.openssl_cmd, p7m_file, trans_prv_file, trans_cert_file, self.openssl_cmd, pem_file) shellutil.run(cmd) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to decrypt {0}".format(p7m_file)) def crt_to_ssh(self, input_file, output_file): shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, output_file)) def asn1_to_ssh(self, pubkey): lines = pubkey.split("\n") lines = [x for x in lines if not x.startswith("----")] base64_encoded = "".join(lines) try: #TODO remove pyasn1 dependency from pyasn1.codec.der import decoder as der_decoder der_encoded = base64.b64decode(base64_encoded) der_encoded = der_decoder.decode(der_encoded)[0][1] key = der_decoder.decode(self.bits_to_bytes(der_encoded))[0] n=key[0] e=key[1] keydata = bytearray() keydata.extend(struct.pack('>I', len("ssh-rsa"))) keydata.extend(b"ssh-rsa") keydata.extend(struct.pack('>I', len(self.num_to_bytes(e)))) keydata.extend(self.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(self.num_to_bytes(n)) + 1)) keydata.extend(b"\0") keydata.extend(self.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) return ustr(b"ssh-rsa " + keydata_base64 + b"\n", encoding='utf-8') except ImportError as e: raise CryptError("Failed to load pyasn1.codec.der") def num_to_bytes(self, num): """ Pack number into bytes. Retun as string. 
""" result = bytearray() while num: result.append(num & 0xFF) num >>= 8 result.reverse() return result def bits_to_bytes(self, bits): """ Convert an array contains bits, [0,1] to a byte array """ index = 7 byte_array = bytearray() curr = 0 for bit in bits: curr = curr | (bit << index) index = index - 1 if index == -1: byte_array.append(curr) curr = 0 index = 7 return bytes(byte_array) def decrypt_secret(self, encrypted_password, private_key): try: decoded = base64.b64decode(encrypted_password) args = DECRYPT_SECRET_CMD.format(self.openssl_cmd, private_key).split(' ') p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) p.stdin.write(decoded) output = p.communicate()[0] retcode = p.poll() if retcode: raise subprocess.CalledProcessError(retcode, "openssl cms -decrypt", output=output) return output.decode('utf-16') except Exception as e: raise CryptError("Error decoding secret", e) WALinuxAgent-2.2.32/azurelinuxagent/common/utils/fileutil.py000066400000000000000000000146431335416306700241720ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False):
    """
    Write (or append) 'contents' to 'filepath'.

    :param str filepath: destination path
    :param contents: text (encoded with 'encoding') or, when asbin is True,
        raw bytes written as-is
    :param bool asbin: treat 'contents' as binary and skip encoding
    :param str encoding: text encoding used when asbin is False
    :param bool append: append instead of truncating
    """
    if asbin:
        payload = contents
    else:
        payload = contents.encode(encoding)
    with open(filepath, "ab" if append else "wb") as fh:
        fh.write(payload)
""" write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) def base_name(path): head, tail = os.path.split(path) return tail def get_line_startingwith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in read_file(filepath).split('\n'): if line.startswith(prefix): return line return None def mkdir(dirpath, mode=None, owner=None): if not os.path.isdir(dirpath): os.makedirs(dirpath) if mode is not None: chmod(dirpath, mode) if owner is not None: chowner(dirpath, owner) def chowner(path, owner): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: owner_info = pwd.getpwnam(owner) os.chown(path, owner_info[2], owner_info[3]) def chmod(path, mode): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: os.chmod(path, mode) def rm_files(*args): for paths in args: # find all possible file paths for path in glob.glob(paths): if os.path.isfile(path): os.remove(path) def rm_dirs(*args): """ Remove the contents of each directory """ for p in args: if not os.path.isdir(p): continue for pp in os.listdir(p): path = os.path.join(p, pp) if os.path.isfile(path): os.remove(path) elif os.path.islink(path): os.unlink(path) elif os.path.isdir(path): shutil.rmtree(path) def trim_ext(path, ext): if not ext.startswith("."): ext = "." 
def findstr_in_file(file_path, line_str):
    """
    Return True if 'line_str' occurs as a whole line of the file (trailing
    whitespace ignored); False otherwise, including when the file cannot
    be read at all.
    """
    try:
        with open(file_path, 'r') as fh:
            return any(line.rstrip() == line_str for line in fh)
    except Exception:
        # Missing/unreadable file is deliberately treated as "not found".
        pass
    return False
""" if isinstance(e, IOError) and e.errno in KNOWN_IOERRORS: for path in paths: if path is None: continue try: if os.path.isdir(path): shutil.rmtree(path, ignore_errors=True) else: os.remove(path) except Exception: # swallow exception pass WALinuxAgent-2.2.32/azurelinuxagent/common/utils/flexible_version.py000066400000000000000000000160721335416306700257120ustar00rootroot00000000000000from distutils import version import re class FlexibleVersion(version.Version): """ A more flexible implementation of distutils.version.StrictVersion The implementation allows to specify: - an arbitrary number of version numbers: not only '1.2.3' , but also '1.2.3.4.5' - the separator between version numbers: '1-2-3' is allowed when '-' is specified as separator - a flexible pre-release separator: '1.2.3.alpha1', '1.2.3-alpha1', and '1.2.3alpha1' are considered equivalent - an arbitrary ordering of pre-release tags: 1.1alpha3 < 1.1beta2 < 1.1rc1 < 1.1 when ["alpha", "beta", "rc"] is specified as pre-release tag list Inspiration from this discussion at StackOverflow: http://stackoverflow.com/questions/12255554/sort-versions-in-python """ def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')): version.Version.__init__(self) if sep is None: sep = '.' 
if prerel_tags is None: prerel_tags = () self.sep = sep self.prerel_sep = '' self.prerel_tags = tuple(prerel_tags) if prerel_tags is not None else () self._compile_pattern() self.prerelease = None self.version = () if vstring: self._parse(str(vstring)) return _nn_version = 'version' _nn_prerel_sep = 'prerel_sep' _nn_prerel_tag = 'tag' _nn_prerel_num = 'tag_num' _re_prerel_sep = r'(?P<{pn}>{sep})?'.format( pn=_nn_prerel_sep, sep='|'.join(map(re.escape, ('.', '-')))) @property def major(self): return self.version[0] if len(self.version) > 0 else 0 @property def minor(self): return self.version[1] if len(self.version) > 1 else 0 @property def patch(self): return self.version[2] if len(self.version) > 2 else 0 def _parse(self, vstring): m = self.version_re.match(vstring) if not m: raise ValueError("Invalid version number '{0}'".format(vstring)) self.prerelease = None self.version = () self.prerel_sep = m.group(self._nn_prerel_sep) tag = m.group(self._nn_prerel_tag) tag_num = m.group(self._nn_prerel_num) if tag is not None and tag_num is not None: self.prerelease = (tag, int(tag_num) if len(tag_num) else None) self.version = tuple(map(int, self.sep_re.split(m.group(self._nn_version)))) return def __add__(self, increment): version = list(self.version) version[-1] += increment vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) def __sub__(self, decrement): version = list(self.version) if version[-1] <= 0: raise ArithmeticError("Cannot decrement final numeric component of {0} below zero" \ .format(self)) version[-1] -= decrement vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) def __repr__(self): return "{cls} ('{vstring}', '{sep}', {prerel_tags})"\ .format( cls=self.__class__.__name__, vstring=str(self), sep=self.sep, prerel_tags=self.prerel_tags) def 
__str__(self): return self._assemble(self.version, self.sep, self.prerel_sep, self.prerelease) def __ge__(self, that): return not self.__lt__(that) def __gt__(self, that): return (not self.__lt__(that)) and (not self.__eq__(that)) def __le__(self, that): return (self.__lt__(that)) or (self.__eq__(that)) def __lt__(self, that): this_version, that_version = self._ensure_compatible(that) if this_version != that_version \ or self.prerelease is None and that.prerelease is None: return this_version < that_version if self.prerelease is not None and that.prerelease is None: return True if self.prerelease is None and that.prerelease is not None: return False this_index = self.prerel_tags_set[self.prerelease[0]] that_index = self.prerel_tags_set[that.prerelease[0]] if this_index == that_index: return self.prerelease[1] < that.prerelease[1] return this_index < that_index def __ne__(self, that): return not self.__eq__(that) def __eq__(self, that): this_version, that_version = self._ensure_compatible(that) if this_version != that_version: return False if self.prerelease != that.prerelease: return False return True def matches(self, that): if self.sep != that.sep or len(self.version) > len(that.version): return False for i in range(len(self.version)): if self.version[i] != that.version[i]: return False if self.prerel_tags: return self.prerel_tags == that.prerel_tags return True def _assemble(self, version, sep, prerel_sep, prerelease): s = sep.join(map(str, version)) if prerelease is not None: if prerel_sep is not None: s += prerel_sep s += prerelease[0] if prerelease[1] is not None: s += str(prerelease[1]) return s def _compile_pattern(self): sep, self.sep_re = self._compile_separator(self.sep) if self.prerel_tags: tags = '|'.join(re.escape(tag) for tag in self.prerel_tags) self.prerel_tags_set = dict(zip(self.prerel_tags, range(len(self.prerel_tags)))) release_re = '(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( prerel_sep=self._re_prerel_sep, tags=tags, 
class RouteEntry(object):
    """
    Represents a single route. The destination, gateway, and mask members
    are hex representations of the IPv4 address in network byte order.
    """
    def __init__(self, interface, destination, gateway, mask, flags, metric):
        self.interface = interface
        self.destination = destination
        self.gateway = gateway
        self.mask = mask
        self.flags = int(flags, 16)   # flags arrive as a hex string
        self.metric = int(metric)

    @staticmethod
    def _net_hex_to_dotted_quad(value):
        """
        Convert an 8-character network-byte-order hex string into dotted-quad
        notation, reading the hex pairs from the end so the most-significant
        octet comes first.
        """
        if len(value) != 8:
            raise Exception("String to dotted quad conversion must be 8 characters")
        octets = [str(int(value[pos:pos + 2], 16)) for pos in (6, 4, 2, 0)]
        return ".".join(octets)

    def destination_quad(self):
        return self._net_hex_to_dotted_quad(self.destination)

    def gateway_quad(self):
        return self._net_hex_to_dotted_quad(self.gateway)

    def mask_quad(self):
        return self._net_hex_to_dotted_quad(self.mask)

    def to_json(self):
        f = '{{"Iface": "{0}", "Destination": "{1}", "Gateway": "{2}", "Mask": "{3}", "Flags": "{4:#06x}", "Metric": "{5}"}}'
        return f.format(self.interface, self.destination_quad(), self.gateway_quad(),
                        self.mask_quad(), self.flags, self.metric)

    def __str__(self):
        f = "Iface: {0}\tDestination: {1}\tGateway: {2}\tMask: {3}\tFlags: {4:#06x}\tMetric: {5}"
        return f.format(self.interface, self.destination_quad(), self.gateway_quad(),
                        self.mask_quad(), self.flags, self.metric)

    def __repr__(self):
        return 'RouteEntry("{0}", "{1}", "{2}", "{3}", "{4:#04x}", "{5}")' \
            .format(self.interface, self.destination, self.gateway, self.mask,
                    self.flags, self.metric)
TELEMETRY_MESSAGE_MAX_LEN = 3200


def sanitize(s):
    """Decode bytes to text, backslash-escaping anything undecodable."""
    return ustr(s, encoding='utf-8', errors='backslashreplace')


def format_stdout_stderr(stdout, stderr, max_len=TELEMETRY_MESSAGE_MAX_LEN):
    """
    Combine captured stdout and stderr into one telemetry-sized string.

    The two streams share the space budget: a stream that is under budget
    donates its unused space to the other, and when a stream overflows only
    its tail is kept.

    :param str stdout: characters captured from stdout
    :param str stderr: characters captured from stderr
    :param int max_len: maximum length of the string to return
    :return: a string formatted with stdout and stderr that is less than
             or equal to max_len
    :rtype: str
    """
    template = "[stdout]\n{0}\n\n[stderr]\n{1}"
    # The "{0}"/"{1}" placeholders (6 chars) are replaced, so they do not
    # count toward the fixed overhead.
    budget_each = int((max_len - len(template) + 6) / 2)
    if budget_each <= 0:
        return ''

    def render(out_tail, err_tail):
        return template.format(out_tail, err_tail)

    if len(stdout) + len(stderr) < max_len:
        # Everything fits as-is.
        return render(stdout, stderr)
    if len(stdout) < budget_each:
        # stdout under budget: give its leftover space to stderr.
        spare = budget_each - len(stdout)
        keep = min(budget_each + spare, len(stderr))
        return render(stdout, stderr[-keep:])
    if len(stderr) < budget_each:
        # stderr under budget: give its leftover space to stdout.
        spare = budget_each - len(stderr)
        keep = min(budget_each + spare, len(stdout))
        return render(stdout[-keep:], stderr)
    # Both overflow: split the budget evenly, keeping each stream's tail.
    return render(stdout[-budget_each:], stderr[-budget_each:])
""" retry = timeout while retry > 0 and process.poll() is None: time.sleep(1) retry -= 1 # process did not fork, timeout expired if retry == 0: os.killpg(os.getpgid(process.pid), signal.SIGKILL) stdout, stderr = process.communicate() msg = format_stdout_stderr(sanitize(stdout), sanitize(stderr)) raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, cmd, msg), code=code) # process completed or forked return_code = process.wait() if return_code != 0: raise ExtensionError("Non-zero exit code: {0}, {1}".format(return_code, cmd), code=code) stderr = b'' stdout = b'cannot collect stdout' # attempt non-blocking process communication to capture output def proc_comm(_process, _return): try: _stdout, _stderr = _process.communicate() _return[0] = _stdout _return[1] = _stderr except Exception: pass try: mgr = multiprocessing.Manager() ret_dict = mgr.dict() cproc = Process(target=proc_comm, args=(process, ret_dict)) cproc.start() # allow 1s to capture output cproc.join(1) if len(ret_dict) == 2: stdout = ret_dict[0] stderr = ret_dict[1] except Exception: pass return stdout, stderr def capture_from_process_no_timeout(process, cmd, code): try: stdout, stderr = process.communicate() except OSError as e: _destroy_process(process, signal.SIGKILL) raise ExtensionError("Error while running '{0}': {1}".format(cmd, e.strerror), code=code) except Exception as e: _destroy_process(process, signal.SIGKILL) raise ExtensionError("Exception while running '{0}': {1}".format(cmd, e), code=code) return stdout, stderr def capture_from_process_raw(process, cmd, timeout, code): """ Captures stdout and stderr from an already-created process. 
def capture_from_process(process, cmd, timeout=0, code=-1):
    """
    Captures stdout and stderr from an already-created process and "cooks"
    the output into a single string of telemetry-friendly length.

    :param subprocess.Popen process: Created by subprocess.Popen()
    :param str cmd: The command string to be included in any exceptions
    :param int timeout: Number of seconds the process is permitted to run
    :return: stdout and stderr, sanitized and formatted for telemetry
    :rtype: str
    :raises ExtensionError: if a timeout occurred or if anything was raised
                            by Popen.communicate()
    """
    raw_stdout, raw_stderr = capture_from_process_raw(process, cmd, timeout, code)
    cooked_stdout = sanitize(raw_stdout)
    cooked_stderr = sanitize(raw_stderr)
    return format_stdout_stderr(cooked_stdout, cooked_stderr)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import threading import time import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient, urlparse, ustr from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION SECURE_WARNING_EMITTED = False DEFAULT_RETRIES = 6 DELAY_IN_SECONDS = 1 THROTTLE_RETRIES = 25 THROTTLE_DELAY_IN_SECONDS = 1 REDACTED_TEXT = "" SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$') RETRY_CODES = [ httpclient.RESET_CONTENT, httpclient.PARTIAL_CONTENT, httpclient.FORBIDDEN, httpclient.INTERNAL_SERVER_ERROR, httpclient.NOT_IMPLEMENTED, httpclient.BAD_GATEWAY, httpclient.SERVICE_UNAVAILABLE, httpclient.GATEWAY_TIMEOUT, httpclient.INSUFFICIENT_STORAGE, 429, # Request Rate Limit Exceeded ] RESOURCE_GONE_CODES = [ httpclient.GONE ] OK_CODES = [ httpclient.OK, httpclient.CREATED, httpclient.ACCEPTED ] HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [ 502 ] THROTTLE_CODES = [ httpclient.FORBIDDEN, httpclient.SERVICE_UNAVAILABLE, 429, # Request Rate Limit Exceeded ] RETRY_EXCEPTIONS = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] HTTP_PROXY_ENV = "http_proxy" HTTPS_PROXY_ENV = "https_proxy" HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION) 
DEFAULT_PROTOCOL_ENDPOINT = '168.63.129.16'
HOST_PLUGIN_PORT = 32526


class IOErrorCounter(object):
    """
    Thread-safe, process-wide tally of I/O errors, bucketed by destination:
    'hostplugin' (protocol endpoint on the host-plugin port), 'protocol'
    (protocol endpoint, any other port), or 'other'.
    """
    _lock = threading.RLock()
    _protocol_endpoint = DEFAULT_PROTOCOL_ENDPOINT
    _counts = {"hostplugin":0, "protocol":0, "other":0}

    @staticmethod
    def increment(host=None, port=None):
        """Record one error against the bucket matching (host, port)."""
        with IOErrorCounter._lock:
            if host != IOErrorCounter._protocol_endpoint:
                bucket = "other"
            elif port == HOST_PLUGIN_PORT:
                bucket = "hostplugin"
            else:
                bucket = "protocol"
            IOErrorCounter._counts[bucket] += 1

    @staticmethod
    def get_and_reset():
        """Atomically snapshot the current counts and zero them."""
        with IOErrorCounter._lock:
            snapshot = IOErrorCounter._counts.copy()
            IOErrorCounter.reset()
            return snapshot

    @staticmethod
    def reset():
        with IOErrorCounter._lock:
            IOErrorCounter._counts = {"hostplugin":0, "protocol":0, "other":0}

    @staticmethod
    def set_protocol_endpoint(endpoint=DEFAULT_PROTOCOL_ENDPOINT):
        # Not locked in the original either; callers set this once at startup.
        IOErrorCounter._protocol_endpoint = endpoint
configuration settings over environment variables host = conf.get_httpproxy_host() port = None if not host is None: port = conf.get_httpproxy_port() else: http_proxy_env = HTTPS_PROXY_ENV if secure else HTTP_PROXY_ENV http_proxy_url = None for v in [http_proxy_env, http_proxy_env.upper()]: if v in os.environ: http_proxy_url = os.environ[v] break if not http_proxy_url is None: host, port, _, _ = _parse_url(http_proxy_url) return host, port def redact_sas_tokens_in_urls(url): return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url) def _http_request(method, host, rel_uri, port=None, data=None, secure=False, headers=None, proxy_host=None, proxy_port=None): headers = {} if headers is None else headers headers['Connection'] = 'close' use_proxy = proxy_host is not None and proxy_port is not None if port is None: port = 443 if secure else 80 if 'User-Agent' not in headers: headers['User-Agent'] = HTTP_USER_AGENT if use_proxy: conn_host, conn_port = proxy_host, proxy_port scheme = "https" if secure else "http" url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri) else: conn_host, conn_port = host, port url = rel_uri if secure: conn = httpclient.HTTPSConnection(conn_host, conn_port, timeout=10) if use_proxy: conn.set_tunnel(host, port) else: conn = httpclient.HTTPConnection(conn_host, conn_port, timeout=10) logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]", method, redact_sas_tokens_in_urls(url), data, headers) conn.request(method=method, url=url, body=data, headers=headers) return conn.getresponse() def http_request(method, url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): global SECURE_WARNING_EMITTED host, port, secure, rel_uri = _parse_url(url) # Use the HTTP(S) proxy proxy_host, proxy_port = (None, None) if use_proxy: proxy_host, proxy_port = _get_http_proxy(secure=secure) if proxy_host or proxy_port: logger.verbose("HTTP proxy: [{0}:{1}]", proxy_host, proxy_port) # 
If httplib module is not built with ssl support, # fallback to HTTP if allowed if secure and not hasattr(httpclient, "HTTPSConnection"): if not conf.get_allow_http(): raise HttpError("HTTPS is unavailable and required") secure = False if not SECURE_WARNING_EMITTED: logger.warn("Python does not include SSL support") SECURE_WARNING_EMITTED = True # If httplib module doesn't support HTTPS tunnelling, # fallback to HTTP if allowed if secure and \ proxy_host is not None and \ proxy_port is not None \ and not hasattr(httpclient.HTTPSConnection, "set_tunnel"): if not conf.get_allow_http(): raise HttpError("HTTPS tunnelling is unavailable and required") secure = False if not SECURE_WARNING_EMITTED: logger.warn("Python does not support HTTPS tunnelling") SECURE_WARNING_EMITTED = True msg = '' attempt = 0 delay = 0 was_throttled = False while attempt < max_retry: if attempt > 0: # Compute the request delay # -- Use a fixed delay if the server ever rate-throttles the request # (with a safe, minimum number of retry attempts) # -- Otherwise, compute a delay that is the product of the next # item in the Fibonacci series and the initial delay value delay = THROTTLE_DELAY_IN_SECONDS \ if was_throttled \ else _compute_delay(retry_attempt=attempt, delay=retry_delay) logger.verbose("[HTTP Retry] " "Attempt {0} of {1} will delay {2} seconds: {3}", attempt+1, max_retry, delay, msg) time.sleep(delay) attempt += 1 try: resp = _http_request(method, host, rel_uri, port=port, data=data, secure=secure, headers=headers, proxy_host=proxy_host, proxy_port=proxy_port) logger.verbose("[HTTP Response] Status Code {0}", resp.status) if request_failed(resp): if _is_retry_status(resp.status, retry_codes=retry_codes): msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status) # Note if throttled and ensure a safe, minimum number of # retry attempts if _is_throttle_status(resp.status): was_throttled = True max_retry = max(max_retry, THROTTLE_RETRIES) continue if resp.status in 
RESOURCE_GONE_CODES: raise ResourceGoneError() # Map invalid container configuration errors to resource gone in # order to force a goal state refresh, which in turn updates the # container-id header passed to HostGAPlugin. # See #1294. if _is_invalid_container_configuration(resp): raise ResourceGoneError() return resp except httpclient.HTTPException as e: clean_url = redact_sas_tokens_in_urls(url) msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e) if _is_retry_exception(e): continue break except IOError as e: IOErrorCounter.increment(host=host, port=port) clean_url = redact_sas_tokens_in_urls(url) msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e) continue raise HttpError("{0} -- {1} attempts made".format(msg, attempt)) def http_get(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("GET", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_head(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("HEAD", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_post(url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("POST", url, data, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_put(url, data, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("PUT", url, data, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def http_delete(url, headers=None, use_proxy=False, max_retry=DEFAULT_RETRIES, 
retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): return http_request("DELETE", url, None, headers=headers, use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def request_failed(resp, ok_codes=OK_CODES): return not request_succeeded(resp, ok_codes=ok_codes) def request_succeeded(resp, ok_codes=OK_CODES): return resp is not None and resp.status in ok_codes def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES): """ Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502 """ return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes def read_response_error(resp): result = '' if resp is not None: try: result = "[HTTP Failed] [{0}: {1}] {2}".format( resp.status, resp.reason, resp.read()) # this result string is passed upstream to several methods # which do a raise HttpError() or a format() of some kind; # as a result it cannot have any unicode characters if PY_VERSION_MAJOR < 3: result = ustr(result, encoding='ascii', errors='ignore') else: result = result\ .encode(encoding='ascii', errors='ignore')\ .decode(encoding='ascii', errors='ignore') result = textutil.replace_non_ascii(result) except Exception: logger.warn(traceback.format_exc()) return result WALinuxAgent-2.2.32/azurelinuxagent/common/utils/shellutil.py000066400000000000000000000103401335416306700243500ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import subprocess import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr if not hasattr(subprocess, 'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, ' 'it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return ("Command '{0}' returned non-zero exit status {1}" "").format(self.cmd, self.returncode) subprocess.check_output = check_output subprocess.CalledProcessError = CalledProcessError """ Shell command util functions """ def has_command(cmd): """ Return True if the given command is on the path """ return not run(cmd, False) def run(cmd, chk_err=True): """ Calls run_get_output on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode, out = run_get_output(cmd, chk_err) return retcode def run_get_output(cmd, chk_err=True, log_cmd=True): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. 
Reports exceptions to Error if chk_err parameter is True """ if log_cmd: logger.verbose(u"Command: [{0}]", cmd) try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) output = ustr(output, encoding='utf-8', errors="backslashreplace") except subprocess.CalledProcessError as e: output = ustr(e.output, encoding='utf-8', errors="backslashreplace") if chk_err: msg = u"Command: [{0}], " \ u"return code: [{1}], " \ u"result: [{2}]".format(cmd, e.returncode, output) logger.error(msg) return e.returncode, output except Exception as e: if chk_err: logger.error(u"Command [{0}] raised unexpected exception: [{1}]" .format(cmd, ustr(e))) return -1, ustr(e) return 0, output def quote(word_list): """ Quote a list or tuple of strings for Unix Shell as words, using the byte-literal single quote. The resulting string is safe for use with ``shell=True`` in ``subprocess``, and in ``os.system``. ``assert shlex.split(ShellQuote(wordList)) == wordList``. See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02 """ if not isinstance(word_list, (tuple, list)): word_list = (word_list,) return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)) # End shell command util functions WALinuxAgent-2.2.32/azurelinuxagent/common/utils/textutil.py000066400000000000000000000236511335416306700242360ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import base64 import crypt import hashlib import random import re import string import struct import sys import zlib import xml.dom.minidom as minidom def parse_doc(xml_text): """ Parse xml document from string """ # The minidom lib has some issue with unicode in python2. # Encode the string into utf-8 first xml_text = xml_text.encode('utf-8') return minidom.parseString(xml_text) def findall(root, tag, namespace=None): """ Get all nodes by tag and namespace under Node root. """ if root is None: return [] if namespace is None: return root.getElementsByTagName(tag) else: return root.getElementsByTagNameNS(namespace, tag) def find(root, tag, namespace=None): """ Get first node by tag and namespace under Node root. """ nodes = findall(root, tag, namespace=namespace) if nodes is not None and len(nodes) >= 1: return nodes[0] else: return None def gettext(node): """ Get node text """ if node is None: return None for child in node.childNodes: if child.nodeType == child.TEXT_NODE: return child.data return None def findtext(root, tag, namespace=None): """ Get text of node by tag and namespace under Node root. """ node = find(root, tag, namespace=namespace) return gettext(node) def getattrib(node, attr_name): """ Get attribute of xml node """ if node is not None: return node.getAttribute(attr_name) else: return None def unpack(buf, offset, range): """ Unpack bytes into python values. """ result = 0 for i in range: result = (result << 8) | str_to_ord(buf[offset + i]) return result def unpack_little_endian(buf, offset, length): """ Unpack little endian bytes into python values. """ return unpack(buf, offset, list(range(length - 1, -1, -1))) def unpack_big_endian(buf, offset, length): """ Unpack big endian bytes into python values. 
""" return unpack(buf, offset, list(range(0, length))) def hex_dump3(buf, offset, length): """ Dump range of buf in formatted hex. """ return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) def hex_dump2(buf): """ Dump buf in formatted hex. """ return hex_dump3(buf, 0, len(buf)) def is_in_range(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def is_printable(ch): """ Return True if character is displayable. """ return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) def hex_dump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. """ if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte = buffer[j] if type(byte) == str: byte = str_to_ord(byte.decode('latin1')) k = '.' if is_printable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def str_to_ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. """ if type(a) == type(b'') or type(a) == type(u''): a = ord(a) return a def compare_bytes(a, b, start, length): for offset in range(start, start + length): if str_to_ord(a[offset]) != str_to_ord(b[offset]): return False return True def int_to_ip4_addr(a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, (a) & 0xFF) def hexstr_to_bytearray(a): """ Return hex string packed into a binary struct. 
""" b = b"" for c in range(0, len(a) // 2): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b def set_ssh_config(config, name, val): found = False no_match = -1 match_start = no_match for i in range(0, len(config)): if config[i].startswith(name) and match_start == no_match: config[i] = "{0} {1}".format(name, val) found = True elif config[i].lower().startswith("match"): if config[i].lower().startswith("match all"): # outside match block match_start = no_match elif match_start == no_match: # inside match block match_start = i if not found: if match_start != no_match: i = match_start config.insert(i, "{0} {1}".format(name, val)) return config def set_ini_config(config, name, val): notfound = True nameEqual = name + '=' length = len(config) text = "{0}=\"{1}\"".format(name, val) for i in reversed(range(0, length)): if config[i].startswith(nameEqual): config[i] = text notfound = False break if notfound: config.insert(length - 1, text) def replace_non_ascii(incoming, replace_char=''): outgoing = '' if incoming is not None: for c in incoming: if str_to_ord(c) > 128: outgoing += replace_char else: outgoing += c return outgoing def remove_bom(c): """ bom is comprised of a sequence of three chars,0xef, 0xbb, 0xbf, in case of utf-8. 
""" if not is_str_none_or_whitespace(c) and \ len(c) > 2 and \ str_to_ord(c[0]) > 128 and \ str_to_ord(c[1]) > 128 and \ str_to_ord(c[2]) > 128: c = c[3:] return c def gen_password_hash(password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) if sys.version_info[0] == 2: # if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt password = password.encode('utf-8') return crypt.crypt(password, salt) def get_bytes_from_pem(pem_str): base64_bytes = "" for line in pem_str.split('\n'): if "----" not in line: base64_bytes += line return base64_bytes def compress(s): """ Compress a string, and return the base64 encoded result of the compression. This method returns a string instead of a byte array. It is expected that this method is called to compress smallish strings, not to compress the contents of a file. The output of this method is suitable for embedding in log statements. 
""" from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8') return base64.b64encode(zlib.compress(s)) def b64encode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8') return base64.b64encode(s) def b64decode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64decode(s).decode('utf-8') return base64.b64decode(s) def safe_shlex_split(s): import shlex from azurelinuxagent.common.version import PY_VERSION if PY_VERSION[:2] == (2, 6): return shlex.split(s.encode('utf-8')) return shlex.split(s) def swap_hexstring(s, width=2): r = len(s) % width if r != 0: s = ('0' * (width - (len(s) % width))) + s return ''.join(reversed( re.findall( r'[a-f0-9]{{{0}}}'.format(width), s, re.IGNORECASE))) def parse_json(json_str): """ Parse json string and return a resulting dictionary """ # trim null and whitespaces result = None if not is_str_empty(json_str): import json result = json.loads(json_str.rstrip(' \t\r\n\0')) return result def is_str_none_or_whitespace(s): return s is None or len(s) == 0 or s.isspace() def is_str_empty(s): return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0')) def hash_strings(string_list): """ Compute a cryptographic hash of a list of strings :param string_list: The strings to be hashed :return: The cryptographic hash (digest) of the strings in the order provided """ sha1_hash = hashlib.sha1() for item in string_list: sha1_hash.update(item.encode()) return sha1_hash.digest() WALinuxAgent-2.2.32/azurelinuxagent/common/version.py000066400000000000000000000163751335416306700227060ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import platform import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.future import ustr, get_linux_distribution def get_f5_platform(): """ Add this workaround for detecting F5 products because BIG-IP/IQ/etc do not show their version info in the /etc/product-version location. Instead, the version and product information is contained in the /VERSION file. """ result = [None, None, None, None] f5_version = re.compile("^Version: (\d+\.\d+\.\d+)") f5_product = re.compile("^Product: ([\w-]+)") with open('/VERSION', 'r') as fh: content = fh.readlines() for line in content: version_matches = f5_version.match(line) product_matches = f5_product.match(line) if version_matches: result[1] = version_matches.group(1) elif product_matches: result[3] = product_matches.group(1) if result[3] == "BIG-IP": result[0] = "bigip" result[2] = "bigip" elif result[3] == "BIG-IQ": result[0] = "bigiq" result[2] = "bigiq" elif result[3] == "iWorkflow": result[0] = "iworkflow" result[2] = "iworkflow" return result def get_checkpoint_platform(): take = build = release = "" full_name = open("/etc/cp-release").read().strip() with open("/etc/cloud-version") as f: for line in f: k, _, v = line.partition(": ") v = v.strip() if k == "release": release = v elif k == "take": take = v elif k == "build": build = v return ["gaia", take + "." 
+ build, release, full_name] def get_distro(): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['freebsd', release, '', 'freebsd'] elif 'OpenBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] elif 'Linux' in platform.system(): osinfo = get_linux_distribution(0, 'alpine') elif 'NS-BSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['nsbsd', release, '', 'nsbsd'] else: try: # dist() removed in Python 3.7 osinfo = list(platform.dist()) + [''] except: osinfo = ['UNKNOWN', 'FFFF', '', ''] # The platform.py lib has issue with detecting oracle linux distribution. # Merge the following patch provided by oracle as a temporary fix. if os.path.exists("/etc/oracle-release"): osinfo[2] = "oracle" osinfo[3] = "Oracle Linux" if os.path.exists("/etc/euleros-release"): osinfo[0] = "euleros" # The platform.py lib has issue with detecting BIG-IP linux distribution. # Merge the following patch provided by F5. if os.path.exists("/shared/vadc"): osinfo = get_f5_platform() if os.path.exists("/etc/cp-release"): osinfo = get_checkpoint_platform() if os.path.exists("/home/guestshell/azure"): osinfo = ['iosxe', 'csr1000v', '', 'Cisco IOSXE Linux'] # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" AGENT_VERSION = '2.2.32.2' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux VMs in the Azure cloud. This package should be installed on Linux disk images that are built to run in the Azure environment. 
""" AGENT_DIR_GLOB = "{0}-*".format(AGENT_NAME) AGENT_PKG_GLOB = "{0}-*.zip".format(AGENT_NAME) AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+"\.zip") AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) EXT_HANDLER_PATTERN = b".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" EXT_HANDLER_REGEX = re.compile(EXT_HANDLER_PATTERN) __distro__ = get_distro() DISTRO_NAME = __distro__[0] DISTRO_VERSION = __distro__[1] DISTRO_CODE_NAME = __distro__[2] DISTRO_FULL_NAME = __distro__[3] PY_VERSION = sys.version_info PY_VERSION_MAJOR = sys.version_info[0] PY_VERSION_MINOR = sys.version_info[1] PY_VERSION_MICRO = sys.version_info[2] # Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name # - This ensures the agent will "see itself" using the same name and version # as the code that downloads agents. def set_current_agent(): path = os.getcwd() lib_dir = conf.get_lib_dir() if lib_dir[-1] != os.path.sep: lib_dir += os.path.sep agent = path[len(lib_dir):].split(os.path.sep)[0] match = AGENT_NAME_PATTERN.match(agent) if match: version = match.group(1) else: agent = AGENT_LONG_VERSION version = AGENT_VERSION return agent, FlexibleVersion(version) def is_agent_package(path): path = os.path.basename(path) return not re.match(AGENT_PKG_PATTERN, path) is None def is_agent_path(path): path = os.path.basename(path) return not re.match(AGENT_NAME_PATTERN, path) is None CURRENT_AGENT, CURRENT_VERSION = set_current_agent() def set_goal_state_agent(): agent = None if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() match = EXT_HANDLER_REGEX.match(pname) if match: agent = match.group(1) if PY_VERSION_MAJOR > 2: agent = agent.decode('UTF-8') break except IOError: continue if agent is None: agent = CURRENT_VERSION return agent 
GOAL_STATE_AGENT_VERSION = set_goal_state_agent() def is_current_agent_installed(): return CURRENT_AGENT == AGENT_LONG_VERSION def is_snappy(): """ Add this workaround for detecting Snappy Ubuntu Core temporarily, until ubuntu fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 """ if os.path.exists("/etc/motd"): motd = fileutil.read_file("/etc/motd") if "snappy" in motd: return True return False if is_snappy(): DISTRO_FULL_NAME = "Snappy Ubuntu Core" WALinuxAgent-2.2.32/azurelinuxagent/daemon/000077500000000000000000000000001335416306700206065ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/daemon/__init__.py000066400000000000000000000012611335416306700227170ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.main import get_daemon_handler WALinuxAgent-2.2.32/azurelinuxagent/daemon/main.py000066400000000000000000000140061335416306700221050ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import sys import time import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.cgroups import CGroups from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import WireClient from azurelinuxagent.common.rdma import setup_rdma_device from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_NAME, \ AGENT_VERSION, \ DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from azurelinuxagent.daemon.scvmm import get_scvmm_handler from azurelinuxagent.ga.update import get_update_handler from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.rdma import get_rdma_handler OPENSSL_FIPS_ENVIRONMENT = "OPENSSL_FIPS" def get_daemon_handler(): return DaemonHandler() class DaemonHandler(object): """ Main thread of daemon. 
It will invoke other threads to do actual work """ def __init__(self): self.running = True self.osutil = get_osutil() def run(self, child_args=None): logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO) self.check_pid() self.initialize_environment() CGroups.setup() # If FIPS is enabled, set the OpenSSL environment variable # Note: # -- Subprocesses inherit the current environment if conf.get_fips_enabled(): os.environ[OPENSSL_FIPS_ENVIRONMENT] = '1' while self.running: try: self.daemon(child_args) except Exception as e: err_msg = traceback.format_exc() add_event(name=AGENT_NAME, is_success=False, message=ustr(err_msg), op=WALAEventOperation.UnhandledError) logger.warn("Daemon ended with exception -- Sleep 15 seconds and restart daemon") time.sleep(15) def check_pid(self): """Check whether daemon is already running""" pid = None pid_file = conf.get_agent_pid_file_path() if os.path.isfile(pid_file): pid = fileutil.read_file(pid_file) if self.osutil.check_pid_alive(pid): logger.info("Daemon is already running: {0}", pid) sys.exit(0) fileutil.write_file(pid_file, ustr(os.getpid())) def sleep_if_disabled(self): agent_disabled_file_path = conf.get_disable_agent_file_path() if os.path.exists(agent_disabled_file_path): import threading logger.warn("Disabling the guest agent by sleeping forever; " "to re-enable, remove {0} and restart" .format(agent_disabled_file_path)) self.running = False disable_event = threading.Event() disable_event.wait() def initialize_environment(self): # Create lib dir if not os.path.isdir(conf.get_lib_dir()): fileutil.mkdir(conf.get_lib_dir(), mode=0o700) os.chdir(conf.get_lib_dir()) def daemon(self, child_args=None): logger.info("Run daemon") self.protocol_util = get_protocol_util() self.scvmm_handler = get_scvmm_handler() self.resourcedisk_handler = get_resourcedisk_handler() self.rdma_handler 
= get_rdma_handler() self.provision_handler = get_provision_handler() self.update_handler = get_update_handler() if conf.get_detect_scvmm_env(): self.scvmm_handler.run() if conf.get_resourcedisk_format(): self.resourcedisk_handler.run() # Always redetermine the protocol start (e.g., wireserver vs. # on-premise) since a VHD can move between environments self.protocol_util.clear_protocol() self.provision_handler.run() # Enable RDMA, continue in errors if conf.enable_rdma(): self.rdma_handler.install_driver() logger.info("RDMA capabilities are enabled in configuration") try: # Ensure the most recent SharedConfig is available # - Changes to RDMA state may not increment the goal state # incarnation number. A forced update ensures the most # current values. protocol = self.protocol_util.get_protocol() client = protocol.client if client is None or type(client) is not WireClient: raise Exception("Attempt to setup RDMA without Wireserver") client.update_goal_state(forced=True) setup_rdma_device() except Exception as e: logger.error("Error setting up rdma device: %s" % e) else: logger.info("RDMA capabilities are not enabled, skipping") self.sleep_if_disabled() while self.running: self.update_handler.run_latest(child_args=child_args) WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/000077500000000000000000000000001335416306700233105ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/__init__.py000066400000000000000000000013471335416306700254260ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/default.py000066400000000000000000000330071335416306700253110ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import sys import threading from time import sleep import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr import azurelinuxagent.common.conf as conf from azurelinuxagent.common.event import add_event, WALAEventOperation import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.version import AGENT_NAME DATALOSS_WARNING_FILE_NAME = "DATALOSS_WARNING_README.txt" DATA_LOSS_WARNING = """\ WARNING: THIS IS A TEMPORARY DISK. 
Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ class ResourceDiskHandler(object): def __init__(self): self.osutil = get_osutil() self.fs = conf.get_resourcedisk_filesystem() def start_activate_resource_disk(self): disk_thread = threading.Thread(target=self.run) disk_thread.start() def run(self): mount_point = None if conf.get_resourcedisk_format(): mount_point = self.activate_resource_disk() if mount_point is not None and \ conf.get_resourcedisk_enable_swap(): self.enable_swap(mount_point) def activate_resource_disk(self): logger.info("Activate resource disk") try: mount_point = conf.get_resourcedisk_mountpoint() mount_point = self.mount_resource_disk(mount_point) warning_file = os.path.join(mount_point, DATALOSS_WARNING_FILE_NAME) try: fileutil.write_file(warning_file, DATA_LOSS_WARNING) except IOError as e: logger.warn("Failed to write data loss warning:{0}", e) return mount_point except ResourceDiskError as e: logger.error("Failed to mount resource disk {0}", e) add_event(name=AGENT_NAME, is_success=False, message=ustr(e), op=WALAEventOperation.ActivateResourceDisk) def enable_swap(self, mount_point): logger.info("Enable swap") try: size_mb = conf.get_resourcedisk_swap_size_mb() self.create_swap_space(mount_point, size_mb) except ResourceDiskError as e: logger.error("Failed to enable swap {0}", e) def reread_partition_table(self, device): if shellutil.run("sfdisk -R {0}".format(device), chk_err=False): shellutil.run("blockdev --rereadpt {0}".format(device), chk_err=False) def mount_resource_disk(self, mount_point): device = self.osutil.device_for_ide_port(1) if device is None: raise ResourceDiskError("unable to detect disk topology") device = "/dev/{0}".format(device) partition = device + "1" mount_list = 
shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, device) if existing: logger.info("Resource disk [{0}] is already mounted [{1}]", partition, existing) return existing try: fileutil.mkdir(mount_point, mode=0o755) except OSError as ose: msg = "Failed to create mount point " \ "directory [{0}]: {1}".format(mount_point, ose) logger.error(msg) raise ResourceDiskError(msg=msg, inner=ose) logger.info("Examining partition table") ret = shellutil.run_get_output("parted {0} print".format(device)) if ret[0]: raise ResourceDiskError("Could not determine partition info for " "{0}: {1}".format(device, ret[1])) force_option = 'F' if self.fs == 'xfs': force_option = 'f' mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option) if "gpt" in ret[1]: logger.info("GPT detected, finding partitions") parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)] logger.info("Found {0} GPT partition(s).", len(parts)) if len(parts) > 1: logger.info("Removing old GPT partitions") for i in range(1, len(parts) + 1): logger.info("Remove partition {0}", i) shellutil.run("parted {0} rm {1}".format(device, i)) logger.info("Creating new GPT partition") shellutil.run("parted {0} mkpart primary 0% 100%".format(device)) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("GPT not detected, determining filesystem") ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device)) self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("The partition type is {0}", ptype) mount_options = conf.get_resourcedisk_mountoptions() mount_string = 
self.get_mount_string(mount_options, partition, mount_point) attempts = 5 while not os.path.exists(partition) and attempts > 0: logger.info("Waiting for partition [{0}], {1} attempts remaining", partition, attempts) sleep(5) attempts -= 1 if not os.path.exists(partition): raise ResourceDiskError("Partition was not created [{0}]".format(partition)) logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string, chk_err=False) # if the exit code is 32, then the resource disk can be already mounted if ret == 32 and output.find("is already mounted") != -1: logger.warn("Could not mount resource disk: {0}", output) elif ret != 0: # Some kernels seem to issue an async partition re-read after a # 'parted' command invocation. This causes mount to fail if the # partition re-read is not complete by the time mount is # attempted. Seen in CentOS 7.2. Force a sequential re-read of # the partition and try mounting. logger.warn("Failed to mount resource disk. " "Retry mounting after re-reading partition info.") self.reread_partition_table(device) ret, output = shellutil.run_get_output(mount_string) if ret: logger.warn("Failed to mount resource disk. " "Attempting to format and retry mount. [{0}]", output) shellutil.run(mkfs_string) ret, output = shellutil.run_get_output(mount_string) if ret: raise ResourceDiskError("Could not mount {0} " "after syncing partition table: " "[{1}] {2}".format(partition, ret, output)) logger.info("Resource disk {0} is mounted at {1} with {2}", device, mount_point, self.fs) return mount_point def change_partition_type(self, suppress_message, option_str): """ use sfdisk to change partition type. 
First try with --part-type; if fails, fall back to -c """ command_to_use = '--part-type' input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, chk_err=False, log_cmd=True) # fall back to -c if err_code != 0: logger.info("sfdisk with --part-type failed [{0}], retrying with -c", err_code) command_to_use = '-c' input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, log_cmd=True) if err_code == 0: logger.info('{0} succeeded', input) else: logger.error('{0} failed [{1}: {2}]', input, err_code, output) return err_code, output @staticmethod def get_mount_string(mount_options, partition, mount_point): if mount_options is not None: return 'mount -o {0} {1} {2}'.format(mount_options, partition, mount_point) else: return 'mount {0} {1}'.format(partition, mount_point) def create_swap_space(self, mount_point, size_mb): size_kb = size_mb * 1024 size = size_kb * 1024 swapfile = os.path.join(mount_point, 'swapfile') swaplist = shellutil.run_get_output("swapon -s")[1] if swapfile in swaplist \ and os.path.isfile(swapfile) \ and os.path.getsize(swapfile) == size: logger.info("Swap already enabled") return if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: logger.info("Remove old swap file") shellutil.run("swapoff -a", chk_err=False) os.remove(swapfile) if not os.path.isfile(swapfile): logger.info("Create swap file") self.mkfile(swapfile, size_kb * 1024) shellutil.run("mkswap {0}".format(swapfile)) if shellutil.run("swapon {0}".format(swapfile)): raise ResourceDiskError("{0}".format(swapfile)) logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) def mkfile(self, filename, nbytes): """ Create a non-sparse file of that size. Deletes and replaces existing file. To allow efficient execution, fallocate will be tried first. 
This includes ``os.posix_fallocate`` on Python 3.3+ (unix) and the ``fallocate`` command in the popular ``util-linux{,-ng}`` package. A dd fallback will be tried too. When size < 64M, perform single-pass dd. Otherwise do two-pass dd. """ if not isinstance(nbytes, int): nbytes = int(nbytes) if nbytes <= 0: raise ResourceDiskError("Invalid swap size [{0}]".format(nbytes)) if os.path.isfile(filename): os.remove(filename) # If file system is xfs, use dd right away as we have been reported that # swap enabling fails in xfs fs when disk space is allocated with fallocate ret = 0 fn_sh = shellutil.quote((filename,)) if self.fs != 'xfs': # os.posix_fallocate if sys.version_info >= (3, 3): # Probable errors: # - OSError: Seen on Cygwin, libc notimpl? # - AttributeError: What if someone runs this under... try: with open(filename, 'w') as f: os.posix_fallocate(f.fileno(), 0, nbytes) return 0 except: # Not confident with this thing, just keep trying... pass # fallocate command ret = shellutil.run( u"umask 0077 && fallocate -l {0} {1}".format(nbytes, fn_sh)) if ret == 0: return ret logger.info("fallocate unsuccessful, falling back to dd") # dd fallback dd_maxbs = 64 * 1024 ** 2 dd_cmd = "umask 0077 && dd if=/dev/zero bs={0} count={1} " \ "conv=notrunc of={2}" blocks = int(nbytes / dd_maxbs) if blocks > 0: ret = shellutil.run(dd_cmd.format(dd_maxbs, blocks, fn_sh)) << 8 remains = int(nbytes % dd_maxbs) if remains > 0: ret += shellutil.run(dd_cmd.format(remains, 1, fn_sh)) if ret == 0: logger.info("dd successful") else: logger.error("dd unsuccessful") return ret WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/factory.py000066400000000000000000000025351335416306700253360ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.version import DISTRO_NAME, \ DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ResourceDiskHandler from .freebsd import FreeBSDResourceDiskHandler from .openbsd import OpenBSDResourceDiskHandler from distutils.version import LooseVersion as Version def get_resourcedisk_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "freebsd": return FreeBSDResourceDiskHandler() if distro_name == "openbsd": return OpenBSDResourceDiskHandler() return ResourceDiskHandler() WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/freebsd.py000066400000000000000000000121031335416306700252710ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class FreeBSDResourceDiskHandler(ResourceDiskHandler): """ This class handles resource disk mounting for FreeBSD. The resource disk locates at following slot: scbus2 on blkvsc1 bus 0: at scbus2 target 1 lun 0 (da1,pass2) There are 2 variations based on partition table type: 1. MBR: The resource disk partition is /dev/da1s1 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. """ def __init__(self): super(FreeBSDResourceDiskHandler, self).__init__() @staticmethod def parse_gpart_list(data): dic = {} for line in data.split('\n'): if line.find("Geom name: ") != -1: geom_name = line[11:] elif line.find("scheme: ") != -1: dic[geom_name] = line[8:] return dic def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ufs': raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) # 1. 
Detect device err, output = shellutil.run_get_output('gpart list') if err: raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) disks = self.parse_gpart_list(output) device = self.osutil.device_for_ide_port(1) if device is None or not device in disks: # fallback logic to find device err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0') if err: # try again on "3:1:0" err, output = shellutil.run_get_output('camcontrol periphlist 3:1:0') if err: raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' for line in output.split('\n'): index = line.find(':') if index > 0: geom_name = line[:index] if geom_name in disks: device = geom_name break if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. Detect partition partition_table_type = disks[device] if partition_table_type == 'MBR': provider_name = device + 's1' elif partition_table_type == 'GPT': provider_name = device + 'p2' else: raise ResourceDiskError("Unsupported partition table type:{0}".format(output)) err, output = shellutil.run_get_output('gpart show -p {0}'.format(device)) if err or output.find(provider_name) == -1: raise ResourceDiskError("Resource disk partition not found.") partition = '/dev/' + provider_name logger.info('Resource disk partition {0} found.', partition) # 3. 
Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition)) err, output = shellutil.run_get_output('newfs -U {0}'.format(partition)) if err: raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}" .format(partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output)) logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) return mount_point WALinuxAgent-2.2.32/azurelinuxagent/daemon/resourcedisk/openbsd.py000066400000000000000000000114431335416306700253170ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and OpenSSL 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class OpenBSDResourceDiskHandler(ResourceDiskHandler): def __init__(self): super(OpenBSDResourceDiskHandler, self).__init__() # Fase File System (FFS) is UFS if self.fs == 'ufs' or self.fs == 'ufs2': self.fs = 'ffs' def create_swap_space(self, mount_point, size_mb): pass def enable_swap(self, mount_point): size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: logger.info("Enable swap") device = self.osutil.device_for_ide_port(1) err, output = shellutil.run_get_output("swapctl -a /dev/" "{0}b".format(device), chk_err=False) if err: logger.error("Failed to enable swap, error {0}", output) def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ffs': raise ResourceDiskError("Unsupported filesystem type: {0}, only " "ufs/ffs is supported.".format(fs)) # 1. Get device device = self.osutil.device_for_ide_port(1) if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. Get partition partition = "/dev/{0}a".format(device) # 3. 
Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(self.fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info('Creating {0} filesystem on {1}'.format(fs, device)) fdisk_cmd = "/sbin/fdisk -yi {0}".format(device) err, output = shellutil.run_get_output(fdisk_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to create new MBR on {0}, " "error: {1}".format(device, output)) size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: if size_mb > 512 * 1024: size_mb = 512 * 1024 disklabel_cmd = ("echo -e '{0} 1G-* 50%\nswap 1-{1}M 50%' " "| disklabel -w -A -T /dev/stdin " "{2}").format(mount_point, size_mb, device) ret, output = shellutil.run_get_output( disklabel_cmd, chk_err=False) if ret: raise ResourceDiskError("Failed to create new disklabel " "on {0}, error " "{1}".format(device, output)) err, output = shellutil.run_get_output("newfs -O2 {0}a" "".format(device)) if err: raise ResourceDiskError("Failed to create new filesystem on " "partition {0}, error " "{1}".format(partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to mount partition {0}, " "error {1}".format(partition, output)) logger.info("Resource disk partition {0} is mounted at {1} with fstype " "{2}", partition, mount_point, fs) return mount_point WALinuxAgent-2.2.32/azurelinuxagent/daemon/scvmm.py000066400000000000000000000053101335416306700223040ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import re import os import sys import subprocess import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.osutil import get_osutil VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" VMM_STARTUP_SCRIPT_NAME= "install" def get_scvmm_handler(): return ScvmmHandler() class ScvmmHandler(object): def __init__(self): self.osutil = get_osutil() def detect_scvmm_env(self, dev_dir='/dev'): logger.info("Detecting Microsoft System Center VMM Environment") found=False # try to load the ATAPI driver, continue on failure self.osutil.try_load_atapiix_mod() # cycle through all available /dev/sr*|hd*|cdrom*|cd* looking for the scvmm configuration file mount_point = conf.get_dvd_mount_point() for devices in filter(lambda x: x is not None, [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)', dev) for dev in os.listdir(dev_dir)]): dvd_device = os.path.join(dev_dir, devices.group(0)) self.osutil.mount_dvd(max_retry=1, chk_err=False, dvd_device=dvd_device, mount_point=mount_point) found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) if found: self.start_scvmm_agent(mount_point=mount_point) break else: self.osutil.umount_dvd(chk_err=False, mount_point=mount_point) return found def start_scvmm_agent(self, mount_point=None): logger.info("Starting Microsoft System Center VMM Initialization " "Process") if mount_point is None: mount_point = conf.get_dvd_mount_point() startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) devnull = open(os.devnull, 'w') 
subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point], stdout=devnull, stderr=devnull) def run(self): if self.detect_scvmm_env(): logger.info("Exiting") time.sleep(300) sys.exit(0) WALinuxAgent-2.2.32/azurelinuxagent/distro/000077500000000000000000000000001335416306700206475ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/distro/__init__.py000066400000000000000000000011661335416306700227640ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/distro/suse/000077500000000000000000000000001335416306700216265ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/distro/suse/__init__.py000066400000000000000000000011661335416306700237430ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/ga/000077500000000000000000000000001335416306700177325ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/ga/__init__.py000066400000000000000000000011661335416306700220470ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/ga/env.py000066400000000000000000000146711335416306700211050ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import re import os import socket import time import threading import operator import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.event import add_periodic, WALAEventOperation from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import INCARNATION_FILE_NAME from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.utils.archive import StateArchiver from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CACHE_PATTERNS = [ re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] MAXIMUM_CACHED_FILES = 50 ARCHIVE_INTERVAL = datetime.timedelta(hours=24) def get_env_handler(): return EnvHandler() class EnvHandler(object): """ Monitor changes to dhcp and hostname. If dhcp client process re-start has occurred, reset routes, dhcp with fabric. Monitor scsi disk. 
If new scsi disk found, set timeout """ def __init__(self): self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() self.protocol_util = get_protocol_util() self.stopped = True self.hostname = None self.dhcp_id = None self.server_thread = None self.dhcp_warning_enabled = True self.last_archive = None self.archiver = StateArchiver(conf.get_lib_dir()) def run(self): if not self.stopped: logger.info("Stop existing env monitor service.") self.stop() self.stopped = False logger.info("Start env monitor service.") self.dhcp_handler.conf_routes() self.hostname = self.osutil.get_hostname_record() self.dhcp_id = self.osutil.get_dhcp_pid() self.start() def is_alive(self): return self.server_thread.is_alive() def start(self): self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() def monitor(self): """ Monitor firewall rules Monitor dhcp client pid and hostname. If dhcp client process re-start has occurred, reset routes. Purge unnecessary files from disk cache. """ protocol = self.protocol_util.get_protocol() reset_firewall_fules = False while not self.stopped: self.osutil.remove_rules_files() if conf.enable_firewall(): # If the rules ever change we must reset all rules and start over again. # # There was a rule change at 2.2.26, which started dropping non-root traffic # to WireServer. The previous rules allowed traffic. Having both rules in # place negated the fix in 2.2.26. 
if not reset_firewall_fules: self.osutil.remove_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) reset_firewall_fules = True success = self.osutil.enable_firewall( dst_ip=protocol.endpoint, uid=os.getuid()) add_periodic( logger.EVERY_HOUR, AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Firewall, is_success=success, log_event=False) timeout = conf.get_root_device_scsi_timeout() if timeout is not None: self.osutil.set_scsi_disks_timeout(timeout) if conf.get_monitor_hostname(): self.handle_hostname_update() self.handle_dhclient_restart() self.archive_history() time.sleep(5) def handle_hostname_update(self): curr_hostname = socket.gethostname() if curr_hostname != self.hostname: logger.info("EnvMonitor: Detected hostname change: {0} -> {1}", self.hostname, curr_hostname) self.osutil.set_hostname(curr_hostname) self.osutil.publish_hostname(curr_hostname) self.hostname = curr_hostname def handle_dhclient_restart(self): if self.dhcp_id is None: if self.dhcp_warning_enabled: logger.warn("Dhcp client is not running. ") self.dhcp_id = self.osutil.get_dhcp_pid() # disable subsequent error logging self.dhcp_warning_enabled = self.dhcp_id is not None return # the dhcp process has not changed since the last check if self.osutil.check_pid_alive(self.dhcp_id.strip()): return new_pid = self.osutil.get_dhcp_pid() if new_pid is not None and new_pid != self.dhcp_id: logger.info("EnvMonitor: Detected dhcp client restart. " "Restoring routing table.") self.dhcp_handler.conf_routes() self.dhcp_id = new_pid def archive_history(self): """ Purge history if we have exceed the maximum count. Create a .zip of the history that has been preserved. """ if self.last_archive is not None \ and datetime.datetime.utcnow() < \ self.last_archive + ARCHIVE_INTERVAL: return self.archiver.purge() self.archiver.archive() def stop(self): """ Stop server communication and join the thread to main thread. 
""" self.stopped = True if self.server_thread is not None: self.server_thread.join() WALinuxAgent-2.2.32/azurelinuxagent/ga/exthandlers.py000066400000000000000000001373341335416306700226400ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import datetime import glob import json import operator import os import random import re import shutil import stat import subprocess import time import traceback import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.version as version from azurelinuxagent.common.cgroups import CGroups, CGroupsTelemetry from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA_DEFAULT, ERROR_STATE_DELTA_INSTALL from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds, report_event from azurelinuxagent.common.exception import ExtensionError, ProtocolError, ProtocolNotFoundError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ VMStatus, ExtHandler, \ get_properties, \ set_properties from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.utils.processutil import capture_from_process from 
azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION # HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 EXTENSION_STATUS_ERROR = 'error' VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)" HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN+"$", re.IGNORECASE) HANDLER_PKG_EXT = ".zip" HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + r"\.zip$", re.IGNORECASE) def validate_has_key(obj, key, fullname): if key not in obj: raise ExtensionError("Missing: {0}".format(fullname)) def validate_in_range(val, valid_range, name): if val not in valid_range: raise ExtensionError("Invalid {0}: {1}".format(name, val)) def parse_formatted_message(formatted_message): if formatted_message is None: return None validate_has_key(formatted_message, 'lang', 'formattedMessage/lang') validate_has_key(formatted_message, 'message', 'formattedMessage/message') return formatted_message.get('message') def parse_ext_substatus(substatus): # Check extension sub status format validate_has_key(substatus, 'status', 'substatus/status') validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, 'substatus/status') status = ExtensionSubStatus() status.name = substatus.get('name') status.status = substatus.get('status') status.code = substatus.get('code', 0) formatted_message = substatus.get('formattedMessage') status.message = parse_formatted_message(formatted_message) return status def parse_ext_status(ext_status, data): if data is None or len(data) is None: return # Currently, only the first status will be reported data = data[0] # Check extension status format validate_has_key(data, 'status', 'status') status_data = data['status'] validate_has_key(status_data, 'status', 'status/status') status = status_data['status'] if status not in VALID_EXTENSION_STATUS: 
status = EXTENSION_STATUS_ERROR applied_time = status_data.get('configurationAppliedTime') ext_status.configurationAppliedTime = applied_time ext_status.operation = status_data.get('operation') ext_status.status = status ext_status.code = status_data.get('code', 0) formatted_message = status_data.get('formattedMessage') ext_status.message = parse_formatted_message(formatted_message) substatus_list = status_data.get('substatus') if substatus_list is None: return for substatus in substatus_list: if substatus is not None: ext_status.substatusList.append(parse_ext_substatus(substatus)) def migrate_handler_state(): """ Migrate handler state and status (if they exist) from an agent-owned directory into the handler-owned config directory Notes: - The v2.0.x branch wrote all handler-related state into the handler-owned config directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). - The v2.1.x branch original moved that state into an agent-owned handler state directory (e.g., /var/lib/waagent/handler_state). - This move can cause v2.1.x agents to multiply invoke a handler's install command. It also makes clean-up more difficult since the agent must remove the state as well as the handler directory. 
""" handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state") if not os.path.isdir(handler_state_path): return for handler_path in glob.iglob(os.path.join(handler_state_path, "*")): handler = os.path.basename(handler_path) handler_config_path = os.path.join(conf.get_lib_dir(), handler, "config") if os.path.isdir(handler_config_path): for file in ("State", "Status"): from_path = os.path.join(handler_state_path, handler, file.lower()) to_path = os.path.join(handler_config_path, "Handler" + file) if os.path.isfile(from_path) and not os.path.isfile(to_path): try: shutil.move(from_path, to_path) except Exception as e: logger.warn( "Exception occurred migrating {0} {1} file: {2}", handler, file, str(e)) try: shutil.rmtree(handler_state_path) except Exception as e: logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e)) return class ExtHandlerState(object): NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" Failed = "Failed" def get_exthandlers_handler(): return ExtHandlersHandler() class ExtHandlersHandler(object): def __init__(self): self.protocol_util = get_protocol_util() self.protocol = None self.ext_handlers = None self.last_etag = None self.log_report = False self.log_etag = True self.log_process = False self.report_status_error_state = ErrorState() self.get_artifact_error_state = ErrorState(min_timedelta=ERROR_STATE_DELTA_INSTALL) def run(self): self.ext_handlers, etag = None, None try: self.protocol = self.protocol_util.get_protocol() self.ext_handlers, etag = self.protocol.get_ext_handlers() self.get_artifact_error_state.reset() except Exception as e: msg = u"Exception retrieving extension handlers: {0}".format(ustr(e)) detailed_msg = '{0} {1}'.format(msg, traceback.format_exc()) self.get_artifact_error_state.incr() if self.get_artifact_error_state.is_triggered(): add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.GetArtifactExtended, is_success=False, message="Failed to get extension 
artifact for over " "{0): {1}".format(self.get_artifact_error_state.min_timedelta, msg)) self.get_artifact_error_state.reset() else: logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=detailed_msg) return try: msg = u"Handle extensions updates for incarnation {0}".format(etag) logger.verbose(msg) # Log status report success on new config self.log_report = True self.handle_ext_handlers(etag) self.last_etag = etag self.report_ext_handlers_status() self.cleanup_outdated_handlers() except Exception as e: msg = u"Exception processing extension handlers: {0}".format( ustr(e)) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=msg) return def cleanup_outdated_handlers(self): handlers = [] pkgs = [] # Build a collection of uninstalled handlers and orphaned packages # Note: # -- An orphaned package is one without a corresponding handler # directory for item in os.listdir(conf.get_lib_dir()): path = os.path.join(conf.get_lib_dir(), item) if version.is_agent_package(path) or version.is_agent_path(path): continue if os.path.isdir(path): if re.match(HANDLER_NAME_PATTERN, item) is None: continue try: eh = ExtHandler() separator = item.rfind('-') eh.name = item[0:separator] eh.properties.version = str(FlexibleVersion(item[separator+1:])) handler = ExtHandlerInstance(eh, self.protocol) except Exception: continue if handler.get_handler_state() != ExtHandlerState.NotInstalled: continue handlers.append(handler) elif os.path.isfile(path) and \ not os.path.isdir(path[0:-len(HANDLER_PKG_EXT)]): if not re.match(HANDLER_PKG_PATTERN, item): continue pkgs.append(path) # Then, remove the orphaned packages for pkg in pkgs: try: os.remove(pkg) logger.verbose("Removed orphaned extension package {0}".format(pkg)) except OSError as e: logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror)) # Finally, remove 
the directories and packages of the # uninstalled handlers for handler in handlers: handler.rm_ext_handler_dir() pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT) if os.path.isfile(pkg): try: os.remove(pkg) logger.verbose("Removed extension package {0}".format(pkg)) except OSError as e: logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror)) def handle_ext_handlers(self, etag=None): if not conf.get_extensions_enabled(): logger.verbose("Extension handling is disabled") return if self.ext_handlers.extHandlers is None or \ len(self.ext_handlers.extHandlers) == 0: logger.verbose("No extension handler config found") return if conf.get_enable_overprovisioning(): artifacts_profile = self.protocol.get_artifacts_profile() if artifacts_profile and artifacts_profile.is_on_hold(): logger.info("Extension handling is on hold") return self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key')) for ext_handler in self.ext_handlers.extHandlers: # TODO: handle install in sequence, enable in parallel self.handle_ext_handler(ext_handler, etag) def handle_ext_handler(self, ext_handler, etag): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) try: state = ext_handler.properties.state if ext_handler_i.decide_version(target_state=state) is None: version = ext_handler_i.ext_handler.properties.version name = ext_handler_i.ext_handler.name err_msg = "Unable to find version {0} in manifest for extension {1}".format(version, name) ext_handler_i.set_operation(WALAEventOperation.Download) ext_handler_i.set_handler_status(message=ustr(err_msg), code=-1) ext_handler_i.report_event(message=ustr(err_msg), is_success=False) return self.get_artifact_error_state.reset() if not ext_handler_i.is_upgrade and self.last_etag == etag: if self.log_etag: ext_handler_i.logger.verbose("Version {0} is current for etag {1}", ext_handler_i.pkg.version, etag) self.log_etag = False return self.log_etag = True 
ext_handler_i.logger.info("Target handler state: {0}", state) if state == u"enabled": self.handle_enable(ext_handler_i) elif state == u"disabled": self.handle_disable(ext_handler_i) elif state == u"uninstall": self.handle_uninstall(ext_handler_i) else: message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) except ExtensionError as e: self.handle_handle_ext_handler_error(ext_handler_i, e, e.code) except Exception as e: self.handle_handle_ext_handler_error(ext_handler_i, e) def handle_handle_ext_handler_error(self, ext_handler_i, e, code=-1): msg = ustr(e) ext_handler_i.set_handler_status(message=msg, code=code) self.get_artifact_error_state.incr() if self.get_artifact_error_state.is_triggered(): report_event(op=WALAEventOperation.GetArtifactExtended, message="Failed to get artifact for over " "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg), is_success=False) self.get_artifact_error_state.reset() else: ext_handler_i.logger.warn(msg) def handle_enable(self, ext_handler_i): self.log_process = True old_ext_handler_i = ext_handler_i.get_installed_ext_handler() handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Enable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.NotInstalled: ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) ext_handler_i.download() ext_handler_i.update_settings() if old_ext_handler_i is None: ext_handler_i.install() elif ext_handler_i.version_ne(old_ext_handler_i): old_ext_handler_i.disable() ext_handler_i.copy_status_files(old_ext_handler_i) if ext_handler_i.version_gt(old_ext_handler_i): ext_handler_i.update() else: old_ext_handler_i.update(version=ext_handler_i.ext_handler.properties.version) old_ext_handler_i.uninstall() old_ext_handler_i.rm_ext_handler_dir() ext_handler_i.update_with_install() else: ext_handler_i.update_settings() ext_handler_i.enable() def handle_disable(self, ext_handler_i): self.log_process = 
True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Disable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", handler_state.lower()) if handler_state != ExtHandlerState.NotInstalled: if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() ext_handler_i.uninstall() ext_handler_i.rm_ext_handler_dir() def report_ext_handlers_status(self): """ Go through handler_state dir, collect and report status """ vm_status = VMStatus(status="Ready", message="Guest Agent is running") if self.ext_handlers is not None: for ext_handler in self.ext_handlers.extHandlers: try: self.report_ext_handler_status(vm_status, ext_handler) except ExtensionError as e: add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=ustr(e)) logger.verbose("Report vm agent status") try: self.protocol.report_vm_status(vm_status) if self.log_report: logger.verbose("Completed vm agent status report") self.report_status_error_state.reset() except ProtocolNotFoundError as e: self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) logger.verbose(message) except ProtocolError as e: self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=message) if self.report_status_error_state.is_triggered(): message = "Failed to report vm agent status for more than {0}"\ .format(self.report_status_error_state.min_timedelta) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ReportStatusExtended, is_success=False, message=message) 
self.report_status_error_state.reset() def report_ext_handler_status(self, vm_status, ext_handler): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) handler_status = ext_handler_i.get_handler_status() if handler_status is None: return handler_state = ext_handler_i.get_handler_state() if handler_state != ExtHandlerState.NotInstalled: try: active_exts = ext_handler_i.report_ext_status() handler_status.extensions.extend(active_exts) except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=e.code) try: heartbeat = ext_handler_i.collect_heartbeat() if heartbeat is not None: handler_status.status = heartbeat.get('status') except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=e.code) vm_status.vmAgent.extensionHandlers.append(handler_status) class ExtHandlerInstance(object): def __init__(self, ext_handler, protocol): self.ext_handler = ext_handler self.protocol = protocol self.operation = None self.pkg = None self.pkg_file = None self.is_upgrade = False self.logger = None self.set_logger() try: fileutil.mkdir(self.get_log_dir(), mode=0o755) except IOError as e: self.logger.error(u"Failed to create extension log dir: {0}", e) log_file = os.path.join(self.get_log_dir(), "CommandExecution.log") self.logger.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, log_file) def decide_version(self, target_state=None): self.logger.verbose("Decide which version to use") try: pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) except ProtocolError as e: raise ExtensionError("Failed to get ext handler pkgs", e) # Determine the desired and installed versions requested_version = FlexibleVersion(str(self.ext_handler.properties.version)) installed_version_string = self.get_installed_version() installed_version = requested_version \ if installed_version_string is None \ else FlexibleVersion(installed_version_string) # Divide packages # - Find the installed package (its version must exactly match) # - 
Find the internal candidate (its version must exactly match) # - Separate the public packages selected_pkg = None installed_pkg = None pkg_list.versions.sort(key=lambda p: FlexibleVersion(p.version)) for pkg in pkg_list.versions: pkg_version = FlexibleVersion(pkg.version) if pkg_version == installed_version: installed_pkg = pkg if requested_version.matches(pkg_version): selected_pkg = pkg # Finally, update the version only if not downgrading # Note: # - A downgrade, which will be bound to the same major version, # is allowed if the installed version is no longer available if target_state == u"uninstall" or target_state == u"disabled": if installed_pkg is None: msg = "Failed to find installed version of {0} " \ "to uninstall".format(self.ext_handler.name) self.logger.warn(msg) self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ if installed_version is not None else None else: self.pkg = selected_pkg if self.pkg is not None: self.ext_handler.properties.version = str(selected_pkg.version) # Note if the selected package is different than that installed if installed_pkg is None \ or (self.pkg is not None and FlexibleVersion(self.pkg.version) != FlexibleVersion(installed_pkg.version)): self.is_upgrade = True if self.pkg is not None: self.logger.verbose("Use version: {0}", self.pkg.version) self.set_logger() return self.pkg def set_logger(self): prefix = "[{0}]".format(self.get_full_name()) self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) def version_gt(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) > FlexibleVersion(other_version) def version_ne(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) != FlexibleVersion(other_version) def get_installed_ext_handler(self): lastest_version = self.get_installed_version() if 
lastest_version is None: return None installed_handler = ExtHandler() set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) installed_handler.properties.version = lastest_version return ExtHandlerInstance(installed_handler, self.protocol) def get_installed_version(self): lastest_version = None for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")): if not os.path.isdir(path): continue separator = path.rfind('-') version_from_path = FlexibleVersion(path[separator+1:]) state_path = os.path.join(path, 'config', 'HandlerState') if not os.path.exists(state_path) or \ fileutil.read_file(state_path) == \ ExtHandlerState.NotInstalled: logger.verbose("Ignoring version of uninstalled extension: " "{0}".format(path)) continue if lastest_version is None or lastest_version < version_from_path: lastest_version = version_from_path return str(lastest_version) if lastest_version is not None else None def copy_status_files(self, old_ext_handler_i): self.logger.info("Copy status files from old plugin to new") old_ext_dir = old_ext_handler_i.get_base_dir() new_ext_dir = self.get_base_dir() old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq") if os.path.isfile(old_ext_mrseq_file): shutil.copy2(old_ext_mrseq_file, new_ext_dir) old_ext_status_dir = old_ext_handler_i.get_status_dir() new_ext_status_dir = self.get_status_dir() if os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file): shutil.copy2(status_file, new_ext_status_dir) def set_operation(self, op): self.operation = op def report_event(self, message="", is_success=True, duration=0, log_event=True): ext_handler_version = self.ext_handler.properties.version add_event(name=self.ext_handler.name, version=ext_handler_version, message=message, op=self.operation, is_success=is_success, duration=duration, log_event=log_event) def download(self): begin_utc = 
datetime.datetime.utcnow() self.logger.verbose("Download extension package") self.set_operation(WALAEventOperation.Download) if self.pkg is None: raise ExtensionError("No package uri found") uris_shuffled = self.pkg.uris random.shuffle(uris_shuffled) file_downloaded = False for uri in uris_shuffled: try: destination = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip") file_downloaded = self.protocol.download_ext_handler_pkg(uri.uri, destination) if file_downloaded and os.path.exists(destination): self.pkg_file = destination break except Exception as e: logger.warn("Error while downloading extension: {0}", ustr(e)) if not file_downloaded: raise ExtensionError("Failed to download extension", code=1001) self.logger.verbose("Unzip extension package") try: zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir()) os.remove(self.pkg_file) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to unzip extension package", e, code=1001) # Add user execute permission to all files under the base dir for file in fileutil.get_all_files(self.get_base_dir()): fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR) duration = elapsed_milliseconds(begin_utc) self.report_event(message="Download succeeded", duration=duration) self.logger.info("Initialize extension directory") # Save HandlerManifest.json man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json') if man_file is None: raise ExtensionError("HandlerManifest.json not found") try: man = fileutil.read_file(man_file, remove_bom=True) fileutil.write_file(self.get_manifest_file(), man) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to save HandlerManifest.json", e) # Create status and config dir try: status_dir = self.get_status_dir() fileutil.mkdir(status_dir, mode=0o700) seq_no, status_path = self.get_status_file_path() if status_path is not 
None: now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") status = { "version": 1.0, "timestampUTC": now, "status": { "name": self.ext_handler.name, "operation": "Enabling Handler", "status": "transitioning", "code": 0 } } fileutil.write_file(status_path, json.dumps(status)) conf_dir = self.get_conf_dir() fileutil.mkdir(conf_dir, mode=0o700) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to create status or config dir", e) # Save HandlerEnvironment.json self.create_handler_env() def enable(self): self.set_operation(WALAEventOperation.Enable) man = self.load_manifest() enable_cmd = man.get_enable_command() self.logger.info("Enable extension [{0}]".format(enable_cmd)) self.launch_command(enable_cmd, timeout=300, extension_error_code=1009) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") def disable(self): self.set_operation(WALAEventOperation.Disable) man = self.load_manifest() disable_cmd = man.get_disable_command() self.logger.info("Disable extension [{0}]".format(disable_cmd)) self.launch_command(disable_cmd, timeout=900, extension_error_code=1010) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") def install(self): man = self.load_manifest() install_cmd = man.get_install_command() self.logger.info("Install extension [{0}]".format(install_cmd)) self.set_operation(WALAEventOperation.Install) self.launch_command(install_cmd, timeout=900, extension_error_code=1007) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): try: self.set_operation(WALAEventOperation.UnInstall) man = self.load_manifest() uninstall_cmd = man.get_uninstall_command() self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) self.launch_command(uninstall_cmd) except ExtensionError as e: self.report_event(message=ustr(e), is_success=False) def 
rm_ext_handler_dir(self): try: base_dir = self.get_base_dir() if os.path.isdir(base_dir): self.logger.info("Remove extension handler directory: {0}", base_dir) shutil.rmtree(base_dir) except IOError as e: message = "Failed to remove extension handler directory: {0}".format(e) self.report_event(message=message, is_success=False) self.logger.warn(message) def update(self, version=None): if version is None: version = self.ext_handler.properties.version try: self.set_operation(WALAEventOperation.Update) man = self.load_manifest() update_cmd = man.get_update_command() self.logger.info("Update extension [{0}]".format(update_cmd)) self.launch_command(update_cmd, timeout=900, extension_error_code=1008, env={'VERSION': version}) except ExtensionError: # prevent the handler update from being retried self.set_handler_state(ExtHandlerState.Failed) raise def update_with_install(self): man = self.load_manifest() if man.is_update_with_install(): self.install() else: self.logger.info("UpdateWithInstall not set. 
" "Skip install during upgrade.") self.set_handler_state(ExtHandlerState.Installed) def get_largest_seq_no(self): seq_no = -1 conf_dir = self.get_conf_dir() for item in os.listdir(conf_dir): item_path = os.path.join(conf_dir, item) if os.path.isfile(item_path): try: separator = item.rfind(".") if separator > 0 and item[separator + 1:] == 'settings': curr_seq_no = int(item.split('.')[0]) if curr_seq_no > seq_no: seq_no = curr_seq_no except (ValueError, IndexError, TypeError): self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no def get_status_file_path(self, extension=None): path = None seq_no = self.get_largest_seq_no() # Issue 1116: use the sequence number from goal state where possible if extension is not None and extension.sequenceNumber is not None: try: gs_seq_no = int(extension.sequenceNumber) if gs_seq_no != seq_no: add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.SequenceNumberMismatch, is_success=False, message="Goal state: {0}, disk: {1}".format(gs_seq_no, seq_no), log_event=False) seq_no = gs_seq_no except ValueError: logger.error('Sequence number [{0}] does not appear to be valid'.format(extension.sequenceNumber)) if seq_no > -1: path = os.path.join( self.get_status_dir(), "{0}.status".format(seq_no)) return seq_no, path def collect_ext_status(self, ext): self.logger.verbose("Collect extension status") seq_no, ext_status_file = self.get_status_file_path(ext) if seq_no == -1: return None ext_status = ExtensionStatus(seq_no=seq_no) try: data_str = fileutil.read_file(ext_status_file) data = json.loads(data_str) parse_ext_status(ext_status, data) except IOError as e: ext_status.message = u"Failed to get status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" except ExtensionError as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = e.code ext_status.status = "error" except ValueError as e: ext_status.message = u"Malformed status file {0}".format(e) 
ext_status.code = -1 ext_status.status = "error" return ext_status def report_ext_status(self): active_exts = [] # TODO Refactor or remove this common code pattern (for each extension subordinate to an ext_handler, do X). for ext in self.ext_handler.properties.extensions: ext_status = self.collect_ext_status(ext) if ext_status is None: continue try: self.protocol.report_ext_status(self.ext_handler.name, ext.name, ext_status) active_exts.append(ext.name) except ProtocolError as e: self.logger.error(u"Failed to report extension status: {0}", e) return active_exts def collect_heartbeat(self): man = self.load_manifest() if not man.is_report_heartbeat(): return heartbeat_file = os.path.join(conf.get_lib_dir(), self.get_heartbeat_file()) if not os.path.isfile(heartbeat_file): raise ExtensionError("Failed to get heart beat file") if not self.is_responsive(heartbeat_file): return { "status": "Unresponsive", "code": -1, "message": "Extension heartbeat is not responsive" } try: heartbeat_json = fileutil.read_file(heartbeat_file) heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] except IOError as e: raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) except (ValueError, KeyError) as e: raise ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat @staticmethod def is_responsive(heartbeat_file): """ Was heartbeat_file updated within the last ten (10) minutes? :param heartbeat_file: str :return: bool """ last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) return last_update <= 600 def launch_command(self, cmd, timeout=300, extension_error_code=1000, env=None): begin_utc = datetime.datetime.utcnow() self.logger.verbose("Launch command: [{0}]", cmd) base_dir = self.get_base_dir() if env is None: env = {} env.update(os.environ) try: # This should be .run(), but due to the wide variety # of Python versions we must support we must use .communicate(). 
# Some extensions erroneously begin cmd with a slash; don't interpret those # as root-relative. (Issue #1170) full_path = os.path.join(base_dir, cmd.lstrip(os.path.sep)) def pre_exec_function(): """ Change process state before the actual target process is started. Effectively, this runs between the fork() and the exec() of sub-process creation. :return: """ os.setsid() CGroups.add_to_extension_cgroup(self.ext_handler.name) process = subprocess.Popen(full_path, shell=True, cwd=base_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, preexec_fn=pre_exec_function) except OSError as e: raise ExtensionError("Failed to launch '{0}': {1}".format(full_path, e.strerror), code=extension_error_code) cg = CGroups.for_extension(self.ext_handler.name) CGroupsTelemetry.track_extension(self.ext_handler.name, cg) msg = capture_from_process(process, cmd, timeout, extension_error_code) ret = process.poll() if ret is None: raise ExtensionError("Process {0} was not terminated: {1}\n{2}".format(process.pid, cmd, msg), code=extension_error_code) if ret != 0: raise ExtensionError("Non-zero exit code: {0}, {1}\n{2}".format(ret, cmd, msg), code=extension_error_code) duration = elapsed_milliseconds(begin_utc) log_msg = "{0}\n{1}".format(cmd, "\n".join([line for line in msg.split('\n') if line != ""])) self.logger.verbose(log_msg) self.report_event(message=log_msg, duration=duration, log_event=False) def load_manifest(self): man_file = self.get_manifest_file() try: data = json.loads(fileutil.read_file(man_file)) except (IOError, OSError) as e: raise ExtensionError('Failed to load manifest file ({0}): {1}'.format(man_file, e.strerror), code=1002) except ValueError: raise ExtensionError('Malformed manifest file ({0}).'.format(man_file), code=1003) return HandlerManifest(data[0]) def update_settings_file(self, settings_file, settings): settings_file = os.path.join(self.get_conf_dir(), settings_file) try: fileutil.write_file(settings_file, settings) except IOError as e: 
fileutil.clean_ioerror(e, paths=[settings_file]) raise ExtensionError(u"Failed to update settings file", e) def update_settings(self): if self.ext_handler.properties.extensions is None or \ len(self.ext_handler.properties.extensions) == 0: # This is the behavior of waagent 2.0.x # The new agent has to be consistent with the old one. self.logger.info("Extension has no settings, write empty 0.settings") self.update_settings_file("0.settings", "") return for ext in self.ext_handler.properties.extensions: settings = { 'publicSettings': ext.publicSettings, 'protectedSettings': ext.protectedSettings, 'protectedSettingsCertThumbprint': ext.certificateThumbprint } ext_settings = { "runtimeSettings": [{ "handlerSettings": settings }] } settings_file = "{0}.settings".format(ext.sequenceNumber) self.logger.info("Update settings file: {0}", settings_file) self.update_settings_file(settings_file, json.dumps(ext_settings)) def create_handler_env(self): env = [{ "name": self.ext_handler.name, "version": HANDLER_ENVIRONMENT_VERSION, "handlerEnvironment": { "logFolder": self.get_log_dir(), "configFolder": self.get_conf_dir(), "statusFolder": self.get_status_dir(), "heartbeatFile": self.get_heartbeat_file() } }] try: fileutil.write_file(self.get_env_file(), json.dumps(env)) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to save handler environment", e) def set_handler_state(self, handler_state): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, "HandlerState") try: if not os.path.exists(state_dir): fileutil.mkdir(state_dir, mode=0o700) fileutil.write_file(state_file, handler_state) except IOError as e: fileutil.clean_ioerror(e, paths=[state_file]) self.logger.error("Failed to set state: {0}", e) def get_handler_state(self): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, "HandlerState") if not os.path.isfile(state_file): return ExtHandlerState.NotInstalled try: return 
fileutil.read_file(state_file) except IOError as e: self.logger.error("Failed to get state: {0}", e) return ExtHandlerState.NotInstalled def set_handler_status(self, status="NotReady", message="", code=0): state_dir = self.get_conf_dir() handler_status = ExtHandlerStatus() handler_status.name = self.ext_handler.name handler_status.version = str(self.ext_handler.properties.version) handler_status.message = message handler_status.code = code handler_status.status = status status_file = os.path.join(state_dir, "HandlerStatus") try: handler_status_json = json.dumps(get_properties(handler_status)) if handler_status_json is not None: fileutil.write_file(status_file, handler_status_json) else: self.logger.error("Failed to create JSON document of handler status for {0} version {1}".format( self.ext_handler.name, self.ext_handler.properties.version)) except (IOError, ValueError, ProtocolError) as e: fileutil.clean_ioerror(e, paths=[status_file]) self.logger.error("Failed to save handler status: {0}, {1}", ustr(e), traceback.format_exc()) def get_handler_status(self): state_dir = self.get_conf_dir() status_file = os.path.join(state_dir, "HandlerStatus") if not os.path.isfile(status_file): return None try: data = json.loads(fileutil.read_file(status_file)) handler_status = ExtHandlerStatus() set_properties("ExtHandlerStatus", handler_status, data) return handler_status except (IOError, ValueError) as e: self.logger.error("Failed to get handler status: {0}", e) def get_full_name(self): return "{0}-{1}".format(self.ext_handler.name, self.ext_handler.properties.version) def get_base_dir(self): return os.path.join(conf.get_lib_dir(), self.get_full_name()) def get_status_dir(self): return os.path.join(self.get_base_dir(), "status") def get_conf_dir(self): return os.path.join(self.get_base_dir(), 'config') def get_heartbeat_file(self): return os.path.join(self.get_base_dir(), 'heartbeat.log') def get_manifest_file(self): return os.path.join(self.get_base_dir(), 
'HandlerManifest.json') def get_env_file(self): return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') def get_log_dir(self): return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name) class HandlerEnvironment(object): def __init__(self, data): self.data = data def get_version(self): return self.data["version"] def get_log_dir(self): return self.data["handlerEnvironment"]["logFolder"] def get_conf_dir(self): return self.data["handlerEnvironment"]["configFolder"] def get_status_dir(self): return self.data["handlerEnvironment"]["statusFolder"] def get_heartbeat_file(self): return self.data["handlerEnvironment"]["heartbeatFile"] class HandlerManifest(object): def __init__(self, data): if data is None or data['handlerManifest'] is None: raise ExtensionError('Malformed manifest file.') self.data = data def get_name(self): return self.data["name"] def get_version(self): return self.data["version"] def get_install_command(self): return self.data['handlerManifest']["installCommand"] def get_uninstall_command(self): return self.data['handlerManifest']["uninstallCommand"] def get_update_command(self): return self.data['handlerManifest']["updateCommand"] def get_enable_command(self): return self.data['handlerManifest']["enableCommand"] def get_disable_command(self): return self.data['handlerManifest']["disableCommand"] def is_report_heartbeat(self): return self.data['handlerManifest'].get('reportHeartbeat', False) def is_update_with_install(self): update_mode = self.data['handlerManifest'].get('updateMode') if update_mode is None: return True return update_mode.lower() == "updatewithinstall" WALinuxAgent-2.2.32/azurelinuxagent/ga/monitor.py000066400000000000000000000462461335416306700220070ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import datetime import json import os import platform import time import threading import traceback import uuid import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.errorstate import ErrorState from azurelinuxagent.common.cgroups import CGroups, CGroupsTelemetry from azurelinuxagent.common.event import add_event, report_metric, WALAEventOperation from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.protocol.imds import get_imds_client from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEventList, \ TelemetryEvent, \ set_properties import azurelinuxagent.common.utils.networkutil as networkutil from azurelinuxagent.common.utils.restutil import IOErrorCounter from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib, hash_strings from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION def parse_event(data_str): try: return parse_json_event(data_str) except ValueError: return parse_xml_event(data_str) def parse_xml_param(param_node): name = getattrib(param_node, "Name") value_str = getattrib(param_node, 
"Value") attr_type = getattrib(param_node, "T") value = value_str if attr_type == 'mt:uint64': value = int(value_str) elif attr_type == 'mt:bool': value = bool(value_str) elif attr_type == 'mt:float64': value = float(value_str) return TelemetryEventParam(name, value) def parse_xml_event(data_str): try: xml_doc = parse_doc(data_str) event_id = getattrib(find(xml_doc, "Event"), 'id') provider_id = getattrib(find(xml_doc, "Provider"), 'id') event = TelemetryEvent(event_id, provider_id) param_nodes = findall(xml_doc, 'Param') for param_node in param_nodes: event.parameters.append(parse_xml_param(param_node)) return event except Exception as e: raise ValueError(ustr(e)) def parse_json_event(data_str): data = json.loads(data_str) event = TelemetryEvent() set_properties("TelemetryEvent", event, data) return event def get_monitor_handler(): return MonitorHandler() class MonitorHandler(object): EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1) TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30) CGROUP_TELEMETRY_PERIOD = datetime.timedelta(minutes=5) # host plugin HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5) # imds IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3) def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.imds_client = get_imds_client() self.event_thread = None self.last_event_collection = None self.last_telemetry_heartbeat = None self.last_cgroup_telemetry = None self.last_host_plugin_heartbeat = None self.last_imds_heartbeat = None self.protocol = None self.health_service = None self.last_route_table_hash = b'' self.last_nic_state = {} self.counter = 0 self.sysinfo = [] self.should_run = True self.heartbeat_id = str(uuid.uuid4()).upper() self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD) self.imds_errorstate = 
ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD) def run(self): self.init_protocols() self.init_sysinfo() self.init_cgroups() self.start() def stop(self): self.should_run = False if self.is_alive(): self.event_thread.join() def init_protocols(self): self.protocol = self.protocol_util.get_protocol() self.health_service = HealthService(self.protocol.endpoint) def is_alive(self): return self.event_thread is not None and self.event_thread.is_alive() def start(self): self.event_thread = threading.Thread(target=self.daemon) self.event_thread.setDaemon(True) self.event_thread.start() def init_sysinfo(self): osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), DISTRO_NAME, DISTRO_VERSION, DISTRO_CODE_NAME, platform.release()) self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) self.sysinfo.append( TelemetryEventParam("GAVersion", CURRENT_AGENT)) try: ram = self.osutil.get_total_mem() processors = self.osutil.get_processor_cores() self.sysinfo.append(TelemetryEventParam("RAM", ram)) self.sysinfo.append(TelemetryEventParam("Processors", processors)) except OSUtilError as e: logger.warn("Failed to get system info: {0}", e) try: vminfo = self.protocol.get_vminfo() self.sysinfo.append(TelemetryEventParam("VMName", vminfo.vmName)) self.sysinfo.append(TelemetryEventParam("TenantName", vminfo.tenantName)) self.sysinfo.append(TelemetryEventParam("RoleName", vminfo.roleName)) self.sysinfo.append(TelemetryEventParam("RoleInstanceName", vminfo.roleInstanceName)) self.sysinfo.append(TelemetryEventParam("ContainerId", vminfo.containerId)) except ProtocolError as e: logger.warn("Failed to get system info: {0}", e) try: vminfo = self.imds_client.get_compute() self.sysinfo.append(TelemetryEventParam('Location', vminfo.location)) self.sysinfo.append(TelemetryEventParam('SubscriptionId', vminfo.subscriptionId)) self.sysinfo.append(TelemetryEventParam('ResourceGroupName', vminfo.resourceGroupName)) self.sysinfo.append(TelemetryEventParam('VMId', vminfo.vmId)) 
self.sysinfo.append(TelemetryEventParam('ImageOrigin', vminfo.image_origin)) except (HttpError, ValueError) as e: logger.warn("failed to get IMDS info: {0}", e) def collect_event(self, evt_file_name): try: logger.verbose("Found event file: {0}", evt_file_name) with open(evt_file_name, "rb") as evt_file: # if fail to open or delete the file, throw exception data_str = evt_file.read().decode("utf-8", 'ignore') logger.verbose("Processed event file: {0}", evt_file_name) os.remove(evt_file_name) return data_str except IOError as e: msg = "Failed to process {0}, {1}".format(evt_file_name, e) raise EventError(msg) def collect_and_send_events(self): if self.last_event_collection is None: self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD): try: event_list = TelemetryEventList() event_dir = os.path.join(conf.get_lib_dir(), "events") event_files = os.listdir(event_dir) for event_file in event_files: if not event_file.endswith(".tld"): continue event_file_path = os.path.join(event_dir, event_file) try: data_str = self.collect_event(event_file_path) except EventError as e: logger.error("{0}", e) continue try: event = parse_event(data_str) self.add_sysinfo(event) event_list.events.append(event) except (ValueError, ProtocolError) as e: logger.warn("Failed to decode event file: {0}", e) continue if len(event_list.events) == 0: return try: self.protocol.report_event(event_list) except ProtocolError as e: logger.error("{0}", e) except Exception as e: logger.warn("Failed to send events: {0}", e) self.last_event_collection = datetime.datetime.utcnow() def daemon(self): min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD, MonitorHandler.CGROUP_TELEMETRY_PERIOD, MonitorHandler.EVENT_COLLECTION_PERIOD, MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD, MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds while self.should_run: 
self.send_telemetry_heartbeat() self.send_cgroup_telemetry() self.collect_and_send_events() self.send_host_plugin_heartbeat() self.send_imds_heartbeat() self.log_altered_network_configuration() time.sleep(min_delta) def add_sysinfo(self, event): sysinfo_names = [v.name for v in self.sysinfo] for param in event.parameters: if param.name in sysinfo_names: logger.verbose("Remove existing event parameter: [{0}:{1}]", param.name, param.value) event.parameters.remove(param) event.parameters.extend(self.sysinfo) def send_imds_heartbeat(self): """ Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have successfully called and validated a response in the last IMDS_HEALTH_PERIOD. """ if self.last_imds_heartbeat is None: self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD): try: is_currently_healthy, response = self.imds_client.validate() if is_currently_healthy: self.imds_errorstate.reset() else: self.imds_errorstate.incr() is_healthy = self.imds_errorstate.is_triggered() is False logger.verbose("IMDS health: {0} [{1}]", is_healthy, response) self.health_service.report_imds_status(is_healthy, response) except Exception as e: msg = "Exception sending imds heartbeat: {0}".format(ustr(e)) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ImdsHeartbeat, is_success=False, message=msg, log_event=False) self.last_imds_heartbeat = datetime.datetime.utcnow() def send_host_plugin_heartbeat(self): """ Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD. 
""" if self.last_host_plugin_heartbeat is None: self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= (self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD): try: host_plugin = self.protocol.client.get_host_plugin() host_plugin.ensure_initialized() is_currently_healthy = host_plugin.get_health() if is_currently_healthy: self.host_plugin_errorstate.reset() else: self.host_plugin_errorstate.incr() is_healthy = self.host_plugin_errorstate.is_triggered() is False logger.verbose("HostGAPlugin health: {0}", is_healthy) self.health_service.report_host_plugin_heartbeat(is_healthy) if not is_healthy: add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HostPluginHeartbeatExtended, is_success=False, message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time), log_event=False) except Exception as e: msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e)) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HostPluginHeartbeat, is_success=False, message=msg, log_event=False) self.last_host_plugin_heartbeat = datetime.datetime.utcnow() def send_telemetry_heartbeat(self): if self.last_telemetry_heartbeat is None: self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD): try: incarnation = self.protocol.get_incarnation() dropped_packets = self.osutil.get_firewall_dropped_packets(self.protocol.endpoint) msg = "{0};{1};{2};{3}".format(incarnation, self.counter, self.heartbeat_id, dropped_packets) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, message=msg, log_event=False) self.counter += 1 io_errors = IOErrorCounter.get_and_reset() hostplugin_errors = io_errors.get("hostplugin") 
protocol_errors = io_errors.get("protocol") other_errors = io_errors.get("other") if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0: msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, protocol_errors, other_errors) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HttpErrors, is_success=True, message=msg, log_event=False) except Exception as e: logger.warn("Failed to send heartbeat: {0}", e) self.last_telemetry_heartbeat = datetime.datetime.utcnow() @staticmethod def init_cgroups(): # Track metrics for the roll-up cgroup and for the agent cgroup try: CGroupsTelemetry.track_cgroup(CGroups.for_extension("")) CGroupsTelemetry.track_agent() except Exception as e: # when a hierarchy is not mounted, we raise an exception # and we should therefore only issue a warning, since this # is not unexpected logger.warn("Monitor: cgroups not initialized: {0}", ustr(e)) logger.verbose(traceback.format_exc()) def send_cgroup_telemetry(self): if self.last_cgroup_telemetry is None: self.last_cgroup_telemetry = datetime.datetime.utcnow() if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.CGROUP_TELEMETRY_PERIOD): try: for cgroup_name, metrics in CGroupsTelemetry.collect_all_tracked().items(): for metric_group, metric_name, value in metrics: if value > 0: report_metric(metric_group, metric_name, cgroup_name, value) except Exception as e: logger.warn("Monitor: failed to collect cgroups performance metrics: {0}", ustr(e)) logger.verbose(traceback.format_exc()) # Look for extension cgroups we're not already tracking and track them try: CGroupsTelemetry.update_tracked(self.protocol.client.get_current_handlers()) except Exception as e: logger.warn("Monitor: failed to update cgroups tracked extensions: {0}", ustr(e)) logger.verbose(traceback.format_exc()) self.last_cgroup_telemetry = datetime.datetime.utcnow() def log_altered_network_configuration(self): """ Check various pieces of network 
configuration and, if altered since the last check, log the new state. """ raw_route_list = self.osutil.read_route_table() digest = hash_strings(raw_route_list) if digest != self.last_route_table_hash: self.last_route_table_hash = digest route_list = self.osutil.get_list_of_routes(raw_route_list) logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list)))) nic_state = self.osutil.get_nic_state() if nic_state != self.last_nic_state: description = "Initial" if self.last_nic_state == {} else "Updated" logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values())))) self.last_nic_state = nic_state WALinuxAgent-2.2.32/azurelinuxagent/ga/remoteaccess.py000066400000000000000000000202521335416306700227620ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import datetime import glob import json import operator import os import os.path import pwd import random import re import shutil import stat import subprocess import textwrap import time import traceback import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.version as version import azurelinuxagent.common.protocol.wire import azurelinuxagent.common.protocol.metadata as metadata from datetime import datetime, timedelta from pwd import getpwall from azurelinuxagent.common.errorstate import ErrorState from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds from azurelinuxagent.common.exception import ExtensionError, ProtocolError, RemoteAccessError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ VMStatus, ExtHandler, \ get_properties, \ set_properties from azurelinuxagent.common.protocol.metadata import MetadataProtocol from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.utils.processutil import capture_from_process from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION from azurelinuxagent.common.osutil import get_osutil REMOTE_USR_EXPIRATION_FORMAT = "%a, %d %b %Y %H:%M:%S %Z" DATE_FORMAT = "%Y-%m-%d" TRANSPORT_PRIVATE_CERT = "TransportPrivate.pem" REMOTE_ACCESS_ACCOUNT_COMMENT = "JIT_Account" MAX_TRY_ATTEMPT = 5 FAILED_ATTEMPT_THROTTLE = 1 def get_remote_access_handler(): return RemoteAccessHandler() class RemoteAccessHandler(object): def __init__(self): self.os_util = get_osutil() self.protocol_util = get_protocol_util() self.protocol = None 
self.cryptUtil = CryptUtil(conf.get_openssl_cmd()) self.remote_access = None self.incarnation = 0 self.error_message = "" def run(self): try: if self.os_util.jit_enabled: self.protocol = self.protocol_util.get_protocol() current_incarnation = self.protocol.get_incarnation() if self.incarnation != current_incarnation: # something changed. Handle remote access if any. self.incarnation = current_incarnation self.remote_access = self.protocol.client.get_remote_access() self.handle_remote_access() except Exception as e: msg = u"Exception processing remote access handler: {0} {1}".format(ustr(e), traceback.format_exc()) logger.error(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.RemoteAccessHandling, is_success=False, message=msg) def handle_remote_access(self): # Get JIT user accounts. all_users = self.os_util.get_users() existing_jit_users = set(u[0] for u in all_users if self.validate_jit_user(u[4])) self.err_message = "" if self.remote_access is not None: goal_state_users = set(u.name for u in self.remote_access.user_list.users) for acc in self.remote_access.user_list.users: try: raw_expiration = acc.expiration account_expiration = datetime.strptime(raw_expiration, REMOTE_USR_EXPIRATION_FORMAT) now = datetime.utcnow() if acc.name not in existing_jit_users and now < account_expiration: self.add_user(acc.name, acc.encrypted_password, account_expiration) elif acc.name in existing_jit_users and now > account_expiration: # user account expired, delete it. logger.info("user {0} expired from remote_access".format(acc.name)) self.remove_user(acc.name) except RemoteAccessError as rae: self.err_message = self.err_message + "Error processing user {0}. 
Exception: {1}"\ .format(acc.name, ustr(rae)) for user in existing_jit_users: try: if user not in goal_state_users: # user explicitly removed logger.info("User {0} removed from remote_access".format(user)) self.remove_user(user) except RemoteAccessError as rae: self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ .format(user, ustr(rae)) else: # All users removed, remove any remaining JIT accounts. for user in existing_jit_users: try: logger.info("User {0} removed from remote_access. remote_access empty".format(user)) self.remove_user(user) except RemoteAccessError as rae: self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ .format(user, ustr(rae)) def validate_jit_user(self, comment): return comment == REMOTE_ACCESS_ACCOUNT_COMMENT def add_user(self, username, encrypted_password, account_expiration): try: expiration_date = (account_expiration + timedelta(days=1)).strftime(DATE_FORMAT) logger.verbose("Adding user {0} with expiration date {1}".format(username, expiration_date)) self.os_util.useradd(username, expiration_date, REMOTE_ACCESS_ACCOUNT_COMMENT) except Exception as e: raise RemoteAccessError("Error adding user {0}. {1}".format(username, ustr(e))) try: prv_key = os.path.join(conf.get_lib_dir(), TRANSPORT_PRIVATE_CERT) pwd = self.cryptUtil.decrypt_secret(encrypted_password, prv_key) self.os_util.chpasswd(username, pwd, conf.get_password_cryptid(), conf.get_password_crypt_salt_len()) self.os_util.conf_sudoer(username) logger.info("User '{0}' added successfully with expiration in {1}".format(username, expiration_date)) except Exception as e: error = "Error adding user {0}. 
{1} ".format(username, str(e)) try: self.handle_failed_create(username) error += "cleanup successful" except RemoteAccessError as rae: error += "and error cleaning up {0}".format(str(rae)) raise RemoteAccessError("Error adding user {0} cleanup successful".format(username), ustr(e)) def handle_failed_create(self, username): try: self.delete_user(username) except Exception as e: raise RemoteAccessError("Failed to clean up after account creation for {0}.".format(username), e) def remove_user(self, username): try: self.delete_user(username) except Exception as e: raise RemoteAccessError("Failed to delete user {0}".format(username), e) def delete_user(self, username): self.os_util.del_account(username) logger.info("User deleted {0}".format(username)) WALinuxAgent-2.2.32/azurelinuxagent/ga/update.py000066400000000000000000001135051335416306700215730ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import glob import json import os import platform import random import re import shutil import signal import stat import subprocess import sys import time import traceback import zipfile from datetime import datetime, timedelta import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.restutil as restutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.event import add_event, add_periodic, \ elapsed_milliseconds, \ WALAEventOperation from azurelinuxagent.common.exception import ProtocolError, \ ResourceGoneError, \ UpdateError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ CURRENT_AGENT, CURRENT_VERSION, \ is_current_agent_installed from azurelinuxagent.ga.exthandlers import HandlerManifest AGENT_ERROR_FILE = "error.json" # File name for agent error record AGENT_MANIFEST_FILE = "HandlerManifest.json" AGENT_PARTITION_FILE = "partition" CHILD_HEALTH_INTERVAL = 15 * 60 CHILD_LAUNCH_INTERVAL = 5 * 60 CHILD_LAUNCH_RESTART_MAX = 3 CHILD_POLL_INTERVAL = 60 MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted GOAL_STATE_INTERVAL = 3 GOAL_STATE_INTERVAL_DISABLED = 5 * 60 ORPHAN_WAIT_INTERVAL = 15 * 60 AGENT_SENTINEL_FILE = "current_version" READONLY_FILE_GLOBS = [ "*.crt", "*.p7m", "*.pem", "*.prv", "ovf-env.xml" ] def get_update_handler(): return 
UpdateHandler() def get_python_cmd(): major_version = platform.python_version_tuple()[0] return "python" if int(major_version) <= 2 else "python{0}".format(major_version) class UpdateHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.running = True self.last_attempt_time = None self.agents = [] self.child_agent = None self.child_launch_time = None self.child_launch_attempts = 0 self.child_process = None self.signal_handler = None def run_latest(self, child_args=None): """ This method is called from the daemon to find and launch the most current, downloaded agent. Note: - Most events should be tagged to the launched agent (agent_version) """ if self.child_process is not None: raise Exception("Illegal attempt to launch multiple goal state Agent processes") if self.signal_handler is None: self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal) latest_agent = self.get_latest_agent() if latest_agent is None: logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT) agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0]) agent_dir = os.getcwd() agent_name = CURRENT_AGENT agent_version = CURRENT_VERSION else: logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name) agent_cmd = latest_agent.get_agent_cmd() agent_dir = latest_agent.get_agent_dir() agent_name = latest_agent.name agent_version = latest_agent.version if child_args is not None: agent_cmd = "{0} {1}".format(agent_cmd, child_args) try: # Launch the correct Python version for python-based agents cmds = textutil.safe_shlex_split(agent_cmd) if cmds[0].lower() == "python": cmds[0] = get_python_cmd() agent_cmd = " ".join(cmds) self._evaluate_agent_health(latest_agent) self.child_process = subprocess.Popen( cmds, cwd=agent_dir, stdout=sys.stdout, stderr=sys.stderr, env=os.environ) logger.verbose(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) # If the most current agent is the 
installed agent and update is enabled, # assume updates are likely available and poll every second. # This reduces the start-up impact of finding / launching agent updates on # fresh VMs. if latest_agent is None and conf.get_autoupdate_enabled(): poll_interval = 1 else: poll_interval = CHILD_POLL_INTERVAL ret = None start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: time.sleep(poll_interval) try: ret = self.child_process.poll() except OSError: # if child_process has terminated, calling poll could raise an exception ret = -1 if ret is not None: break if ret is None or ret <= 0: msg = u"Agent {0} launched with command '{1}' is successfully running".format( agent_name, agent_cmd) logger.info(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=True, message=msg, log_event=False) if ret is None: ret = self.child_process.wait() else: msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=msg) if ret is not None and ret > 0: msg = u"Agent {0} launched with command '{1}' returned code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) except Exception as e: # Ignore child errors during termination if self.running: msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format( agent_name, agent_cmd, ustr(e)) logger.warn(msg) detailed_message = '{0} {1}'.format(msg, traceback.format_exc()) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=detailed_message) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) self.child_process = None return def run(self): """ This is the main loop which watches for agent and extension updates. 
""" try: logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) # Launch monitoring threads from azurelinuxagent.ga.monitor import get_monitor_handler monitor_thread = get_monitor_handler() monitor_thread.run() from azurelinuxagent.ga.env import get_env_handler env_thread = get_env_handler() env_thread.run() from azurelinuxagent.ga.exthandlers import get_exthandlers_handler, migrate_handler_state exthandlers_handler = get_exthandlers_handler() migrate_handler_state() from azurelinuxagent.ga.remoteaccess import get_remote_access_handler remote_access_handler = get_remote_access_handler() self._ensure_no_orphans() self._emit_restart_event() self._ensure_partition_assigned() self._ensure_readonly_files() goal_state_interval = GOAL_STATE_INTERVAL \ if conf.get_extensions_enabled() \ else GOAL_STATE_INTERVAL_DISABLED while self.running: if self._is_orphaned: logger.info("Agent {0} is an orphan -- exiting", CURRENT_AGENT) break if not monitor_thread.is_alive(): logger.warn(u"Monitor thread died, restarting") monitor_thread.start() if not env_thread.is_alive(): logger.warn(u"Environment thread died, restarting") env_thread.start() if self._upgrade_available(): available_agent = self.get_latest_agent() if available_agent is None: logger.info( "Agent {0} is reverting to the installed agent -- exiting", CURRENT_AGENT) else: logger.info( u"Agent {0} discovered update {1} -- exiting", CURRENT_AGENT, available_agent.name) break utc_start = datetime.utcnow() last_etag = exthandlers_handler.last_etag exthandlers_handler.run() remote_access_handler.run() if last_etag != exthandlers_handler.last_etag: self._ensure_readonly_files() duration = elapsed_milliseconds(utc_start) logger.info('ProcessGoalState completed [incarnation {0}; {1} ms]', exthandlers_handler.last_etag, duration) add_event( AGENT_NAME, op=WALAEventOperation.ProcessGoalState, duration=duration, message="Incarnation {0}".format(exthandlers_handler.last_etag)) time.sleep(goal_state_interval) 
except Exception as e: msg = u"Agent {0} failed with exception: {1}".format(CURRENT_AGENT, ustr(e)) self._set_sentinel(msg=msg) logger.warn(msg) logger.warn(traceback.format_exc()) sys.exit(1) # additional return here because sys.exit is mocked in unit tests return self._shutdown() sys.exit(0) def forward_signal(self, signum, frame): if signum == signal.SIGTERM: self._shutdown() if self.child_process is None: return logger.info( u"Agent {0} forwarding signal {1} to {2}", CURRENT_AGENT, signum, self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) self.child_process.send_signal(signum) if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): self.signal_handler(signum, frame) elif self.signal_handler is signal.SIG_DFL: if signum == signal.SIGTERM: self._shutdown() sys.exit(0) return def get_latest_agent(self): """ If autoupdate is enabled, return the most current, downloaded, non-blacklisted agent which is not the current version (if any). Otherwise, return None (implying to use the installed agent). 
""" if not conf.get_autoupdate_enabled(): return None self._find_agents() available_agents = [agent for agent in self.agents if agent.is_available and agent.version > FlexibleVersion(AGENT_VERSION)] return available_agents[0] if len(available_agents) >= 1 else None def _emit_restart_event(self): try: if not self._is_clean_start: msg = u"Agent did not terminate cleanly: {0}".format( fileutil.read_file(self._sentinel_file_path())) logger.info(msg) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Restart, is_success=False, message=msg) except Exception: pass return def _ensure_no_orphans(self, orphan_wait_interval=ORPHAN_WAIT_INTERVAL): pid_files, ignored = self._write_pid_file() for pid_file in pid_files: try: pid = fileutil.read_file(pid_file) wait_interval = orphan_wait_interval while self.osutil.check_pid_alive(pid): wait_interval -= GOAL_STATE_INTERVAL if wait_interval <= 0: logger.warn( u"{0} forcibly terminated orphan process {1}", CURRENT_AGENT, pid) os.kill(pid, signal.SIGKILL) break logger.info( u"{0} waiting for orphan process {1} to terminate", CURRENT_AGENT, pid) time.sleep(GOAL_STATE_INTERVAL) os.remove(pid_file) except Exception as e: logger.warn( u"Exception occurred waiting for orphan agent to terminate: {0}", ustr(e)) return def _ensure_partition_assigned(self): """ Assign the VM to a partition (0 - 99). Downloaded updates may be configured to run on only some VMs; the assigned partition determines eligibility. 
""" if not os.path.exists(self._partition_file): partition = ustr(int(datetime.utcnow().microsecond / 10000)) fileutil.write_file(self._partition_file, partition) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Partition, is_success=True, message=partition) def _ensure_readonly_files(self): for g in READONLY_FILE_GLOBS: for path in glob.iglob(os.path.join(conf.get_lib_dir(), g)): os.chmod(path, stat.S_IRUSR) def _evaluate_agent_health(self, latest_agent): """ Evaluate the health of the selected agent: If it is restarting too frequently, raise an Exception to force blacklisting. """ if latest_agent is None: self.child_agent = None return if self.child_agent is None or latest_agent.version != self.child_agent.version: self.child_agent = latest_agent self.child_launch_time = None self.child_launch_attempts = 0 if self.child_launch_time is None: self.child_launch_time = time.time() self.child_launch_attempts += 1 if (time.time() - self.child_launch_time) <= CHILD_LAUNCH_INTERVAL \ and self.child_launch_attempts >= CHILD_LAUNCH_RESTART_MAX: msg = u"Agent {0} restarted more than {1} times in {2} seconds".format( self.child_agent.name, CHILD_LAUNCH_RESTART_MAX, CHILD_LAUNCH_INTERVAL) raise Exception(msg) return def _filter_blacklisted_agents(self): self.agents = [agent for agent in self.agents if not agent.is_blacklisted] def _find_agents(self): """ Load all non-blacklisted agents currently on disk. 
""" try: self._set_agents(self._load_agents()) self._filter_blacklisted_agents() except Exception as e: logger.warn(u"Exception occurred loading available agents: {0}", ustr(e)) return def _get_host_plugin(self, protocol=None): return protocol.client.get_host_plugin() \ if protocol and \ type(protocol) is WireProtocol and \ protocol.client \ else None def _get_pid_parts(self): pid_file = conf.get_agent_pid_file_path() pid_dir = os.path.dirname(pid_file) pid_name = os.path.basename(pid_file) pid_re = re.compile("(\d+)_{0}".format(re.escape(pid_name))) return pid_dir, pid_name, pid_re def _get_pid_files(self): pid_dir, pid_name, pid_re = self._get_pid_parts() pid_files = [os.path.join(pid_dir, f) for f in os.listdir(pid_dir) if pid_re.match(f)] pid_files.sort(key=lambda f: int(pid_re.match(os.path.basename(f)).group(1))) return pid_files @property def _is_clean_start(self): return not os.path.isfile(self._sentinel_file_path()) @property def _is_orphaned(self): parent_pid = os.getppid() if parent_pid in (1, None): return True if not os.path.isfile(conf.get_agent_pid_file_path()): return True return fileutil.read_file(conf.get_agent_pid_file_path()) != ustr(parent_pid) def _is_version_eligible(self, version): # Ensure the installed version is always eligible if version == CURRENT_VERSION and is_current_agent_installed(): return True for agent in self.agents: if agent.version == version: return agent.is_available return False def _load_agents(self): path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) return [GuestAgent(path=agent_dir) for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)] def _partition(self): return int(fileutil.read_file(self._partition_file)) @property def _partition_file(self): return os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) def _purge_agents(self): """ Remove from disk all directories and .zip files of unknown agents (without removing the current, running agent). 
""" path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) known_versions = [agent.version for agent in self.agents] if CURRENT_VERSION not in known_versions: logger.verbose( u"Running Agent {0} was not found in the agent manifest - adding to list", CURRENT_VERSION) known_versions.append(CURRENT_VERSION) for agent_path in glob.iglob(path): try: name = fileutil.trim_ext(agent_path, "zip") m = AGENT_DIR_PATTERN.match(name) if m is not None and FlexibleVersion(m.group(1)) not in known_versions: if os.path.isfile(agent_path): logger.info(u"Purging outdated Agent file {0}", agent_path) os.remove(agent_path) else: logger.info(u"Purging outdated Agent directory {0}", agent_path) shutil.rmtree(agent_path) except Exception as e: logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e)) return def _set_agents(self, agents=[]): self.agents = agents self.agents.sort(key=lambda agent: agent.version, reverse=True) return def _set_sentinel(self, agent=CURRENT_AGENT, msg="Unknown cause"): try: fileutil.write_file( self._sentinel_file_path(), "[{0}] [{1}]".format(agent, msg)) except Exception as e: logger.warn( u"Exception writing sentinel file {0}: {1}", self._sentinel_file_path(), str(e)) return def _sentinel_file_path(self): return os.path.join(conf.get_lib_dir(), AGENT_SENTINEL_FILE) def _shutdown(self): self.running = False if not os.path.isfile(self._sentinel_file_path()): return try: os.remove(self._sentinel_file_path()) except Exception as e: logger.warn( u"Exception removing sentinel file {0}: {1}", self._sentinel_file_path(), str(e)) return def _upgrade_available(self, base_version=CURRENT_VERSION): # Emit an event expressing the state of AutoUpdate # Note: # - Duplicate events get suppressed; state transitions always emit add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.AutoUpdate, is_success=conf.get_autoupdate_enabled()) # Ignore new agents if updating is disabled if not conf.get_autoupdate_enabled(): return False now = 
time.time() if self.last_attempt_time is not None: next_attempt_time = self.last_attempt_time + \ conf.get_autoupdate_frequency() else: next_attempt_time = now if next_attempt_time > now: return False family = conf.get_autoupdate_gafamily() logger.verbose("Checking for agent family {0} updates", family) self.last_attempt_time = now protocol = self.protocol_util.get_protocol() for update_goal_state in [False, True]: try: if update_goal_state: protocol.update_goal_state(forced=True) manifest_list, etag = protocol.get_vmagent_manifests() manifests = [m for m in manifest_list.vmAgentManifests \ if m.family == family and \ len(m.versionsManifestUris) > 0] if len(manifests) == 0: logger.verbose(u"Incarnation {0} has no {1} agent updates", etag, family) return False pkg_list = protocol.get_vmagent_pkgs(manifests[0]) # Set the agents to those available for download at least as # current as the existing agent and remove from disk any agent # no longer reported to the VM. # Note: # The code leaves on disk available, but blacklisted, agents # so as to preserve the state. Otherwise, those agents could be # again downloaded and inappropriately retried. 
host = self._get_host_plugin(protocol=protocol) self._set_agents([GuestAgent(pkg=pkg, host=host) \ for pkg in pkg_list.versions]) self._purge_agents() self._filter_blacklisted_agents() # Return True if current agent is no longer available or an # agent with a higher version number is available return not self._is_version_eligible(base_version) \ or (len(self.agents) > 0 \ and self.agents[0].version > base_version) except Exception as e: if isinstance(e, ResourceGoneError): continue msg = u"Exception retrieving agent manifests: {0}".format( ustr(traceback.format_exc())) logger.warn(msg) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, is_success=False, message=msg) return False def _write_pid_file(self): pid_files = self._get_pid_files() pid_dir, pid_name, pid_re = self._get_pid_parts() previous_pid_file = None \ if len(pid_files) <= 0 \ else pid_files[-1] pid_index = -1 \ if previous_pid_file is None \ else int(pid_re.match(os.path.basename(previous_pid_file)).group(1)) pid_file = os.path.join(pid_dir, "{0}_{1}".format(pid_index+1, pid_name)) try: fileutil.write_file(pid_file, ustr(os.getpid())) logger.info(u"{0} running as process {1}", CURRENT_AGENT, ustr(os.getpid())) except Exception as e: pid_file = None logger.warn( u"Expection writing goal state agent {0} pid to {1}: {2}", CURRENT_AGENT, pid_file, ustr(e)) return pid_files, pid_file class GuestAgent(object): def __init__(self, path=None, pkg=None, host=None): self.pkg = pkg self.host = host version = None if path is not None: m = AGENT_DIR_PATTERN.match(path) if m == None: raise UpdateError(u"Illegal agent directory: {0}".format(path)) version = m.group(1) elif self.pkg is not None: version = pkg.version if version == None: raise UpdateError(u"Illegal agent version: {0}".format(version)) self.version = FlexibleVersion(version) location = u"disk" if path is not None else u"package" logger.verbose(u"Loading Agent {0} from {1}", self.name, location) self.error = 
GuestAgentError(self.get_agent_error_file()) self.error.load() try: self._ensure_downloaded() self._ensure_loaded() except Exception as e: if isinstance(e, ResourceGoneError): raise # The agent was improperly blacklisting versions due to a timeout # encountered while downloading a later version. Errors of type # socket.error are IOError, so this should provide sufficient # protection against a large class of I/O operation failures. if isinstance(e, IOError): raise # Note the failure, blacklist the agent if the package downloaded # - An exception with a downloaded package indicates the package # is corrupt (e.g., missing the HandlerManifest.json file) self.mark_failure(is_fatal=os.path.isfile(self.get_agent_pkg_path())) msg = u"Agent {0} install failed with exception: {1}".format( self.name, ustr(e)) logger.warn(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=False, message=msg) @property def name(self): return "{0}-{1}".format(AGENT_NAME, self.version) def get_agent_cmd(self): return self.manifest.get_enable_command() def get_agent_dir(self): return os.path.join(conf.get_lib_dir(), self.name) def get_agent_error_file(self): return os.path.join(conf.get_lib_dir(), self.name, AGENT_ERROR_FILE) def get_agent_manifest_path(self): return os.path.join(self.get_agent_dir(), AGENT_MANIFEST_FILE) def get_agent_pkg_path(self): return ".".join((os.path.join(conf.get_lib_dir(), self.name), "zip")) def clear_error(self): self.error.clear() self.error.save() @property def is_available(self): return self.is_downloaded and not self.is_blacklisted @property def is_blacklisted(self): return self.error is not None and self.error.is_blacklisted @property def is_downloaded(self): return self.is_blacklisted or \ os.path.isfile(self.get_agent_manifest_path()) def mark_failure(self, is_fatal=False): try: if not os.path.isdir(self.get_agent_dir()): os.makedirs(self.get_agent_dir()) self.error.mark_failure(is_fatal=is_fatal) self.error.save() if 
self.error.is_blacklisted: logger.warn(u"Agent {0} is permanently blacklisted", self.name) except Exception as e: logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e)) def _ensure_downloaded(self): logger.verbose(u"Ensuring Agent {0} is downloaded", self.name) if self.is_downloaded: logger.verbose(u"Agent {0} was previously downloaded - skipping download", self.name) return if self.pkg is None: raise UpdateError(u"Agent {0} is missing package and download URIs".format( self.name)) self._download() self._unpack() msg = u"Agent {0} downloaded successfully".format(self.name) logger.verbose(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=True, message=msg) def _ensure_loaded(self): self._load_manifest() self._load_error() def _download(self): uris_shuffled = self.pkg.uris random.shuffle(uris_shuffled) for uri in uris_shuffled: if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break elif self.host is not None and self.host.ensure_initialized(): if not HostPluginProtocol.is_default_channel(): logger.warn("Download failed, switching to host plugin") else: logger.verbose("Using host plugin as default channel") uri, headers = self.host.get_artifact_request(uri.uri, self.host.manifest_uri) try: if self._fetch(uri, headers=headers, use_proxy=False): if not HostPluginProtocol.is_default_channel(): logger.verbose("Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) break else: logger.warn("Host plugin download failed") # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin except ResourceGoneError: HostPluginProtocol.set_default_channel(True) raise else: logger.error("No download channels available") if not os.path.isfile(self.get_agent_pkg_path()): msg = u"Unable to download Agent {0} from any URI".format(self.name) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, 
is_success=False, message=msg) raise UpdateError(msg) def _fetch(self, uri, headers=None, use_proxy=True): package = None try: is_healthy = True error_response = '' resp = restutil.http_get(uri, use_proxy=use_proxy, headers=headers) if restutil.request_succeeded(resp): package = resp.read() fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) else: error_response = restutil.read_response_error(resp) logger.verbose("Fetch was unsuccessful [{0}]", error_response) is_healthy = not restutil.request_failed_at_hostplugin(resp) if self.host is not None: self.host.report_fetch_health(uri, is_healthy, source='GuestAgent', response=error_response) except restutil.HttpError as http_error: if isinstance(http_error, ResourceGoneError): raise logger.verbose(u"Agent {0} download from {1} failed [{2}]", self.name, uri, http_error) return package is not None def _load_error(self): try: self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() logger.verbose(u"Agent {0} error state: {1}", self.name, ustr(self.error)) except Exception as e: logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) def _load_manifest(self): path = self.get_agent_manifest_path() if not os.path.isfile(path): msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) with open(path, "r") as manifest_file: try: manifests = json.load(manifest_file) except Exception as e: msg = u"Agent {0} has a malformed {1}".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) if type(manifests) is list: if len(manifests) <= 0: msg = u"Agent {0} has an empty {1}".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) manifest = manifests[0] else: manifest = manifests try: self.manifest = HandlerManifest(manifest) if len(self.manifest.get_enable_command()) <= 0: raise Exception(u"Manifest is missing the enable command") except 
Exception as e: msg = u"Agent {0} has an illegal {1}: {2}".format( self.name, AGENT_MANIFEST_FILE, ustr(e)) raise UpdateError(msg) logger.verbose( u"Agent {0} loaded manifest from {1}", self.name, self.get_agent_manifest_path()) logger.verbose(u"Successfully loaded Agent {0} {1}: {2}", self.name, AGENT_MANIFEST_FILE, ustr(self.manifest.data)) return def _unpack(self): try: if os.path.isdir(self.get_agent_dir()): shutil.rmtree(self.get_agent_dir()) zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir()) except Exception as e: fileutil.clean_ioerror(e, paths=[self.get_agent_dir(), self.get_agent_pkg_path()]) msg = u"Exception unpacking Agent {0} from {1}: {2}".format( self.name, self.get_agent_pkg_path(), ustr(e)) raise UpdateError(msg) if not os.path.isdir(self.get_agent_dir()): msg = u"Unpacking Agent {0} failed to create directory {1}".format( self.name, self.get_agent_dir()) raise UpdateError(msg) logger.verbose( u"Agent {0} unpacked successfully to {1}", self.name, self.get_agent_dir()) return class GuestAgentError(object): def __init__(self, path): if path is None: raise UpdateError(u"GuestAgentError requires a path") self.path = path self.clear() return def mark_failure(self, is_fatal=False): self.last_failure = time.time() self.failure_count += 1 self.was_fatal = is_fatal return def clear(self): self.last_failure = 0.0 self.failure_count = 0 self.was_fatal = False return @property def is_blacklisted(self): return self.was_fatal or self.failure_count >= MAX_FAILURE def load(self): if self.path is not None and os.path.isfile(self.path): with open(self.path, 'r') as f: self.from_json(json.load(f)) return def save(self): if os.path.isdir(os.path.dirname(self.path)): with open(self.path, 'w') as f: json.dump(self.to_json(), f) return def from_json(self, data): self.last_failure = max( self.last_failure, data.get(u"last_failure", 0.0)) self.failure_count = max( self.failure_count, data.get(u"failure_count", 0)) self.was_fatal = self.was_fatal or 
data.get(u"was_fatal", False) return def to_json(self): data = { u"last_failure": self.last_failure, u"failure_count": self.failure_count, u"was_fatal" : self.was_fatal } return data def __str__(self): return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( self.last_failure, self.failure_count, self.was_fatal) WALinuxAgent-2.2.32/azurelinuxagent/pa/000077500000000000000000000000001335416306700177435ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/pa/__init__.py000066400000000000000000000011661335416306700220600ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/000077500000000000000000000000001335416306700223045ustar00rootroot00000000000000WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/__init__.py000066400000000000000000000013501335416306700244140ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler __all__ = ["get_deprovision_handler"] WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/arch.py000066400000000000000000000024511335416306700235750ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ArchDeprovisionHandler(DeprovisionHandler): def __init__(self): super(ArchDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(ArchDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/clearlinux.py000066400000000000000000000022371335416306700250300ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ClearLinuxDeprovisionHandler(DeprovisionHandler): def __init__(self, distro): self.distro = distro def setup(self, deluser): warnings, actions = super(ClearLinuxDeprovisionHandler, self).setup(deluser) # Probably should just wipe /etc and /var here return warnings, actions WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/coreos.py000066400000000000000000000024601335416306700241520ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class CoreOSDeprovisionHandler(DeprovisionHandler): def __init__(self): super(CoreOSDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/default.py000066400000000000000000000216761335416306700243160ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import glob import os.path import re import signal import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common import version from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN def read_input(message): if sys.version_info[0] >= 3: return input(message) else: return raw_input(message) class DeprovisionAction(object): def __init__(self, func, args=[], kwargs={}): self.func = func self.args = args self.kwargs = kwargs def invoke(self): self.func(*self.args, **self.kwargs) class DeprovisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.actions_running = False signal.signal(signal.SIGINT, self.handle_interrupt_signal) def del_root_password(self, warnings, actions): warnings.append("WARNING! root password will be disabled. " "You will not be able to login as root.") actions.append(DeprovisionAction(self.osutil.del_root_password)) def del_user(self, warnings, actions): try: ovfenv = self.protocol_util.get_ovf_env() except ProtocolError: warnings.append("WARNING! ovf-env.xml is not found.") warnings.append("WARNING! Skip delete user.") return username = ovfenv.username warnings.append(("WARNING! {0} account and entire home directory " "will be deleted.").format(username)) actions.append(DeprovisionAction(self.osutil.del_account, [username])) def regen_ssh_host_key(self, warnings, actions): warnings.append("WARNING! All SSH host key pairs will be deleted.") actions.append(DeprovisionAction(fileutil.rm_files, [conf.get_ssh_key_glob()])) def stop_agent_service(self, warnings, actions): warnings.append("WARNING! 
The waagent service will be stopped.") actions.append(DeprovisionAction(self.osutil.stop_agent_service)) def del_dirs(self, warnings, actions): dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) def del_files(self, warnings, actions): files = ['/root/.bash_history', '/var/log/waagent.log'] actions.append(DeprovisionAction(fileutil.rm_files, files)) # For OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/etc/random.seed", "/var/db/host.random", "/etc/isakmpd/local.pub", "/etc/isakmpd/private/local.key", "/etc/iked/private/local.key", "/etc/iked/local.pub"])) def del_resolv(self, warnings, actions): warnings.append("WARNING! /etc/resolv.conf will be deleted.") files_to_del = ["/etc/resolv.conf"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) def del_dhcp_lease(self, warnings, actions): warnings.append("WARNING! Cached DHCP leases will be deleted.") dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) # For FreeBSD and OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/var/db/dhclient.leases.*"])) # For FreeBSD, NM controlled actions.append(DeprovisionAction(fileutil.rm_files, ["/var/lib/NetworkManager/dhclient-*.lease"])) def del_ext_handler_files(self, warnings, actions): ext_dirs = [d for d in os.listdir(conf.get_lib_dir()) if os.path.isdir(os.path.join(conf.get_lib_dir(), d)) and re.match(HANDLER_NAME_PATTERN, d) is not None and not version.is_agent_path(d)] for ext_dir in ext_dirs: ext_base = os.path.join(conf.get_lib_dir(), ext_dir) files = glob.glob(os.path.join(ext_base, 'status', '*.status')) files += glob.glob(os.path.join(ext_base, 'config', '*.settings')) files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus')) files += glob.glob(os.path.join(ext_base, 'mrseq')) if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def 
del_lib_dir_files(self, warnings, actions): known_files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'partition', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint' ] known_files_glob = [ 'Extensions.*.xml', 'ExtensionsConfig.*.xml', 'GoalState.*.xml' ] lib_dir = conf.get_lib_dir() files = [f for f in \ [os.path.join(lib_dir, kf) for kf in known_files] \ if os.path.isfile(f)] for p in known_files_glob: files += glob.glob(os.path.join(lib_dir, p)) if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] actions.append(DeprovisionAction(self.osutil.set_hostname, localhost)) actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname, localhost)) def setup(self, deluser): warnings = [] actions = [] self.stop_agent_service(warnings, actions) if conf.get_regenerate_ssh_host_key(): self.regen_ssh_host_key(warnings, actions) self.del_dhcp_lease(warnings, actions) self.reset_hostname(warnings, actions) if conf.get_delete_root_password(): self.del_root_password(warnings, actions) self.del_dirs(warnings, actions) self.del_files(warnings, actions) self.del_resolv(warnings, actions) if deluser: self.del_user(warnings, actions) return warnings, actions def setup_changed_unique_id(self): warnings = [] actions = [] self.del_dhcp_lease(warnings, actions) self.del_lib_dir_files(warnings, actions) self.del_ext_handler_files(warnings, actions) return warnings, actions def run(self, force=False, deluser=False): warnings, actions = self.setup(deluser) self.do_warnings(warnings) if self.do_confirmation(force=force): self.do_actions(actions) def run_changed_unique_id(self): ''' Clean-up files and directories that may interfere when the VM unique identifier has changed. 
While users *should* manually deprovision a VM, the files removed by this routine will help keep the agent from getting confused (since incarnation and extension settings, among other items, will no longer be monotonically increasing). ''' warnings, actions = self.setup_changed_unique_id() self.do_warnings(warnings) self.do_actions(actions) def do_actions(self, actions): self.actions_running = True for action in actions: action.invoke() self.actions_running = False def do_confirmation(self, force=False): if force: return True confirm = read_input("Do you want to proceed (y/n)") return True if confirm.lower().startswith('y') else False def do_warnings(self, warnings): for warning in warnings: print(warning) def handle_interrupt_signal(self, signum, frame): if not self.actions_running: print("Deprovision is interrupted.") sys.exit(0) print ('Deprovisioning may not be interrupted.') return WALinuxAgent-2.2.32/azurelinuxagent/pa/deprovision/factory.py000066400000000000000000000032551335416306700243320ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
def get_deprovision_handler(distro_name=DISTRO_NAME,
                            distro_version=DISTRO_VERSION,
                            distro_full_name=DISTRO_FULL_NAME):
    """Select the deprovision handler matching the running distribution.

    Falls back to the generic DeprovisionHandler when no distro-specific
    implementation applies.
    """
    if distro_name == "arch":
        handler = ArchDeprovisionHandler()
    elif distro_name == "ubuntu":
        # 18.04 gets dedicated resolv.conf handling (systemd-resolved).
        if Version(distro_version) == Version('18.04'):
            handler = Ubuntu1804DeprovisionHandler()
        else:
            handler = UbuntuDeprovisionHandler()
    elif distro_name == "coreos":
        handler = CoreOSDeprovisionHandler()
    elif distro_name == "clear linux":
        handler = ClearLinuxDeprovisionHandler()
    else:
        handler = DeprovisionHandler()
    return handler
class UbuntuDeprovisionHandler(DeprovisionHandler):
    """Deprovision handler for Ubuntu releases prior to 18.04."""

    def __init__(self):
        super(UbuntuDeprovisionHandler, self).__init__()

    def del_resolv(self, warnings, actions):
        """Queue removal of DNS resolver state, warning the operator first.

        When /etc/resolv.conf is a plain file it is deleted outright;
        when it is the resolvconf-managed symlink, only the local input
        fragments (tail/original) are removed.
        """
        if os.path.realpath(
                '/etc/resolv.conf') != '/run/resolvconf/resolv.conf':
            warnings.append("WARNING! /etc/resolv.conf will be deleted.")
            targets = ["/etc/resolv.conf"]
        else:
            warnings.append("WARNING! /etc/resolvconf/resolv.conf.d/tail "
                            "and /etc/resolvconf/resolv.conf.d/original will "
                            "be deleted.")
            targets = ["/etc/resolvconf/resolv.conf.d/tail",
                       "/etc/resolvconf/resolv.conf.d/original"]
        actions.append(DeprovisionAction(fileutil.rm_files, targets))


class Ubuntu1804DeprovisionHandler(UbuntuDeprovisionHandler):
    """Deprovision handler for Ubuntu 18.04 (systemd-resolved owns DNS)."""

    def __init__(self):
        super(Ubuntu1804DeprovisionHandler, self).__init__()

    def del_resolv(self, warnings, actions):
        # systemd-resolved manages /etc/resolv.conf; leave it in place and
        # only warn, since earlier Ubuntu handlers used to delete it.
        warnings.append("WARNING! /etc/resolv.conf will NOT be removed, this is a behavior change to earlier "
                        "versions of Ubuntu.")
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.provision.factory import get_provision_handler WALinuxAgent-2.2.32/azurelinuxagent/pa/provision/cloudinit.py000066400000000000000000000135631335416306700243470ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import os.path import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.event import elapsed_milliseconds, WALAEventOperation from azurelinuxagent.common.exception import ProvisionError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.pa.provision.default import ProvisionHandler class CloudInitProvisionHandler(ProvisionHandler): def __init__(self): super(CloudInitProvisionHandler, self).__init__() def run(self): # If provision is enabled, run default provision handler if conf.get_provision_enabled(): logger.warn("Provisioning flag is enabled, which overrides using " "cloud-init; running the 
default provisioning code") super(CloudInitProvisionHandler, self).run() return try: if super(CloudInitProvisionHandler, self).is_provisioned(): logger.info("Provisioning already completed, skipping.") return utc_start = datetime.utcnow() logger.info("Running CloudInit provisioning handler") self.wait_for_ovfenv() self.protocol_util.get_protocol() self.report_not_ready("Provisioning", "Starting") thumbprint = self.wait_for_ssh_host_key() self.write_provisioned() logger.info("Finished provisioning") self.report_ready(thumbprint) self.report_event("Provisioning with cloud-init succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(msg) return def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to copy ovf-env.xml file from provision ISO """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) for retry in range(0, max_retry): if os.path.isfile(ovf_file_path): try: ovf_env = OvfEnv(fileutil.read_file(ovf_file_path)) self.handle_provision_guest_agent(ovf_env.provision_guest_agent) return except ProtocolError as pe: raise ProvisionError("OVF xml could not be parsed " "[{0}]: {1}".format(ovf_file_path, ustr(pe))) else: if retry < max_retry - 1: logger.info( "Waiting for cloud-init to copy ovf-env.xml to {0} " "[{1} retries remaining, " "sleeping {2}s]".format(ovf_file_path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ovf-env.xml was not copied to {0} " "after {1}s".format(ovf_file_path, max_retry * sleep_time)) def wait_for_ssh_host_key(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to generate ssh host key """ 
keypair_type = conf.get_ssh_host_keypair_type() path = conf.get_ssh_key_public_path() for retry in range(0, max_retry): if os.path.isfile(path): logger.info("ssh host key found at: {0}".format(path)) try: thumbprint = self.get_ssh_host_key_thumbprint(chk_err=False) logger.info("Thumbprint obtained from : {0}".format(path)) return thumbprint except ProvisionError: logger.warn("Could not get thumbprint from {0}".format(path)) if retry < max_retry - 1: logger.info("Waiting for ssh host key be generated at {0} " "[{1} attempts remaining, " "sleeping {2}s]".format(path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ssh host key was not found at {0} " "after {1}s".format(path, max_retry * sleep_time)) WALinuxAgent-2.2.32/azurelinuxagent/pa/provision/default.py000066400000000000000000000273421335416306700240010ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # """ Provision handler """ import os import os.path import re import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.event import add_event, WALAEventOperation, \ elapsed_milliseconds from azurelinuxagent.common.exception import ProvisionError, ProtocolError, \ OSUtilError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol.restapi import ProvisionStatus from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME CUSTOM_DATA_FILE = "CustomData" CLOUD_INIT_PATTERN = b".*/bin/cloud-init.*" CLOUD_INIT_REGEX = re.compile(CLOUD_INIT_PATTERN) PROVISIONED_FILE = 'provisioned' class ProvisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() def run(self): if not conf.get_provision_enabled(): logger.info("Provisioning is disabled, skipping.") self.write_provisioned() self.report_ready() return try: utc_start = datetime.utcnow() thumbprint = None if self.is_provisioned(): logger.info("Provisioning already completed, skipping.") return logger.info("Running default provisioning handler") if not self.validate_cloud_init(is_expected=False): raise ProvisionError("cloud-init appears to be running, " "this is not expected, cannot continue") logger.info("Copying ovf-env.xml") ovf_env = self.protocol_util.copy_ovf_env() self.protocol_util.get_protocol(by_file=True) self.report_not_ready("Provisioning", "Starting") logger.info("Starting provisioning") self.provision(ovf_env) thumbprint = self.reg_ssh_host_key() self.osutil.restart_ssh_service() self.write_provisioned() self.report_event("Provisioning succeeded 
({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) self.handle_provision_guest_agent(ovf_env.provision_guest_agent) self.report_ready(thumbprint) logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(msg, is_success=False) return @staticmethod def validate_cloud_init(is_expected=True): is_running = False if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as fh: pname = fh.read() if CLOUD_INIT_REGEX.match(pname): is_running = True msg = "cloud-init is running [PID {0}, {1}]".format(pid, pname) if is_expected: logger.verbose(msg) else: logger.error(msg) break except IOError: continue return is_running == is_expected @staticmethod def _get_uptime_seconds(): try: with open('/proc/uptime') as fh: uptime, _ = fh.readline().split() return uptime except: return 0 def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key(): fileutil.rm_files(conf.get_ssh_key_glob()) if conf.get_ssh_host_keypair_mode() == "auto": ''' The -A option generates all supported key types. This is supported since OpenSSH 5.9 (2011). ''' shellutil.run("ssh-keygen -A") else: keygen_cmd = "ssh-keygen -N '' -t {0} -f {1}" shellutil.run(keygen_cmd. 
format(keypair_type, conf.get_ssh_key_private_path())) return self.get_ssh_host_key_thumbprint() def get_ssh_host_key_thumbprint(self, chk_err=True): cmd = "ssh-keygen -lf {0}".format(conf.get_ssh_key_public_path()) ret = shellutil.run_get_output(cmd, chk_err=chk_err) if ret[0] == 0: return ret[1].rstrip().split()[1].replace(':', '') else: raise ProvisionError(("Failed to generate ssh host key: " "ret={0}, out= {1}").format(ret[0], ret[1])) def provisioned_file_path(self): return os.path.join(conf.get_lib_dir(), PROVISIONED_FILE) def is_provisioned(self): ''' A VM is considered provisionend *anytime* the provisioning sentinel file exists and not provisioned *anytime* the file is absent. If the VM was provisioned using an agent that did not record the VM unique identifier, the provisioning file will be re-written to include the identifier. A warning is logged *if* the VM unique identifier has changed since VM was provisioned. ''' if not os.path.isfile(self.provisioned_file_path()): return False s = fileutil.read_file(self.provisioned_file_path()).strip() if not self.osutil.is_current_instance_id(s): if len(s) > 0: logger.warn("VM is provisioned, " "but the VM unique identifier has changed -- " "clearing cached state") from azurelinuxagent.pa.deprovision \ import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run_changed_unique_id() self.write_provisioned() self.report_ready() return True def write_provisioned(self): fileutil.write_file( self.provisioned_file_path(), get_osutil().get_instance_id()) @staticmethod def write_agent_disabled(): logger.warn("Disabling guest agent in accordance with ovf-env.xml") fileutil.write_file(conf.get_disable_agent_file_path(), '') def handle_provision_guest_agent(self, provision_guest_agent): self.report_event(message=provision_guest_agent, is_success=True, duration=0, operation=WALAEventOperation.ProvisionGuestAgent) if provision_guest_agent and provision_guest_agent.lower() == 'false': 
self.write_agent_disabled() def provision(self, ovfenv): logger.info("Handle ovf-env.xml.") try: logger.info("Set hostname [{0}]".format(ovfenv.hostname)) self.osutil.set_hostname(ovfenv.hostname) logger.info("Publish hostname [{0}]".format(ovfenv.hostname)) self.osutil.publish_hostname(ovfenv.hostname) self.config_user_account(ovfenv) self.save_customdata(ovfenv) if conf.get_delete_root_password(): self.osutil.del_root_password() except OSUtilError as e: raise ProvisionError("Failed to provision: {0}".format(ustr(e))) def config_user_account(self, ovfenv): logger.info("Create user account if not exists") self.osutil.useradd(ovfenv.username) if ovfenv.user_password is not None: logger.info("Set user password.") crypt_id = conf.get_password_cryptid() salt_len = conf.get_password_crypt_salt_len() self.osutil.chpasswd(ovfenv.username, ovfenv.user_password, crypt_id=crypt_id, salt_len=salt_len) logger.info("Configure sudoer") self.osutil.conf_sudoer(ovfenv.username, nopasswd=ovfenv.user_password is None) logger.info("Configure sshd") self.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) self.deploy_ssh_pubkeys(ovfenv) self.deploy_ssh_keypairs(ovfenv) def save_customdata(self, ovfenv): customdata = ovfenv.customdata if customdata is None: return lib_dir = conf.get_lib_dir() if conf.get_decode_customdata() or conf.get_execute_customdata(): logger.info("Decode custom data") customdata = self.osutil.decode_customdata(customdata) logger.info("Save custom data") customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) fileutil.write_file(customdata_file, customdata) if conf.get_execute_customdata(): start = time.time() logger.info("Execute custom data") os.chmod(customdata_file, 0o700) shellutil.run(customdata_file) add_event(name=AGENT_NAME, duration=int(time.time() - start), is_success=True, op=WALAEventOperation.CustomData) def deploy_ssh_pubkeys(self, ovfenv): for pubkey in ovfenv.ssh_pubkeys: logger.info("Deploy ssh public key.") 
self.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) def deploy_ssh_keypairs(self, ovfenv): for keypair in ovfenv.ssh_keypairs: logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) def report_event(self, message, is_success=False, duration=0, operation=WALAEventOperation.Provision): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, op=operation) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, description=description) try: protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting NotReady failed: {0}", e) self.report_event(ustr(e)) def report_ready(self, thumbprint=None): status = ProvisionStatus(status="Ready") status.properties.certificateThumbprint = thumbprint try: protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting Ready failed: {0}", e) self.report_event(ustr(e)) WALinuxAgent-2.2.32/azurelinuxagent/pa/provision/factory.py000066400000000000000000000022351335416306700240160ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
def get_provision_handler(distro_name=DISTRO_NAME,
                          distro_version=DISTRO_VERSION,
                          distro_full_name=DISTRO_FULL_NAME):
    """Return the provisioning handler selected by configuration.

    The distro arguments are accepted for interface symmetry with the
    other platform factories but do not influence the choice here:
    the cloud-init handler is used whenever the agent configuration
    enables cloud-init provisioning.
    """
    if conf.get_provision_cloudinit():
        return CloudInitProvisionHandler()
    return ProvisionHandler()
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class CentOSRDMAHandler(RDMAHandler): rdma_user_mode_package_name = 'microsoft-hyper-v-rdma' rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma' rdma_wrapper_package_name = 'msft-rdma-drivers' hyper_v_package_name = "hypervkvpd" hyper_v_package_name_new = "microsoft-hyper-v" version_major = None version_minor = None def __init__(self, distro_version): v = distro_version.split('.') if len(v) < 2: raise Exception('Unexpected centos version: %s' % distro_version) self.version_major, self.version_minor = v[0], v[1] def install_driver(self): """ Install the KVP daemon and the appropriate RDMA driver package for the RDMA firmware. """ # Check and install the KVP deamon if it not running time.sleep(10) # give some time for the hv_hvp_daemon to start up. 
kvpd_running = RDMAHandler.is_kvp_daemon_running() logger.info('RDMA: kvp daemon running: %s' % kvpd_running) if not kvpd_running: self.check_or_install_kvp_daemon() time.sleep(10) # wait for post-install reboot or kvp to come up # Find out RDMA firmware version and see if the existing package needs # updating or if the package is missing altogether (and install it) fw_version = RDMAHandler.get_rdma_version() if not fw_version: raise Exception('Cannot determine RDMA firmware version') logger.info("RDMA: found firmware version: {0}".format(fw_version)) fw_version = self.get_int_rdma_version(fw_version) installed_pkg = self.get_rdma_package_info() if installed_pkg: logger.info( 'RDMA: driver package present: {0}'.format(installed_pkg)) if self.is_rdma_package_up_to_date(installed_pkg, fw_version): logger.info('RDMA: driver package is up-to-date') return else: logger.info('RDMA: driver package needs updating') self.update_rdma_package(fw_version) else: logger.info('RDMA: driver package is NOT installed') self.update_rdma_package(fw_version) def is_rdma_package_up_to_date(self, pkg, fw_version): # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 pattern = '{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) return re.match(pattern, pkg) @staticmethod def get_int_rdma_version(version): s = version.split('.') if len(s) == 0: raise Exception('Unexpected RDMA firmware version: "%s"' % version) return s[0] def get_rdma_package_info(self): """ Returns the installed rdma package name or None """ ret, output = shellutil.run_get_output( 'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False) if ret != 0: return None return output def update_rdma_package(self, fw_version): logger.info("RDMA: updating RDMA packages") self.refresh_repos() self.force_install_package(self.rdma_wrapper_package_name) self.install_rdma_drivers(fw_version) def 
force_install_package(self, pkg_name): """ Attempts to remove existing package and installs the package """ logger.info('RDMA: Force installing package: %s' % pkg_name) if self.uninstall_package(pkg_name) != 0: logger.info('RDMA: Erasing package failed but will continue') if self.install_package(pkg_name) != 0: raise Exception('Failed to install package "{0}"'.format(pkg_name)) logger.info('RDMA: installation completed: %s' % pkg_name) @staticmethod def uninstall_package(pkg_name): return shellutil.run('yum erase -y -q {0}'.format(pkg_name)) @staticmethod def install_package(pkg_name): return shellutil.run('yum install -y -q {0}'.format(pkg_name)) def refresh_repos(self): logger.info("RDMA: refreshing yum repos") if shellutil.run('yum clean all') != 0: raise Exception('Cleaning yum repositories failed') if shellutil.run('yum updateinfo') != 0: raise Exception('Failed to act on yum repo update information') logger.info("RDMA: repositories refreshed") def install_rdma_drivers(self, fw_version): """ Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or src). Tries to uninstall them first. 
""" pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format( self.version_major, self.version_minor) logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir)) if not os.path.isdir(pkg_dir): raise Exception('RDMA packages directory %s is missing' % pkg_dir) pkgs = os.listdir(pkg_dir) logger.info('RDMA: found %d files in package directory' % len(pkgs)) # Uninstal KVP daemon first (if exists) self.uninstall_kvp_driver_package_if_exists() # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) kmod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) if not kmod_pkg: raise Exception("RDMA kernel mode package not found") kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) self.uninstall_pkg_and_install_from( 'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path) # Install user mode driver (microsoft-hyper-v-rdma-*) umod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) if not umod_pkg: raise Exception("RDMA user mode package not found") umod_pkg_path = os.path.join(pkg_dir, umod_pkg) self.uninstall_pkg_and_install_from( 'user mode', self.rdma_user_mode_package_name, umod_pkg_path) logger.info("RDMA: driver packages installed") if not self.load_driver_module() or not self.is_driver_loaded(): logger.info("RDMA: driver module is not loaded; reboot required") self.reboot_system() else: logger.info("RDMA: kernel module is loaded") @staticmethod def get_file_by_pattern(list, pattern): for l in list: if re.match(pattern, l): return l return None def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path): logger.info( "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path)) logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name) if self.uninstall_package(pkg_name) == 0: logger.info("RDMA: Successfully uninstaled %s" % pkg_name) logger.info( "RDMA: Installing {0} package from {1}".format(pkg_type, 
pkg_path)) if self.install_package(pkg_path) != 0: raise Exception( "Failed to install RDMA {0} package".format(pkg_type)) @staticmethod def is_package_installed(pkg): """Runs rpm -q and checks return code to find out if a package is installed""" return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0 def uninstall_kvp_driver_package_if_exists(self): logger.info('RDMA: deleting existing kvp driver packages') kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for kvp_pkg in kvp_pkgs: if not self.is_package_installed(kvp_pkg): logger.info( "RDMA: kvp package %s does not exist, skipping" % kvp_pkg) else: logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg) if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0: logger.info("RDMA: successfully erased package") else: logger.error("RDMA: failed to erase package") def check_or_install_kvp_daemon(self): """Checks if kvp daemon package is installed, if not installs the package and reboots the machine. """ logger.info("RDMA: Checking kvp daemon packages.") kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for pkg in kvp_pkgs: logger.info("RDMA: Checking if package %s installed" % pkg) installed = self.is_package_installed(pkg) if installed: raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg) kvp_pkg_to_install=self.hyper_v_package_name logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install) logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install) if self.install_package(kvp_pkg_to_install) != 0: raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install) logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install) logger.info("RDMA: Machine will now be rebooted.") self.reboot_system()WALinuxAgent-2.2.32/azurelinuxagent/pa/rdma/factory.py000066400000000000000000000031161335416306700227100ustar00rootroot00000000000000# Copyright 2016 
Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION from azurelinuxagent.common.rdma import RDMAHandler from .suse import SUSERDMAHandler from .centos import CentOSRDMAHandler from .ubuntu import UbuntuRDMAHandler def get_rdma_handler( distro_full_name=DISTRO_FULL_NAME, distro_version=DISTRO_VERSION ): """Return the handler object for RDMA driver handling""" if ( (distro_full_name == 'SUSE Linux Enterprise Server' or distro_full_name == 'SLES') and int(distro_version) > 11 ): return SUSERDMAHandler() if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS': return CentOSRDMAHandler(distro_version) if distro_full_name == 'Ubuntu': return UbuntuRDMAHandler() logger.info("No RDMA handler exists for distro='{0}' version='{1}'", distro_full_name, distro_version) return RDMAHandler() WALinuxAgent-2.2.32/azurelinuxagent/pa/rdma/suse.py000066400000000000000000000154241335416306700222250ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2017 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class SUSERDMAHandler(RDMAHandler): def install_driver(self): """Install the appropriate driver package for the RDMA firmware""" fw_version = RDMAHandler.get_rdma_version() if not fw_version: error_msg = 'RDMA: Could not determine firmware version. ' error_msg += 'Therefore, no driver will be installed.' logger.error(error_msg) return zypper_install = 'zypper -n in %s' zypper_install_noref = 'zypper -n --no-refresh in %s' zypper_lock = 'zypper addlock %s' zypper_remove = 'zypper -n rm %s' zypper_search = 'zypper -n se -s %s' zypper_unlock = 'zypper removelock %s' package_name = 'msft-rdma-kmp-default' cmd = zypper_search % package_name status, repo_package_info = shellutil.run_get_output(cmd) driver_package_versions = [] driver_package_installed = False for entry in repo_package_info.split('\n'): if package_name in entry: sections = entry.split('|') if len(sections) < 4: error_msg = 'RDMA: Unexpected output from"%s": "%s"' logger.error(error_msg % (cmd, entry)) continue installed = sections[0].strip() version = sections[3].strip() driver_package_versions.append(version) if fw_version in version and installed.startswith('i'): info_msg = 'RDMA: Matching driver package "%s-%s" ' info_msg += 'is already installed, nothing to do.' 
logger.info(info_msg % (package_name, version)) return True if installed.startswith('i'): # A driver with a different version is installed driver_package_installed = True cmd = zypper_unlock % package_name result = shellutil.run(cmd) info_msg = 'Driver with different version installed ' info_msg += 'unlocked package "%s".' logger.info(info_msg % (package_name)) # If we get here the driver package is installed but the # version doesn't match or no package is installed requires_reboot = False if driver_package_installed: # Unloading the particular driver with rmmod does not work # We have to reboot after the new driver is installed if self.is_driver_loaded(): info_msg = 'RDMA: Currently loaded driver does not match the ' info_msg += 'firmware implementation, reboot will be required.' logger.info(info_msg) requires_reboot = True logger.info("RDMA: removing package %s" % package_name) cmd = zypper_remove % package_name shellutil.run(cmd) logger.info("RDMA: removed package %s" % package_name) logger.info("RDMA: looking for fw version %s in packages" % fw_version) for entry in driver_package_versions: if fw_version not in entry: logger.info("Package '%s' is not a match." % entry) else: logger.info("Package '%s' is a match. Installing." % entry) complete_name = '%s-%s' % (package_name, entry) cmd = zypper_install % complete_name result = shellutil.run(cmd) if result: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from available repositories.' logger.error(error_msg % complete_name) msg = 'RDMA: Successfully installed "%s" from ' msg += 'configured repositories' logger.info(msg % complete_name) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: logger.info("RDMA: No suitable match in repos. 
Trying local.") local_packages = glob.glob('/opt/microsoft/rdma/*.rpm') for local_package in local_packages: logger.info("Examining: %s" % local_package) if local_package.endswith('.src.rpm'): continue if ( package_name in local_package and fw_version in local_package ): logger.info("RDMA: Installing: %s" % local_package) cmd = zypper_install_noref % local_package result = shellutil.run(cmd) if result and result != 106: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from local package cache' logger.error(error_msg % local_package) break msg = 'RDMA: Successfully installed "%s" from ' msg += 'local package cache' logger.info(msg % (local_package)) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: error_msg = 'Unable to find driver package that matches ' error_msg += 'RDMA firmware version "%s"' % fw_version logger.error(error_msg) return WALinuxAgent-2.2.32/azurelinuxagent/pa/rdma/ubuntu.py000066400000000000000000000121771335416306700225720ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class UbuntuRDMAHandler(RDMAHandler): def install_driver(self): #Install the appropriate driver package for the RDMA firmware nd_version = RDMAHandler.get_rdma_version() if not nd_version: logger.error("RDMA: Could not determine firmware version. No driver will be installed") return #replace . with _, we are looking for number like 144_0 nd_version = re.sub('\.', '_', nd_version) #Check to see if we need to reconfigure driver status,module_name = shellutil.run_get_output('modprobe -R hv_network_direct', chk_err=False) if status != 0: logger.info("RDMA: modprobe -R hv_network_direct failed. Use module name hv_network_direct") module_name = "hv_network_direct" else: module_name = module_name.strip() logger.info("RDMA: current RDMA driver %s nd_version %s" % (module_name, nd_version)) if module_name == 'hv_network_direct_%s' % nd_version: logger.info("RDMA: driver is installed and ND version matched. Skip reconfiguring driver") return #Reconfigure driver if one is available status,output = shellutil.run_get_output('modinfo hv_network_direct_%s' % nd_version); if status == 0: logger.info("RDMA: driver with ND version is installed. Link to module name") self.update_modprobed_conf(nd_version) return #Driver not found. We need to check to see if we need to update kernel if not conf.enable_rdma_update(): logger.info("RDMA: driver update is disabled. 
Skip kernel update") return status,output = shellutil.run_get_output('uname -r') if status != 0: return if not re.search('-azure$', output): logger.error("RDMA: skip driver update on non-Azure kernel") return kernel_version = re.sub('-azure$', '', output) kernel_version = re.sub('-', '.', kernel_version) #Find the new kernel package version status,output = shellutil.run_get_output('apt-get update') if status != 0: return status,output = shellutil.run_get_output('apt-cache show --no-all-versions linux-azure') if status != 0: return r = re.search('Version: (\S+)', output) if not r: logger.error("RDMA: version not found in package linux-azure.") return package_version = r.groups()[0] #Remove the ending . after package_version = re.sub("\.\d+$", "", package_version) logger.info('RDMA: kernel_version=%s package_version=%s' % (kernel_version, package_version)) kernel_version_array = [ int(x) for x in kernel_version.split('.') ] package_version_array = [ int(x) for x in package_version.split('.') ] if kernel_version_array < package_version_array: logger.info("RDMA: newer version available, update kernel and reboot") status,output = shellutil.run_get_output('apt-get -y install linux-azure') if status: logger.error("RDMA: kernel update failed") return self.reboot_system() else: logger.error("RDMA: no kernel update is avaiable for ND version %s" % nd_version) def update_modprobed_conf(self, nd_version): #Update /etc/modprobe.d/vmbus-rdma.conf to point to the correct driver modprobed_file = '/etc/modprobe.d/vmbus-rdma.conf' lines = '' if not os.path.isfile(modprobed_file): logger.info("RDMA: %s not found, it will be created" % modprobed_file) else: f = open(modprobed_file, 'r') lines = f.read() f.close() r = re.search('alias hv_network_direct hv_network_direct_\S+', lines) if r: lines = re.sub('alias hv_network_direct hv_network_direct_\S+', 'alias hv_network_direct hv_network_direct_%s' % nd_version, lines) else: lines += '\nalias hv_network_direct hv_network_direct_%s\n' % 
nd_version f = open('/etc/modprobe.d/vmbus-rdma.conf', 'w') f.write(lines) f.close() logger.info("RDMA: hv_network_direct alias updated to ND %s" % nd_version) WALinuxAgent-2.2.32/bin/000077500000000000000000000000001335416306700146665ustar00rootroot00000000000000WALinuxAgent-2.2.32/bin/waagent000077500000000000000000000027511335416306700162470ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import os import imp import sys if __name__ == '__main__' : import azurelinuxagent.agent as agent """ Invoke main method of agent """ agent.main() if __name__ == 'waagent': """ Load waagent2.0 to support old version of extensions """ if sys.version_info[0] == 3: raise ImportError("waagent2.0 doesn't support python3") bin_path = os.path.dirname(os.path.abspath(__file__)) agent20_path = os.path.join(bin_path, "waagent2.0") if not os.path.isfile(agent20_path): raise ImportError("Can't load waagent") agent20 = imp.load_source('waagent', agent20_path) __all__ = dir(agent20) WALinuxAgent-2.2.32/bin/waagent2.0000066400000000000000000007550231335416306700164730ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import crypt import random import array import base64 import httplib import os import os.path import platform import pwd import re import shutil import socket import SocketServer import struct import string import subprocess import sys import tempfile import textwrap import threading import time import traceback import xml.dom.minidom import fcntl import inspect import zipfile import json import datetime import xml.sax.saxutils from distutils.version import LooseVersion if not hasattr(subprocess,'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) subprocess.check_output=check_output subprocess.CalledProcessError=CalledProcessError GuestAgentName = "WALinuxAgent" GuestAgentLongName = "Azure Linux Agent" GuestAgentVersion = "WALinuxAgent-2.0.16" ProtocolVersion = "2012-11-30" #WARNING this value is used to confirm the correct fabric protocol. 
Config = None WaAgent = None DiskActivated = False Openssl = "openssl" Children = [] ExtensionChildren = [] VMM_STARTUP_SCRIPT_NAME='install' VMM_CONFIG_FILE_NAME='linuxosconfiguration.xml' global RulesFiles RulesFiles = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"] global LibDir LibDir = "/var/lib/waagent" global provisioned provisioned=False global provisionError provisionError=None HandlerStatusToAggStatus = {"installed":"Installing", "enabled":"Ready", "unintalled":"NotReady", "disabled":"NotReady"} WaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ext4 # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. 
Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ README_FILENAME="DATALOSS_WARNING_README.txt" README_FILECONTENT="""\ WARNING: THIS IS A TEMPORARY DISK. Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ ############################################################ # BEGIN DISTRO CLASS DEFS ############################################################ ############################################################ # AbstractDistro ############################################################ class AbstractDistro(object): """ AbstractDistro defines a skeleton neccesary for a concrete Distro class. Generic methods and attributes are kept here, distribution specific attributes and behavior are to be placed in the concrete child named distroDistro, where distro is the string returned by calling python platform.linux_distribution()[0]. So for CentOS the derived class is called 'centosDistro'. """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux=None self.service_cmd='/usr/sbin/service' self.ssh_service_restart_option='restart' self.ssh_service_name='ssh' self.ssh_config_file='/etc/ssh/sshd_config' self.hostname_file_path='/etc/hostname' self.dhcp_client_name='dhclient' self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod', 'openssl', 'sfdisk', 'fdisk', 'mkfs', 'sed', 'grep', 'sudo', 'parted' ] self.init_script_file='/etc/init.d/waagent' self.agent_package_name='WALinuxAgent' self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ] self.agent_files_to_uninstall = ["/etc/waagent.conf", "/etc/logrotate.d/waagent"] self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT=' self.getpidcmd = 'pidof' self.mount_dvd_cmd = 'mount' self.sudoers_dir_base = '/etc' self.waagent_conf_file = WaagentConf self.shadow_file_mode=0600 self.shadow_file_path="/etc/shadow" self.dhcp_enabled = False def isSelinuxSystem(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if Run("which getenforce",chk_err=False): self.selinux = False else: self.selinux = True return self.selinux def isSelinuxRunning(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.isSelinuxSystem(): return RunGetOutput("getenforce")[1].startswith("Enforcing") else: return False def setSelinuxEnforce(self,state): """ Calls shell command 'setenforce' with 'state' and returns resulting exit code. """ if self.isSelinuxSystem(): if state: s = '1' else: s='0' return Run("setenforce "+s) def setSelinuxContext(self,path,cn): """ Calls shell 'chcon' with 'path' and 'cn' context. Returns exit result. 
""" if self.isSelinuxSystem(): if not os.path.exists(path): Error("Path does not exist: {0}".format(path)) return 1 return Run('chcon ' + cn + ' ' + path) def setHostname(self,name): """ Shell call to hostname. Returns resulting exit code. """ return Run('hostname ' + name) def publishHostname(self,name): """ Set the contents of the hostname file to 'name'. Return 1 on failure. """ try: r=SetFileContents(self.hostname_file_path, name) for f in EtcDhcpClientConfFiles: if os.path.exists(f) and FindStringInFile(f,r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])') == None : r=ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', "send host-name \"" + name + "\";\n" + "\n".join(filter(lambda a: not a.startswith("send host-name"), GetFileContents('/etc/dhcp/dhclient.conf').split('\n')))) except: return 1 return r def installAgentServiceScriptFiles(self): """ Create the waagent support files for service installation. Called by registerAgentService() Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def registerAgentService(self): """ Calls installAgentService to create service files. Shell exec service registration commands. (e.g. chkconfig --add waagent) Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def uninstallAgentService(self): """ Call service subsystem to remove waagent script. Abstract Virtual Function. Over-ridden in concrete Distro classes. 
""" pass def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' start') def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop',False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_name + " " + self.ssh_service_restart_option retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def sshDeployPublicKey(self,fprint,path): """ Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences in openssl packages deployed """ error=0 SshPubKey = OvfEnv().OpensslToSsh(fprint) if SshPubKey != None: AppendFileContents(path, SshPubKey) else: Error("Failed: " + fprint + ".crt -> " + path) error = 1 return error def checkPackageInstalled(self,p): """ Query package database for prescence of an installed package. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def checkPackageUpdateable(self,p): """ Online check if updated package of walinuxagent is available. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def deleteRootPassword(self): """ Generic root password removal. 
""" filepath="/etc/shadow" ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath,self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0') Log("Root password deleted.") return 0 def changePass(self,user,password): Log("Change user password") crypt_id = Config.get("Provisioning.PasswordCryptId") if crypt_id is None: crypt_id = "6" salt_len = Config.get("Provisioning.PasswordCryptSaltLength") try: salt_len = int(salt_len) if salt_len < 0 or salt_len > 10: salt_len = 10 except (ValueError, TypeError): salt_len = 10 return self.chpasswd(user, password, crypt_id=crypt_id, salt_len=salt_len) def chpasswd(self, username, password, crypt_id=6, salt_len=10): passwd_hash = self.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = RunGetOutput(cmd, log_cmd=False) if ret != 0: return "Failed to set password for {0}: {1}".format(username, output) def gen_password_hash(self, password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) return crypt.crypt(password, salt) def load_ata_piix(self): return WaAgent.TryLoadAtapiix() def unload_ata_piix(self): """ Generic function to remove ata_piix.ko. """ return WaAgent.TryUnloadAtapiix() def deprovisionWarnUser(self): """ Generic user warnings used at deprovision. """ print("WARNING! 
Nameserver configuration in /etc/resolv.conf will be deleted.") def deprovisionDeleteFiles(self): """ Files to delete when VM is deprovisioned """ for a in VarLibDhcpDirectories: Run("rm -f " + a + "/*") # Clear LibDir, remove nameserver and root bash history for f in os.listdir(LibDir) + self.fileBlackList: try: os.remove(f) except: pass return 0 def uninstallDeleteFiles(self): """ Files to delete when agent is uninstalled. """ for f in self.agent_files_to_uninstall: try: os.remove(f) except: pass return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. """ if self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m= __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self,buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot+'/etc'): os.mkdir(buildroot+'/etc') SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot+'/etc/logrotate.d'): os.mkdir(buildroot+'/etc/logrotate.d') SetFileContents(buildroot+'/etc/logrotate.d/waagent', WaagentLogrotate) self.init_script_file=buildroot+self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def GetIpv4Address(self): """ Return the ip of the first active non-loopback interface. 
""" addr='' iface,addr=GetFirstActiveNetworkInterfaceNonLoopback() return addr def GetMacAddress(self): return GetMacAddress() def GetInterfaceName(self): return GetFirstActiveNetworkInterfaceNonLoopback()[0] def RestartInterface(self, iface, max_retry=3): for retry in range(1, max_retry + 1): ret = Run("ifdown " + iface + " && ifup " + iface) if ret == 0: return Log("Failed to restart interface: {0}, ret={1}".format(iface, ret)) if retry < max_retry: Log("Retry restart interface in 5 seconds") time.sleep(5) def CreateAccount(self,user, password, expiration, thumbprint): return CreateAccount(user, password, expiration, thumbprint) def DeleteAccount(self,user): return DeleteAccount(user) def ActivateResourceDisk(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): DiskActivated = True return device = DeviceForIdePort(1) if device == None: Error("ActivateResourceDisk: Unable to detect disk topology.") return device = "/dev/" + device mountlist = RunGetOutput("mount")[1] mountpoint = GetMountPoint(mountlist, device) if(mountpoint): Log("ActivateResourceDisk: " + device + "1 is already mounted.") else: mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if fs == None: fs = "ext3" partition = device + "1" #Check partition type Log("Detect GPT...") ret = RunGetOutput("parted {0} print".format(device)) if ret[0] == 0 and "gpt" in ret[1]: Log("GPT detected.") #GPT(Guid Partition Table) is used. #Get partitions. parts = filter(lambda x : re.match("^\s*[0-9]+", x), ret[1].split("\n")) #If there are more than 1 partitions, remove all partitions #and create a new one using the entire disk space. 
if len(parts) > 1: for i in range(1, len(parts) + 1): Run("parted {0} rm {1}".format(device, i)) Run("parted {0} mkpart primary 0% 100%".format(device)) Run("mkfs." + fs + " " + partition + " -F") else: existingFS = RunGetOutput("sfdisk -q -c " + device + " 1", chk_err=False)[1].rstrip() if existingFS == "7" and fs != "ntfs": Run("sfdisk -c " + device + " 1 83") Run("mkfs." + fs + " " + partition) if Run("mount " + partition + " " + mountpoint, chk_err=False): #If mount failed, try to format the partition and mount again Warn("Failed to mount resource disk. Retry mounting.") Run("mkfs." + fs + " " + partition + " -F") if Run("mount " + partition + " " + mountpoint): Error("ActivateResourceDisk: Failed to mount resource disk (" + partition + ").") return Log("Resource disk (" + partition + ") is mounted at " + mountpoint + " with fstype " + fs) #Create README file under the root of resource disk SetFileContents(os.path.join(mountpoint,README_FILENAME), README_FILECONTENT) DiskActivated = True #Create swap space swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): return sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) Run("mkswap " + mountpoint + "/swapfile") if not Run("swapon " + mountpoint + "/swapfile"): Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") def Install(self): return Install() def mediaHasFilesystem(self,dsk): if len(dsk) == 0 : return False if Run("LC_ALL=C fdisk -l " + dsk + " | grep Disk"): return False return True def mountDVD(self,dvd,location): return RunGetOutput(self.mount_dvd_cmd + ' ' + 
dvd + ' ' + location) def GetHome(self): return GetHome() def getDhcpClientName(self): return self.dhcp_client_name def initScsiDiskTimeout(self): """ Set the SCSI disk timeout when the agent starts running """ self.setScsiDiskTimeout() def setScsiDiskTimeout(self): """ Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout """ try: scsiTimeout = Config.get("OS.RootDeviceScsiTimeout") for diskName in [disk for disk in os.listdir("/sys/block") if disk.startswith("sd")]: self.setBlockDeviceTimeout(diskName, scsiTimeout) except: pass def setBlockDeviceTimeout(self, device, timeout): """ Set SCSI disk timeout by set /sys/block/sd*/device/timeout """ if timeout != None and device: filePath = "/sys/block/" + device + "/device/timeout" if(GetFileContents(filePath).splitlines()[0].rstrip() != timeout): SetFileContents(filePath,timeout) Log("SetBlockDeviceTimeout: Update the device " + device + " with timeout " + timeout) def waitForSshHostKey(self, path): """ Provide a dummy waiting, since by default, ssh host key is created by waagent and the key should already been created. """ if(os.path.isfile(path)): return True else: Error("Can't find host key: {0}".format(path)) return False def isDHCPEnabled(self): return self.dhcp_enabled def stopDHCP(self): """ Stop the system DHCP client so that the agent can bind on its port. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('stopDHCP method missing') def startDHCP(self): """ Start the system DHCP client. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('startDHCP method missing') def translateCustomData(self, data): """ Translate the custom data from a Base64 encoding. Default to no-op. 
""" decodeCustomData = Config.get("Provisioning.DecodeCustomData") if decodeCustomData != None and decodeCustomData.lower().startswith("y"): return base64.b64decode(data) return data def getConfigurationPath(self): return "/etc/waagent.conf" def getProcessorCores(self): return int(RunGetOutput("grep 'processor.*:' /proc/cpuinfo |wc -l")[1]) def getTotalMemory(self): return int(RunGetOutput("grep MemTotal /proc/meminfo |awk '{print $2}'")[1])/1024 def getInterfaceNameByMac(self, mac): ret, output = RunGetOutput("ifconfig -a") if ret != 0: raise Exception("Failed to get network interface info") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("Failed to get ifname with mac: {0}".format(mac)) return eths[-1] def configIpV4(self, ifName, addr, netmask=24): ret, output = RunGetOutput("ifconfig {0} up".format(ifName)) if ret != 0: raise Exception("Failed to bring up {0}: {1}".format(ifName, output)) ret, output = RunGetOutput("ifconfig {0} {1}/{2}".format(ifName, addr, netmask)) if ret != 0: raise Exception("Failed to config ipv4 for {0}: {1}".format(ifName, output)) def setDefaultGateway(self, gateway): Run("/sbin/route add default gw" + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " netmask " + mask + " gw " + gateway, chk_err=False) ############################################################ # GentooDistro ############################################################ gentoo_init_file = """\ #!/sbin/runscript command=/usr/sbin/waagent pidfile=/var/run/waagent.pid command_args=-daemon command_background=true name="Azure Linux Agent" depend() { need localmount use logger network after bootmisc modules } """ class gentooDistro(AbstractDistro): """ Gentoo 
distro concrete class """ def __init__(self): # super(gentooDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.hostname_file_path='/etc/conf.d/hostname' self.dhcp_client_name='dhcpcd' self.shadow_file_mode=0640 self.init_file=gentoo_init_file def publishHostname(self,name): try: if (os.path.isfile(self.hostname_file_path)): r=ReplaceFileContentsAtomic(self.hostname_file_path, "hostname=\"" + name + "\"\n" + "\n".join(filter(lambda a: not a.startswith("hostname="), GetFileContents(self.hostname_file_path).split("\n")))) except: return 1 return r def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0755) def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('rc-update add ' + self.agent_service_name + ' default') def uninstallAgentService(self): return Run('rc-update del ' + self.agent_service_name + ' default') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self,p): if Run('eix -I ^' + p + '$',chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run('eix -u ^' + p + '$',chk_err=False): return 0 else: return 1 def RestartInterface(self, iface): Run("/etc/init.d/net." + iface + " restart") ############################################################ # SuSEDistro ############################################################ suse_init_file = """\ #! /bin/sh # # Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the AzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting AzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -daemon rc_status -v ;; stop) echo -n "Shutting down AzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. 
the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service AzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit """ class SuSEDistro(AbstractDistro): """ SuSE Distro concrete class Put SuSE specific behavior here... """ def __init__(self): super(SuSEDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.kernel_boot_options_file='/boot/grub/menu.lst' self.hostname_file_path='/etc/HOSTNAME' self.requiredDeps += [ "/sbin/insserv" ] self.init_file=suse_init_file self.dhcp_client_name='dhcpcd' if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \ (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')): self.dhcp_client_name='wickedd-dhcp4' self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' self.getpidcmd='pidof ' self.dhcp_enabled=True def checkPackageInstalled(self,p): if Run("rpm -q " + p,chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run("zypper list-updates | grep " + p,chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) except: pass def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('insserv ' + self.agent_service_name) def uninstallAgentService(self): return Run('insserv -r ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() 
        return self.uninstallAgentService()

    def startDHCP(self):
        """Start the distro's DHCP client service (best effort)."""
        Run("service " + self.dhcp_client_name + " start", chk_err=False)

    def stopDHCP(self):
        """Stop the distro's DHCP client service (best effort)."""
        Run("service " + self.dhcp_client_name + " stop", chk_err=False)

############################################################
#	redhatDistro
############################################################
# sysV init script installed on RedHat-family systems.
redhat_init_file= """\
#!/bin/bash
#
# Init file for AzureLinuxAgent.
#
# chkconfig: 2345 60 80
# description: AzureLinuxAgent
#

# source function library
. /etc/rc.d/init.d/functions

RETVAL=0
FriendlyName="AzureLinuxAgent"
WAZD_BIN=/usr/sbin/waagent

start()
{
    echo -n $"Starting $FriendlyName: "
    $WAZD_BIN -daemon &
}

stop()
{
    echo -n $"Stopping $FriendlyName: "
    killproc -p /var/run/waagent.pid $WAZD_BIN
    RETVAL=$?
    echo
    return $RETVAL
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        start
        ;;
    reload)
        ;;
    report)
        ;;
    status)
        status $WAZD_BIN
        RETVAL=$?
        ;;
    *)
        echo $"Usage: $0 {start|stop|restart|status}"
        RETVAL=1
esac
exit $RETVAL
"""

class redhatDistro(AbstractDistro):
    """
    Redhat Distro concrete class
    Put Redhat specific behavior here...
    """
    def __init__(self):
        super(redhatDistro,self).__init__()
        self.service_cmd='/sbin/service'
        self.ssh_service_restart_option='condrestart'
        self.ssh_service_name='sshd'
        # RHEL 7+ uses /etc/hostname; earlier releases use /etc/sysconfig/network.
        self.hostname_file_path= None if DistInfo()[1] < '7.0' else '/etc/hostname'
        self.init_file=redhat_init_file
        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'
        self.grubKernelBootOptionsLine = 'kernel'

    def publishHostname(self,name):
        """
        Publish the hostname: base behavior plus, on pre-7.0 releases,
        rewrite HOSTNAME= in /etc/sysconfig/network; always set
        DHCP_HOSTNAME= in the active interface's ifcfg file.
        """
        super(redhatDistro,self).publishHostname(name)
        if DistInfo()[1] < '7.0' :
            filepath = "/etc/sysconfig/network"
            if os.path.isfile(filepath):
                ReplaceFileContentsAtomic(filepath,
                                          "HOSTNAME=" + name + "\n"
                                          + "\n".join(filter(lambda a: not a.startswith("HOSTNAME"),
                                                             GetFileContents(filepath).split('\n'))))
        ethernetInterface = MyDistro.GetInterfaceName()
        filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface
        if os.path.isfile(filepath):
            ReplaceFileContentsAtomic(filepath,
                                      "DHCP_HOSTNAME=" + name + "\n"
                                      + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"),
                                                         GetFileContents(filepath).split('\n'))))
        return 0

    def installAgentServiceScriptFiles(self):
        """Write the sysV init script and make it executable."""
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0744)
        return 0

    def registerAgentService(self):
        """Install the init script and register it with chkconfig."""
        self.installAgentServiceScriptFiles()
        return Run('chkconfig --add waagent')

    def uninstallAgentService(self):
        """Deregister the agent init script from chkconfig."""
        return Run('chkconfig --del ' + self.agent_service_name)

    def unregisterAgentService(self):
        """Stop the agent service, then uninstall it."""
        self.stopAgentService()
        return self.uninstallAgentService()

    def checkPackageInstalled(self,p):
        # yum exits 0 when the package is listed; return 1 = installed.
        if Run("yum list installed " + p,chk_err=False):
            return 0
        else:
            return 1

    def checkPackageUpdateable(self,p):
        # grep exits 0 when an update line is found; return 1 = updateable.
        if Run("yum check-update | grep "+ p,chk_err=False):
            return 1
        else:
            return 0

    def checkDependencies(self):
        """
        Generic dependency check.
        Return 1 unless all dependencies are satisfied.
        """
        # network-manager conflicts with the agent's DHCP handling pre-7.0.
        if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'):
            Error(GuestAgentLongName + " is not compatible with network-manager.")
            return 1
        try:
            m= __import__('pyasn1')
        except ImportError:
            Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.")
            return 1
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1",chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

############################################################
#	centosDistro
############################################################
class centosDistro(redhatDistro):
    """
    CentOS Distro concrete class
    Put CentOS specific behavior here...
    """
    def __init__(self):
        super(centosDistro,self).__init__()

############################################################
#	eulerosDistro
############################################################
class eulerosDistro(redhatDistro):
    """
    EulerOS Distro concrete class
    Put EulerOS specific behavior here...
    """
    def __init__(self):
        super(eulerosDistro,self).__init__()

############################################################
#	oracleDistro
############################################################
class oracleDistro(redhatDistro):
    """
    Oracle Distro concrete class
    Put Oracle specific behavior here...
    """
    def __init__(self):
        super(oracleDistro, self).__init__()

############################################################
#	asianuxDistro
############################################################
class asianuxDistro(redhatDistro):
    """
    Asianux Distro concrete class
    Put Asianux specific behavior here...
    """
    def __init__(self):
        super(asianuxDistro,self).__init__()

############################################################
#	CoreOSDistro
############################################################
class CoreOSDistro(AbstractDistro):
    """
    CoreOS Distro concrete class
    Put CoreOS specific behavior here...
    """
    # UID that CoreOS reserves for the built-in 'core' user; treated as
    # non-system in CreateAccount even though it is below UID_MIN.
    CORE_UID = 500

    def __init__(self):
        super(CoreOSDistro,self).__init__()
        self.requiredDeps += [ "/usr/bin/systemctl" ]
        self.agent_service_name = 'waagent'
        self.init_script_file='/etc/systemd/system/waagent.service'
        self.fileBlackList.append("/etc/machine-id")
        self.dhcp_client_name='systemd-networkd'
        self.getpidcmd='pidof '
        self.shadow_file_mode=0640
        self.waagent_path='/usr/share/oem/bin'
        self.python_path='/usr/share/oem/python/bin'
        self.dhcp_enabled=True
        # CoreOS ships python under the OEM partition; extend the search paths.
        if 'PATH' in os.environ:
            os.environ['PATH'] = "{0}:{1}".format(os.environ['PATH'], self.python_path)
        else:
            os.environ['PATH'] = self.python_path
        if 'PYTHONPATH' in os.environ:
            os.environ['PYTHONPATH'] = "{0}:{1}".format(os.environ['PYTHONPATH'], self.waagent_path)
        else:
            os.environ['PYTHONPATH'] = self.waagent_path

    def checkPackageInstalled(self,p):
        """
        There is no package manager in CoreOS.  Return 1 since it must be preinstalled.
        """
        return 1

    def checkDependencies(self):
        """Verify each required binary exists on PATH; return 1 on any miss."""
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1",chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def checkPackageUpdateable(self,p):
        """
        There is no package manager in CoreOS.  Return 0 since it can't be updated via package.
        """
        return 0

    def startAgentService(self):
        return Run('systemctl start ' + self.agent_service_name)

    def stopAgentService(self):
        return Run('systemctl stop ' + self.agent_service_name)

    def restartSshService(self):
        """
        SSH is socket activated on CoreOS.  No need to restart it.
        """
        return 0

    def sshDeployPublicKey(self,fprint,path):
        """
        We support PKCS8.
        """
        # ssh-keygen exit code != 0 -> failure (return 1).
        if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
            return 1
        else :
            return 0

    def RestartInterface(self, iface):
        Run("systemctl restart systemd-networkd")

    def CreateAccount(self, user, password, expiration, thumbprint):
        """
        Create a user account, with 'user', 'password', 'expiration', ssh keys
        and sudo permissions.
        Returns None if successful, error string on failure.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        uidmin = None
        try:
            uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        # Refuse to touch system accounts, except CoreOS's own 'core' UID.
        if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:
            Error("CreateAccount: " + user + " is a system user. Will not set password.")
            return "Failed to set password for system user: " + user + " (0x06)."
        if userentry == None:
            command = "useradd --create-home --password '*' " + user
            if expiration != None:
                # Strip any fractional seconds from the expiration timestamp.
                command += " --expiredate " + expiration.split('.')[0]
            if Run(command):
                Error("Failed to create user account: " + user)
                return "Failed to create user account: " + user + " (0x07)."
        else:
            Log("CreateAccount: " + user + " already exists. Will update password.")
        if password != None:
            self.changePass(user, password)
        try:
            # Passwordless sudo when no password was provisioned.
            if password == None:
                SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
            else:
                SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
            os.chmod("/etc/sudoers.d/waagent", 0440)
        except:
            Error("CreateAccount: Failed to configure sudo access for user.")
            return "Failed to configure sudo privileges (0x08)."
        home = MyDistro.GetHome()
        if thumbprint != None:
            # Derive the SSH keypair from the certificate thumbprint files.
            dir = home + "/" + user + "/.ssh"
            CreateDir(dir, user, 0700)
            pub = dir + "/id_rsa.pub"
            prv = dir + "/id_rsa"
            Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
            SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
            for f in [pub, prv]:
                os.chmod(f, 0600)
                ChangeOwner(f, user)
            SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
            ChangeOwner(dir + "/authorized_keys", user)
        Log("Created user account: " + user)
        return None

    def startDHCP(self):
        Run("systemctl start " + self.dhcp_client_name, chk_err=False)

    def stopDHCP(self):
        Run("systemctl stop " + self.dhcp_client_name, chk_err=False)

    def translateCustomData(self, data):
        # CoreOS custom data is always base64-encoded.
        return base64.b64decode(data)

    def getConfigurationPath(self):
        return "/usr/share/oem/waagent.conf"

############################################################
#	debianDistro
############################################################
# LSB sysV init script installed on Debian systems.
debian_init_file = """\
#!/bin/sh
### BEGIN INIT INFO
# Provides:          AzureLinuxAgent
# Required-Start:    $network $syslog
# Required-Stop:     $network $syslog
# Should-Start:      $network $syslog
# Should-Stop:       $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: AzureLinuxAgent
# Description:       AzureLinuxAgent
### END INIT INFO

. /lib/lsb/init-functions

OPTIONS="-daemon"
WAZD_BIN=/usr/sbin/waagent
WAZD_PID=/var/run/waagent.pid

case "$1" in
    start)
        log_begin_msg "Starting AzureLinuxAgent..."
        pid=$( pidofproc $WAZD_BIN )
        if [ -n "$pid" ] ; then
            log_begin_msg "Already running."
            log_end_msg 0
            exit 0
        fi
        start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS
        log_end_msg $?
        ;;
    stop)
        log_begin_msg "Stopping AzureLinuxAgent..."
        start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID
        ret=$?
        rm -f $WAZD_PID
        log_end_msg $ret
        ;;
    force-reload)
        $0 restart
        ;;
    restart)
        $0 stop
        $0 start
        ;;
    status)
        status_of_proc $WAZD_BIN && exit 0 || exit $?
        ;;
    *)
        log_success_msg "Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}"
        exit 1
        ;;
esac

exit 0
"""

class debianDistro(AbstractDistro):
    """
    debian Distro concrete class
    Put debian specific behavior here...
    """
    def __init__(self):
        super(debianDistro,self).__init__()
        self.requiredDeps += [ "/usr/sbin/update-rc.d" ]
        self.init_file=debian_init_file
        self.agent_package_name='walinuxagent'
        self.dhcp_client_name='dhclient'
        self.getpidcmd='pidof '
        self.shadow_file_mode=0640

    def checkPackageInstalled(self,p):
        """
        Check that the package is installed.
        Return 1 if installed, 0 if not installed.
        This method of using dpkg-query
        allows wildcards to be present in the package name.
        """
        if not Run("dpkg-query -W -f='${Status}\n' '" + p + "' | grep ' installed' 2>&1",chk_err=False):
            return 1
        else:
            return 0

    def checkDependencies(self):
        """
        Debian dependency check.
        python-pyasn1 is NOT needed.
        Return 1 unless all dependencies are satisfied.
        NOTE: using network*manager will catch either package name in
        Ubuntu or debian.
        """
        if self.checkPackageInstalled('network*manager'):
            Error(GuestAgentLongName + " is not compatible with network-manager.")
            return 1
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1",chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def checkPackageUpdateable(self,p):
        # Returns 1 when apt reports an upgradable match, 0 otherwise.
        if Run("apt-get update ; apt-get upgrade -us | grep " + p,chk_err=False):
            return 1
        else:
            return 0

    def installAgentServiceScriptFiles(self):
        """
        If we are packaged - the service name is
        walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return 0
        try:
            SetFileContents(self.init_script_file, self.init_file)
            os.chmod(self.init_script_file, 0744)
        except OSError, e:
            ErrorWithPrefix('installAgentServiceScriptFiles','Exception: '+str(e)+' occured creating ' + self.init_script_file)
            return 1
        return 0

    def registerAgentService(self):
        """Install the init script and register with update-rc.d."""
        if self.installAgentServiceScriptFiles() == 0:
            return Run('update-rc.d waagent defaults')
        else :
            return 1

    def uninstallAgentService(self):
        """Remove the init script registration via update-rc.d."""
        return Run('update-rc.d -f ' + self.agent_service_name + ' remove')

    def unregisterAgentService(self):
        """Stop the agent service, then uninstall it."""
        self.stopAgentService()
        return self.uninstallAgentService()

    def sshDeployPublicKey(self,fprint,path):
        """
        We support PKCS8.
        """
        if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
            return 1
        else :
            return 0

############################################################
#	KaliDistro - WIP
# Functioning on Kali 1.1.0a so far
############################################################
class KaliDistro(debianDistro):
    """
    Kali Distro concrete class
    Put Kali specific behavior here...
    """
    def __init__(self):
        super(KaliDistro,self).__init__()

############################################################
#	UbuntuDistro
############################################################
# Upstart job installed on (pre-systemd) Ubuntu systems.
ubuntu_upstart_file = """\
#walinuxagent - start Azure agent

description "walinuxagent"
author "Ben Howard "

start on (filesystem and started rsyslog)

pre-start script

    WALINUXAGENT_ENABLED=1
    [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent

    if [ "$WALINUXAGENT_ENABLED" != "1" ]; then
        exit 1
    fi

    if [ ! -x /usr/sbin/waagent ]; then
        exit 1
    fi

    #Load the udf module
    modprobe -b udf
end script

exec /usr/sbin/waagent -daemon
"""

class UbuntuDistro(debianDistro):
    """
    Ubuntu Distro concrete class
    Put Ubuntu specific behavior here...
    """
    def __init__(self):
        super(UbuntuDistro,self).__init__()
        self.init_script_file='/etc/init/waagent.conf'
        self.init_file=ubuntu_upstart_file
        self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log"]
        # Resolved lazily in getDhcpClientName() (release-dependent).
        self.dhcp_client_name=None
        self.getpidcmd='pidof '

    def registerAgentService(self):
        """Upstart needs only the job file; no separate registration step."""
        return self.installAgentServiceScriptFiles()

    def uninstallAgentService(self):
        """
        If we are packaged - the service name is walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return 0
        os.remove('/etc/init/' + self.agent_service_name + '.conf')

    def unregisterAgentService(self):
        """
        If we are packaged - the service name is walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return
        self.stopAgentService()
        return self.uninstallAgentService()

    def deprovisionWarnUser(self):
        """
        Ubuntu specific warning string from Deprovision.
        """
        print("WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.")

    def deprovisionDeleteFiles(self):
        """
        Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will
        break resolvconf.  Therefore, we check to see if resolvconf is in use,
        and if so, we remove the resolvconf artifacts.
        """
        if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':
            Log("resolvconf is not configured. Removing /etc/resolv.conf")
            self.fileBlackList.append('/etc/resolv.conf')
        else:
            Log("resolvconf is enabled; leaving /etc/resolv.conf intact")
            resolvConfD = '/etc/resolvconf/resolv.conf.d/'
            self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original'])
        for f in os.listdir(LibDir)+self.fileBlackList:
            try:
                os.remove(f)
            except:
                pass
        return 0

    def getDhcpClientName(self):
        """Pick the DHCP client binary name based on the Ubuntu release."""
        if self.dhcp_client_name != None :
            return self.dhcp_client_name
        if DistInfo()[1] == '12.04' :
            self.dhcp_client_name='dhclient3'
        else :
            self.dhcp_client_name='dhclient'
        return self.dhcp_client_name

    def waitForSshHostKey(self, path):
        """
        Wait until the ssh host key is generated by cloud init.
        """
        # Poll for up to ~10 seconds; cloud-init generates the key asynchronously.
        for retry in range(0, 10):
            if(os.path.isfile(path)):
                return True
            time.sleep(1)
        Error("Can't find host key: {0}".format(path))
        return False

############################################################
#	LinuxMintDistro
############################################################
class LinuxMintDistro(UbuntuDistro):
    """
    LinuxMint Distro concrete class
    Put LinuxMint specific behavior here...
    """
    def __init__(self):
        super(LinuxMintDistro,self).__init__()

############################################################
#	fedoraDistro
############################################################
# systemd unit installed on Fedora systems.
fedora_systemd_service = """\
[Unit]
Description=Azure Linux Agent
After=network.target
After=sshd.service
ConditionFileIsExecutable=/usr/sbin/waagent
ConditionPathExists=/etc/waagent.conf

[Service]
Type=simple
ExecStart=/usr/sbin/waagent -daemon

[Install]
WantedBy=multi-user.target
"""

class fedoraDistro(redhatDistro):
    """
    FedoraDistro concrete class
    Put Fedora specific behavior here...
    """
    def __init__(self):
        super(fedoraDistro,self).__init__()
        self.service_cmd = '/usr/bin/systemctl'
        self.hostname_file_path = '/etc/hostname'
        self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service'
        self.init_file = fedora_systemd_service
        self.grubKernelBootOptionsFile = '/etc/default/grub'
        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX='

    def publishHostname(self, name):
        """Write /etc/hostname and set DHCP_HOSTNAME in the active ifcfg file."""
        SetFileContents(self.hostname_file_path, name + '\n')
        ethernetInterface = MyDistro.GetInterfaceName()
        filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface
        if os.path.isfile(filepath):
            ReplaceFileContentsAtomic(filepath,
                                      "DHCP_HOSTNAME=" + name + "\n"
                                      + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"),
                                                         GetFileContents(filepath).split('\n'))))
        return 0

    def installAgentServiceScriptFiles(self):
        """Install the systemd unit file and reload systemd."""
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0644)
        return Run(self.service_cmd + ' daemon-reload')

    def registerAgentService(self):
        self.installAgentServiceScriptFiles()
        return Run(self.service_cmd + ' enable ' + self.agent_service_name)

    def uninstallAgentService(self):
        """
        Call service subsystem to remove waagent script.
        """
        return Run(self.service_cmd + ' disable ' + self.agent_service_name)

    def unregisterAgentService(self):
        """
        Calls self.stopAgentService and call self.uninstallAgentService()
        """
        self.stopAgentService()
        self.uninstallAgentService()

    def startAgentService(self):
        """
        Service call to start the Agent service
        """
        return Run(self.service_cmd + ' start ' + self.agent_service_name)

    def stopAgentService(self):
        """
        Service call to stop the Agent service
        """
        return Run(self.service_cmd + ' stop ' + self.agent_service_name, False)

    def restartSshService(self):
        """
        Service call to re(start) the SSH service
        """
        sshRestartCmd = self.service_cmd + " " + self.ssh_service_restart_option + " " + self.ssh_service_name
        retcode = Run(sshRestartCmd)
        if retcode > 0:
            Error("Failed to restart SSH service with return code:" + str(retcode))
        return retcode

    def checkPackageInstalled(self, p):
        """
        Query package database for prescence of an installed package.
        """
        import rpm
        ts = rpm.TransactionSet()
        rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, p)
        return bool(len(rpms) > 0)

    def deleteRootPassword(self):
        """Lock the root account's password."""
        return Run("/sbin/usermod root -p '!!'")

    def packagedInstall(self,buildroot):
        """
        Called from setup.py for use by RPM.
        Copies generated files waagent.conf, under the buildroot.
        """
        if not os.path.exists(buildroot+'/etc'):
            os.mkdir(buildroot+'/etc')
        SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file)
        if not os.path.exists(buildroot+'/etc/logrotate.d'):
            os.mkdir(buildroot+'/etc/logrotate.d')
        SetFileContents(buildroot+'/etc/logrotate.d/WALinuxAgent', WaagentLogrotate)
        self.init_script_file=buildroot+self.init_script_file
        # this allows us to call installAgentServiceScriptFiles()
        if not os.path.exists(os.path.dirname(self.init_script_file)):
            os.mkdir(os.path.dirname(self.init_script_file))
        self.installAgentServiceScriptFiles()

    def CreateAccount(self, user, password, expiration, thumbprint):
        """Base account creation, then add the user to the wheel group."""
        super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint)
        Run('/sbin/usermod ' + user + ' -G wheel')

    def DeleteAccount(self, user):
        """Strip supplementary groups, then base account deletion."""
        Run('/sbin/usermod ' + user + ' -G ""')
        super(fedoraDistro, self).DeleteAccount(user)

############################################################
#	FreeBSD
############################################################
# Default waagent.conf contents written on FreeBSD installs.
FreeBSDWaagentConf = """\
#
# Azure Linux Agent Configuration
#

Role.StateConsumer=None
# Specified program is invoked with the argument "Ready" when we report ready status
# to the endpoint server.
Role.ConfigurationConsumer=None
# Specified program is invoked with XML file argument specifying role configuration.
Role.TopologyConsumer=None
# Specified program is invoked with XML file argument specifying role topology.

Provisioning.Enabled=y
#
Provisioning.DeleteRootPassword=y
# Password authentication for root account will be unavailable.
Provisioning.RegenerateSshHostKeyPair=y
# Generate fresh host key pair.
Provisioning.SshHostKeyPairType=rsa
# Supported values are "rsa", "dsa" and "ecdsa".
Provisioning.MonitorHostName=y
# Monitor host name changes and publish changes via DHCP requests.

ResourceDisk.Format=y
# Format if unformatted. If 'n', resource disk will not be mounted.
ResourceDisk.Filesystem=ufs2
#
ResourceDisk.MountPoint=/mnt/resource
#
ResourceDisk.EnableSwap=n
# Create and use swapfile on resource disk.
ResourceDisk.SwapSizeMB=0
# Size of the swapfile.

LBProbeResponder=y
# Respond to load balancer probes if requested by Azure.

Logs.Verbose=n
# Enable verbose logs

OS.RootDeviceScsiTimeout=300
# Root device timeout in seconds.
OS.OpensslPath=None
# If "None", the system default version is used.
"""

# FreeBSD rc.d script for the agent service.
bsd_init_file="""\
#! /bin/sh

# PROVIDE: waagent
# REQUIRE: DAEMON cleanvar sshd
# BEFORE: LOGIN
# KEYWORD: nojail

. /etc/rc.subr

export PATH=$PATH:/usr/local/bin
name="waagent"
rcvar="waagent_enable"
command="/usr/sbin/${name}"
command_interpreter="/usr/local/bin/python"
waagent_flags=" daemon &"

pidfile="/var/run/waagent.pid"

load_rc_config $name
run_rc_command "$1"
"""

# Helper script spawned on FreeBSD to format/mount the resource disk and
# optionally enable a swapfile on it (runs out-of-process, see
# ActivateResourceDiskNoThread below).
bsd_activate_resource_disk_txt="""\
#!/usr/bin/env python

import os
import sys
import imp

# waagent has no '.py' therefore create waagent module import manually.
__name__='setupmain' #prevent waagent.__main__ from executing
waagent=imp.load_source('waagent','/tmp/waagent')
waagent.LoggerInit('/var/log/waagent.log','/dev/console')
from waagent import RunGetOutput,Run

Config=waagent.ConfigurationProvider(None)
format = Config.get("ResourceDisk.Format")
if format == None or format.lower().startswith("n"):
    sys.exit(0)

device_base = 'da1'
device = "/dev/" + device_base

for entry in RunGetOutput("mount")[1].split():
    if entry.startswith(device + "s1"):
        waagent.Log("ActivateResourceDisk: " + device + "s1 is already mounted.")
        sys.exit(0)

mountpoint = Config.get("ResourceDisk.MountPoint")
if mountpoint == None:
    mountpoint = "/mnt/resource"
waagent.CreateDir(mountpoint, "root", 0755)

fs = Config.get("ResourceDisk.Filesystem")

if waagent.FreeBSDDistro().mediaHasFilesystem(device) == False :
    Run("newfs " + device + "s1")

if Run("mount " + device + "s1 " + mountpoint):
    waagent.Error("ActivateResourceDisk: Failed to mount resource disk (" + device + "s1).")
    sys.exit(0)

waagent.Log("Resource disk (" + device + "s1) is mounted at " + mountpoint + " with fstype " + fs)

waagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT)

swap = Config.get("ResourceDisk.EnableSwap")
if swap == None or swap.lower().startswith("n"):
    sys.exit(0)

sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024
if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024):
    os.remove(mountpoint + "/swapfile")
if not os.path.isfile(mountpoint + "/swapfile"):
    Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB))
if Run("mdconfig -a -t vnode -f " + mountpoint + "/swapfile -u 0"):
    waagent.Error("ActivateResourceDisk: Configuring swap - Failed to create md0")
if not Run("swapon /dev/md0"):
    waagent.Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile")
else:
    waagent.Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile")
"""

class FreeBSDDistro(AbstractDistro):
    """
    FreeBSD distro concrete class.
    """
    def __init__(self):
        """
        Generic Attributes go here.  These are based on 'majority rules'.
        This __init__() may be called or overriden by the child.
        """
        super(FreeBSDDistro,self).__init__()
        self.agent_service_name = os.path.basename(sys.argv[0])
        self.selinux=False
        self.ssh_service_name='sshd'
        self.ssh_config_file='/etc/ssh/sshd_config'
        self.hostname_file_path='/etc/hostname'
        self.dhcp_client_name='dhclient'
        self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'pw'
                              , 'openssl', 'fdisk', 'sed', 'grep' , 'sudo']
        self.init_script_file='/etc/rc.d/waagent'
        self.init_file=bsd_init_file
        self.agent_package_name='WALinuxAgent'
        self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ]
        self.agent_files_to_uninstall = ["/etc/waagent.conf"]
        self.grubKernelBootOptionsFile = '/boot/loader.conf'
        self.grubKernelBootOptionsLine = ''
        self.getpidcmd = 'pgrep -n'
        self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if='  # custom data max len is 64k
        self.sudoers_dir_base = '/usr/local/etc'
        self.waagent_conf_file = FreeBSDWaagentConf

    def installAgentServiceScriptFiles(self):
        """Write the rc.d script and enable the service in /etc/rc.conf."""
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0777)
        AppendFileContents("/etc/rc.conf","waagent_enable='YES'\n")
        return 0

    def registerAgentService(self):
        self.installAgentServiceScriptFiles()
        return Run("services_mkdb " + self.init_script_file)

    def sshDeployPublicKey(self,fprint,path):
        """
        We support PKCS8.
        """
        if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
            return 1
        else :
            return 0

    def deleteRootPassword(self):
        """
        BSD root password removal.
        """
        filepath="/etc/master.passwd"
        # Blank out root's password hash in master.passwd, then rebuild
        # the password database.
        ReplaceStringInFile(filepath,r'root:.*?:','root::')
        #ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n"
        #                          + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n'))))
        os.chmod(filepath,self.shadow_file_mode)
        if self.isSelinuxSystem():
            self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0')
        RunGetOutput("pwd_mkdb -u root /etc/master.passwd")
        Log("Root password deleted.")
        return 0

    def changePass(self,user,password):
        """Set 'user's password by piping it to pw(8) on stdin."""
        return RunSendStdin("pw usermod " + user + " -h 0 ",password, log_cmd=False)

    def load_ata_piix(self):
        # Not applicable on FreeBSD.
        return 0

    def unload_ata_piix(self):
        # Not applicable on FreeBSD.
        return 0

    def checkDependencies(self):
        """
        FreeBSD dependency check.
        Return 1 unless all dependencies are satisfied.
        """
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1",chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def packagedInstall(self,buildroot):
        # No packaged (RPM/deb) install path on FreeBSD.
        pass

    def GetInterfaceName(self):
        """
        Return the name of the active ethernet interface.
        """
        iface,inet,mac=self.GetFreeBSDEthernetInfo()
        return iface

    def RestartInterface(self, iface):
        Run("service netif restart")

    def GetIpv4Address(self):
        """
        Return the ip of the active ethernet interface.
        """
        iface,inet,mac=self.GetFreeBSDEthernetInfo()
        return inet

    def GetMacAddress(self):
        """
        Return the MAC of the active ethernet interface as a list of ints.
        """
        iface,inet,mac=self.GetFreeBSDEthernetInfo()
        l=mac.split(':')
        r=[]
        for i in l:
            r.append(string.atoi(i,16))
        return r

    def GetFreeBSDEthernetInfo(self):
        """
        There is no SIOCGIFCONF on freeBSD - just parse ifconfig.
        Returns strings: iface, inet4_addr, and mac
        or 'None,None,None' if unable to parse.
        We will sleep and retry as the network must be up.
        """
        code,output=RunGetOutput("ifconfig",chk_err=False)
        Log(output)
        retries=10
        cmd='ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP '
        code=1
        while code > 0 :
            if code > 0 and retries == 0:
                Error("GetFreeBSDEthernetInfo - Failed to detect ethernet interface")
                return None, None, None
            code,output=RunGetOutput(cmd,chk_err=False)
            retries-=1
            if code > 0 and retries > 0 :
                Log("GetFreeBSDEthernetInfo - Error: retry ethernet detection " + str(retries))
                if retries == 9 :
                    # First failure: kick dhclient on the first interface seen.
                    c,o=RunGetOutput("ifconfig | grep -A1 -B2 ether",chk_err=False)
                    if c == 0:
                        t=o.replace('\n',' ')
                        t=t.split()
                        i=t[0][:-1]
                        Log(RunGetOutput('id')[1])
                        Run('dhclient '+i)
                time.sleep(10)
        # Tokenize the matched ifconfig block; first token is "<iface>:".
        j=output.replace('\n',' ')
        j=j.split()
        iface=j[0][:-1]
        for i in range(len(j)):
            if j[i] == 'inet' :
                inet=j[i+1]
            elif j[i] == 'ether' :
                mac=j[i+1]
        return iface, inet, mac

    def CreateAccount(self,user, password, expiration, thumbprint):
        """
        Create a user account, with 'user', 'password', 'expiration', ssh keys
        and sudo permissions.
        Returns None if successful, error string on failure.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        uidmin = None
        try:
            if os.path.isfile("/etc/login.defs"):
                uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        if userentry != None and userentry[2] < uidmin:
            Error("CreateAccount: " + user + " is a system user. Will not set password.")
            return "Failed to set password for system user: " + user + " (0x06)."
        if userentry == None:
            command = "pw useradd " + user + " -m"
            if expiration != None:
                command += " -e " + expiration.split('.')[0]
            if Run(command):
                Error("Failed to create user account: " + user)
                return "Failed to create user account: " + user + " (0x07)."
        else:
            Log("CreateAccount: " + user + " already exists. Will update password.")
        if password != None:
            self.changePass(user,password)
        try:
            # for older distros create sudoers.d
            if not os.path.isdir(MyDistro.sudoers_dir_base+'/sudoers.d/'):
                # create the /etc/sudoers.d/ directory
                os.mkdir(MyDistro.sudoers_dir_base+'/sudoers.d')
                # add the include of sudoers.d to the /etc/sudoers
                SetFileContents(MyDistro.sudoers_dir_base+'/sudoers',GetFileContents(MyDistro.sudoers_dir_base+'/sudoers')+'\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\n')
            if password == None:
                SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
            else:
                SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
            os.chmod(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", 0440)
        except:
            Error("CreateAccount: Failed to configure sudo access for user.")
            return "Failed to configure sudo privileges (0x08)."
        home = MyDistro.GetHome()
        if thumbprint != None:
            # Derive the SSH keypair from the certificate thumbprint files.
            dir = home + "/" + user + "/.ssh"
            CreateDir(dir, user, 0700)
            pub = dir + "/id_rsa.pub"
            prv = dir + "/id_rsa"
            Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
            SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
            for f in [pub, prv]:
                os.chmod(f, 0600)
                ChangeOwner(f, user)
            SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
            ChangeOwner(dir + "/authorized_keys", user)
        Log("Created user account: " + user)
        return None

    def DeleteAccount(self,user):
        """
        Delete the 'user'.
        Clear utmp first, to avoid error.
        Removes the /etc/sudoers.d/waagent file.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        if userentry == None:
            Error("DeleteAccount: " + user + " not found.")
            return
        uidmin = None
        try:
            if os.path.isfile("/etc/login.defs"):
                uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        if userentry[2] < uidmin:
            Error("DeleteAccount: " + user + " is a system user. Will not delete account.")
            return
        Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted
        pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, stdin=subprocess.PIPE).pid
        try:
            os.remove(MyDistro.sudoers_dir_base+"/sudoers.d/waagent")
        except:
            pass
        return

    def ActivateResourceDiskNoThread(self):
        """
        Format, mount, and if specified in the configuration
        set resource disk as swap.
        """
        global DiskActivated
        # The helper script re-imports waagent from /tmp, so copy it there
        # and run the activation out-of-process.
        Run('cp /usr/sbin/waagent /tmp/')
        SetFileContents('/tmp/bsd_activate_resource_disk.py',
                        bsd_activate_resource_disk_txt)
        Run('chmod +x /tmp/bsd_activate_resource_disk.py')
        pid = subprocess.Popen(["/tmp/bsd_activate_resource_disk.py", ""]).pid
        Log("Spawning bsd_activate_resource_disk.py")
        DiskActivated = True
        return

    def Install(self):
        """
        Install the agent service.
        Check dependencies.
        Create /etc/waagent.conf and move old version to /etc/waagent.conf.old
        Copy RulesFiles to /var/lib/waagent
        Create /etc/logrotate.d/waagent
        Set /etc/ssh/sshd_config ClientAliveInterval to 180
        Call ApplyVNUMAWorkaround()
        """
        if MyDistro.checkDependencies():
            return 1
        os.chmod(sys.argv[0], 0755)
        SwitchCwd()
        for a in RulesFiles:
            if os.path.isfile(a):
                if os.path.isfile(GetLastPathElement(a)):
                    os.remove(GetLastPathElement(a))
                shutil.move(a, ".")
                Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) )
        MyDistro.registerAgentService()
        if os.path.isfile("/etc/waagent.conf"):
            try:
                os.remove("/etc/waagent.conf.old")
            except:
                pass
            try:
                os.rename("/etc/waagent.conf", "/etc/waagent.conf.old")
                Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old")
            except:
                pass
        SetFileContents("/etc/waagent.conf", self.waagent_conf_file)
        if os.path.exists('/usr/local/etc/logrotate.d/'):
            SetFileContents("/usr/local/etc/logrotate.d/waagent", WaagentLogrotate)
        filepath = "/etc/ssh/sshd_config"
        ReplaceFileContentsAtomic(filepath,
                                  "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"),
GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") #ApplyVNUMAWorkaround() return 0 def mediaHasFilesystem(self,dsk): if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep "invalid fdisk partition table found" ',False): return False return True def mountDVD(self,dvd,location): #At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location retcode,out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml') if retcode != 0: return retcode,out ovfxml = (GetFileContents(location+"/ovf-env.xml",asbin=False)) if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. ovfxml = ovfxml.strip(chr(0x00)) ovfxml = "".join(filter(lambda x: ord(x)<128, ovfxml)) ovfxml = re.sub(r'.*\Z','',ovfxml,0,re.DOTALL) ovfxml += '' SetFileContents(location+"/ovf-env.xml", ovfxml) return retcode,out def GetHome(self): return '/home' def initScsiDiskTimeout(self): """ Set the SCSI disk timeout by updating the kernal config """ timeout = Config.get("OS.RootDeviceScsiTimeout") if timeout: Run("sysctl kern.cam.da.default_timeout=" + timeout) def setScsiDiskTimeout(self): return def setBlockDeviceTimeout(self, device, timeout): return def getProcessorCores(self): return int(RunGetOutput("sysctl hw.ncpu | awk '{print $2}'")[1]) def getTotalMemory(self): return int(RunGetOutput("sysctl hw.realmem | awk '{print $2}'")[1])/1024 def setDefaultGateway(self, gateway): Run("/sbin/route add default " + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " " + mask + " " + gateway, chk_err=False) ############################################################ # END DISTRO CLASS DEFS ############################################################ # This lets us index into a string or an array of integers 
transparently. def Ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. """ if type(a) == type("a"): a = ord(a) return a def IsLinux(): """ Returns True if platform is Linux. Generic utility function. """ return (platform.uname()[0] == "Linux") def GetLastPathElement(path): """ Similar to basename. Generic utility function. """ return path.rsplit('/', 1)[1] def GetFileContents(filepath,asbin=False): """ Read and return contents of 'filepath'. """ mode='r' if asbin: mode+='b' c=None try: with open(filepath, mode) as F : c=F.read() except IOError, e: ErrorWithPrefix('GetFileContents','Reading from file ' + filepath + ' Exception is ' + str(e)) return None return c def SetFileContents(filepath, contents): """ Write 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1', 'ignore') try: with open(filepath, "wb+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('SetFileContents','Writing to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def AppendFileContents(filepath, contents): """ Append 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1') try: with open(filepath, "a+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('AppendFileContents','Appending to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def ReplaceFileContentsAtomic(filepath, contents): """ Write 'contents' to 'filepath' by creating a temp file, and replacing original. 
""" handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath)) if type(contents) == str : contents=contents.encode('latin-1') try: os.write(handle, contents) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e)) return None finally: os.close(handle) try: os.rename(temp, filepath) return None except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' + str(e)) try: os.remove(filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) try: os.rename(temp,filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) return 1 return 0 def GetLineStartingWith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in GetFileContents(filepath).split('\n'): if line.startswith(prefix): return line return None def Run(cmd,chk_err=True): """ Calls RunGetOutput on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode,out=RunGetOutput(cmd,chk_err) return retcode def RunGetOutput(cmd, chk_err=True, log_cmd=True): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd) try: output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) except subprocess.CalledProcessError,e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(e.returncode) ) Error('CalledProcessError. Command string was ' + e.cmd ) Error('CalledProcessError. 
Command result was ' + (e.output[:-1]).decode('latin-1')) return e.returncode,e.output.decode('latin-1') return 0,output.decode('latin-1') def RunSendStdin(cmd, input, chk_err=True, log_cmd=True): """ Wrapper for subprocess.Popen. Execute 'cmd', sending 'input' to STDIN of 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd+input) try: me=subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE,stderr=subprocess.STDOUT,stdout=subprocess.PIPE) output=me.communicate(input) except OSError , e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return 1,output[0].decode('latin-1') if me.returncode is not 0 and chk_err is True and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return me.returncode,output[0].decode('latin-1') def GetNodeTextData(a): """ Filter non-text nodes from DOM tree """ for b in a.childNodes: if b.nodeType == b.TEXT_NODE: return b.data def GetHome(): """ Attempt to guess the $HOME location. Return the path string. """ home = None try: home = GetLineStartingWith("HOME", "/etc/default/useradd").split('=')[1].strip() except: pass if (home == None) or (home.startswith("/") == False): home = "/home" return home def ChangeOwner(filepath, user): """ Lookup user. Attempt chown 'filepath' to 'user'. """ p = None try: p = pwd.getpwnam(user) except: pass if p != None: if not os.path.exists(filepath): Error("Path does not exist: {0}".format(filepath)) else: os.chown(filepath, p[2], p[3]) def CreateDir(dirpath, user, mode): """ Attempt os.makedirs, catch all exceptions. Call ChangeOwner afterwards. 
""" try: os.makedirs(dirpath, mode) except: pass ChangeOwner(dirpath, user) def CreateAccount(user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd -m " + user if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: MyDistro.changePass(user, password) try: # for older distros create sudoers.d if not os.path.isdir('/etc/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir('/etc/sudoers.d/') # add the include of sudoers.d to the /etc/sudoers SetFileContents('/etc/sudoers',GetFileContents('/etc/sudoers')+'\n#includedir /etc/sudoers.d\n') if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." 
home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. Will not delete account.") return Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted Run("userdel -f -r " + user) try: os.remove("/etc/sudoers.d/waagent") except: pass return def IsInRangeInclusive(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def IsPrintable(ch): """ Return True if character is displayable. """ return IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or IsInRangeInclusive(ch, Ord('a'), Ord('z')) or IsInRangeInclusive(ch, Ord('0'), Ord('9')) def HexDump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. 
""" if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte=buffer[j] if type(byte) == str: byte = ord(byte.decode('latin1')) k = '.' if IsPrintable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def SimpleLog(file_path,message): if not file_path or len(message) < 1: return t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) lines=re.sub(re.compile(r'^(.)',re.MULTILINE),t+r'\1',message) with open(file_path, "a") as F : lines = filter(lambda x : x in string.printable, lines) F.write(lines.encode('ascii','ignore') + "\n") class Logger(object): """ The Agent's logging assumptions are: For Log, and LogWithPrefix all messages are logged to the self.file_path and to the self.con_path. Setting either path parameter to None skips that log. If Verbose is enabled, messages calling the LogIfVerbose method will be logged to file_path yet not to con_path. Error and Warn messages are normal log messages with the 'ERROR:' or 'WARNING:' prefix added. """ def __init__(self,filepath,conpath,verbose=False): """ Construct an instance of Logger. """ self.file_path=filepath self.con_path=conpath self.verbose=verbose def ThrottleLog(self,counter): """ Log everything up to 10, every 10 up to 100, then every 100. """ return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0) def LogToFile(self,message): """ Write 'message' to logfile. 
""" if self.file_path: try: with open(self.file_path, "a") as F : message = filter(lambda x : x in string.printable, message) F.write(message.encode('ascii','ignore') + "\n") except IOError, e: print e pass def LogToCon(self,message): """ Write 'message' to /dev/console. This supports serial port logging if the /dev/console is redirected to ttys0 in kernel boot options. """ if self.con_path: try: with open(self.con_path, "w") as C : message = filter(lambda x : x in string.printable, message) C.write(message.encode('ascii','ignore') + "\n") except IOError, e: pass def Log(self,message): """ Standard Log function. Logs to self.file_path, and con_path """ self.LogWithPrefix("", message) def LogWithPrefix(self,prefix, message): """ Prefix each line of 'message' with current time+'prefix'. """ t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def NoLog(self,message): """ Don't Log. """ pass def LogIfVerbose(self,message): """ Only log 'message' if global Verbose is True. """ self.LogWithPrefixIfVerbose('',message) def LogWithPrefixIfVerbose(self,prefix, message): """ Only log 'message' if global Verbose is True. Prefix each line of 'message' with current time+'prefix'. """ if self.verbose == True: t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def Warn(self,message): """ Prepend the text "WARNING:" to the prefix for each line in 'message'. """ self.LogWithPrefix("WARNING:", message) def Error(self,message): """ Call ErrorWithPrefix(message). """ ErrorWithPrefix("", message) def ErrorWithPrefix(self,prefix, message): """ Prepend the text "ERROR:" to the prefix for each line in 'message'. 
Errors written to logfile, and /dev/console """ self.LogWithPrefix("ERROR:", message) def LoggerInit(log_file_path,log_con_path,verbose=False): """ Create log object and export its methods to global scope. """ global Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger l=Logger(log_file_path,log_con_path,verbose) Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger = l.Log,l.LogWithPrefix,l.LogIfVerbose,l.LogWithPrefixIfVerbose,l.Error,l.ErrorWithPrefix,l.Warn,l.NoLog,l.ThrottleLog,l def Linux_ioctl_GetInterfaceMac(ifname): """ Return the mac-address bound to the socket. """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1'))) return ''.join(['%02X' % Ord(char) for char in info[18:24]]) def GetFirstActiveNetworkInterfaceNonLoopback(): """ Return the interface name, and ip addr of the first active non-loopback interface. """ iface='' expected=16 # how many devices should I expect... is_64bits = sys.maxsize > 2**32 struct_size=40 if is_64bits else 32 # for 64bit the size is 40 bytes, for 32bits it is 32 bytes. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) buff=array.array('B', b'\0' * (expected*struct_size)) retsize=(struct.unpack('iL', fcntl.ioctl(s.fileno(), 0x8912, struct.pack('iL',expected*struct_size,buff.buffer_info()[0]))))[0] if retsize == (expected*struct_size) : Warn('SIOCGIFCONF returned more than ' + str(expected) + ' up network interfaces.') s=buff.tostring() preferred_nic = Config.get("Network.Interface") for i in range(0,struct_size*expected,struct_size): iface=s[i:i+16].split(b'\0', 1)[0] if iface == b'lo': continue elif preferred_nic is None: break elif iface == preferred_nic: break return iface.decode('latin-1'), socket.inet_ntoa(s[i+20:i+24]) def GetIpv4Address(): """ Return the ip of the first active non-loopback interface. 
""" iface,addr=GetFirstActiveNetworkInterfaceNonLoopback() return addr def HexStringToByteArray(a): """ Return hex string packed into a binary struct. """ b = b"" for c in range(0, len(a) // 2): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b def GetMacAddress(): """ Convienience function, returns mac addr bound to first non-loobback interface. """ ifname='' while len(ifname) < 2 : ifname=GetFirstActiveNetworkInterfaceNonLoopback()[0] a = Linux_ioctl_GetInterfaceMac(ifname) return HexStringToByteArray(a) def DeviceForIdePort(n): """ Return device name attached to ide port 'n'. """ if n > 3: return None g0 = "00000000" if n > 1: g0 = "00000001" n = n - 2 device = None path = "/sys/bus/vmbus/devices/" for vmbus in os.listdir(path): guid = GetFileContents(path + vmbus + "/device_id").lstrip('{').split('-') if guid[0] == g0 and guid[1] == "000" + str(n): for root, dirs, files in os.walk(path + vmbus): if root.endswith("/block"): device = dirs[0] break else : #older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] break break return device class HttpResourceGoneError(Exception): pass class Util(object): """ Http communication class. Base of GoalState, and Agent classes. 
""" RetryWaitingInterval=10 def __init__(self): self.Endpoint = None def _ParseUrl(self, url): secure = False host = self.Endpoint path = url port = None #"http[s]://hostname[:port][/]" if url.startswith("http://"): url = url[7:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" elif url.startswith("https://"): secure = True url = url[8:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" if host is None: raise ValueError("Host is invalid:{0}".format(url)) if(":" in host): pos = host.rfind(":") port = int(host[pos + 1:]) host = host[0:pos] return host, port, secure, path def GetHttpProxy(self, secure): """ Get http_proxy and https_proxy from environment variables. Username and password is not supported now. """ host = Config.get("HttpProxy.Host") port = Config.get("HttpProxy.Port") return (host, port) def _HttpRequest(self, method, host, path, port=None, data=None, secure=False, headers=None, proxyHost=None, proxyPort=None): resp = None conn = None try: if secure: port = 443 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplib.HTTPSConnection(proxyHost, proxyPort, timeout=10) conn.set_tunnel(host, port) #If proxy is used, full url is needed. path = "https://{0}:{1}{2}".format(host, port, path) else: conn = httplib.HTTPSConnection(host, port, timeout=10) else: port = 80 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplib.HTTPConnection(proxyHost, proxyPort, timeout=10) #If proxy is used, full url is needed. 
path = "http://{0}:{1}{2}".format(host, port, path) else: conn = httplib.HTTPConnection(host, port, timeout=10) if headers == None: conn.request(method, path, data) else: conn.request(method, path, data, headers) resp = conn.getresponse() except httplib.HTTPException, e: Error('HTTPException {0}, args:{1}'.format(e, repr(e.args))) except IOError, e: Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args))) return resp def HttpRequest(self, method, url, data=None, headers=None, maxRetry=3, chkProxy=False): """ Sending http request to server On error, sleep 10 and maxRetry times. Return the output buffer or None. """ LogIfVerbose("HTTP Req: {0} {1}".format(method, url)) LogIfVerbose("HTTP Req: Data={0}".format(data)) LogIfVerbose("HTTP Req: Header={0}".format(headers)) try: host, port, secure, path = self._ParseUrl(url) except ValueError, e: Error("Failed to parse url:{0}".format(url)) return None #Check proxy proxyHost, proxyPort = (None, None) if chkProxy: proxyHost, proxyPort = self.GetHttpProxy(secure) #If httplib module is not built with ssl support. Fallback to http if secure and not hasattr(httplib, "HTTPSConnection"): Warn("httplib is not built with ssl support") secure = False proxyHost, proxyPort = self.GetHttpProxy(secure) #If httplib module doesn't support https tunnelling. 
Fallback to http if secure and \ proxyHost is not None and \ proxyPort is not None and \ not hasattr(httplib.HTTPSConnection, "set_tunnel"): Warn("httplib doesn't support https tunnelling(new in python 2.7)") secure = False proxyHost, proxyPort = self.GetHttpProxy(secure) resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure, headers=headers, proxyHost=proxyHost, proxyPort=proxyPort) for retry in range(0, maxRetry): if resp is not None and \ (resp.status == httplib.OK or \ resp.status == httplib.CREATED or \ resp.status == httplib.ACCEPTED): return resp; if resp is not None and resp.status == httplib.GONE: raise HttpResourceGoneError("Http resource gone.") Error("Retry={0}".format(retry)) Error("HTTP Req: {0} {1}".format(method, url)) Error("HTTP Req: Data={0}".format(data)) Error("HTTP Req: Header={0}".format(headers)) if resp is None: Error("HTTP Err: response is empty.".format(retry)) else: Error("HTTP Err: Status={0}".format(resp.status)) Error("HTTP Err: Reason={0}".format(resp.reason)) Error("HTTP Err: Header={0}".format(resp.getheaders())) Error("HTTP Err: Body={0}".format(resp.read())) time.sleep(self.__class__.RetryWaitingInterval) resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure, headers=headers, proxyHost=proxyHost, proxyPort=proxyPort) return None def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("GET", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("HEAD", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("POST", url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("PUT", url, data=data, headers=headers, maxRetry=maxRetry, 
chkProxy=chkProxy) def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False): return self.HttpRequest("DELETE", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False): """ Return data from an HTTP get on 'url'. """ resp = self.HttpGet(url, headers=None, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False): """ Return data from an HTTP get on 'url' with x-ms-agent-name and x-ms-version headers. """ resp = self.HttpGet(url, headers={ "x-ms-agent-name": GuestAgentName, "x-ms-version": ProtocolVersion }, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3, chkProxy=False): """ Return output of get using ssl cert. """ resp = self.HttpGet(url, headers={ "x-ms-agent-name": GuestAgentName, "x-ms-version": ProtocolVersion, "x-ms-cipher-name": "DES_EDE3_CBC", "x-ms-guest-agent-public-x509-cert": transportCert }, maxRetry=maxRetry, chkProxy=chkProxy) return resp.read() if resp is not None else None def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False): headers = { "x-ms-agent-name": GuestAgentName, "Content-Type": "text/xml; charset=utf-8", "x-ms-version": ProtocolVersion } try: return self.HttpPost(url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy) except HttpResourceGoneError as e: Error("Failed to post: {0} {1}".format(url, e)) return None __StorageVersion="2014-02-14" def GetBlobType(url): restutil = Util() #Check blob type LogIfVerbose("Check blob type.") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) blobPropResp = restutil.HttpHead(url, { "x-ms-date" : timestamp, 'x-ms-version' : __StorageVersion }, chkProxy=True); blobType = None if blobPropResp is None: Error("Can't get status blob type.") return None blobType = 
blobPropResp.getheader("x-ms-blob-type") LogIfVerbose("Blob type={0}".format(blobType)) return blobType def PutBlockBlob(url, data): restutil = Util() LogIfVerbose("Upload block blob") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) ret = restutil.HttpPut(url, data, { "x-ms-date" : timestamp, "x-ms-blob-type" : "BlockBlob", "Content-Length": str(len(data)), "x-ms-version" : __StorageVersion }, chkProxy=True) if ret is None: Error("Failed to upload block blob for status.") return -1 return 0 def PutPageBlob(url, data): restutil = Util() LogIfVerbose("Replace old page blob") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) #Align to 512 bytes pageBlobSize = ((len(data) + 511) / 512) * 512 ret = restutil.HttpPut(url, "", { "x-ms-date" : timestamp, "x-ms-blob-type" : "PageBlob", "Content-Length": "0", "x-ms-blob-content-length" : str(pageBlobSize), "x-ms-version" : __StorageVersion }, chkProxy=True) if ret is None: Error("Failed to clean up page blob for status") return -1 if url.index('?') < 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) LogIfVerbose("Upload page blob") pageMax = 4 * 1024 * 1024 #Max page size: 4MB start = 0 end = 0 while end < len(data): end = min(len(data), start + pageMax) contentSize = end - start #Align to 512 bytes pageEnd = ((end + 511) / 512) * 512 bufSize = pageEnd - start buf = bytearray(bufSize) buf[0 : contentSize] = data[start : end] ret = restutil.HttpPut(url, buffer(buf), { "x-ms-date" : timestamp, "x-ms-range" : "bytes={0}-{1}".format(start, pageEnd - 1), "x-ms-page-write" : "update", "x-ms-version" : __StorageVersion, "Content-Length": str(pageEnd - start) }, chkProxy=True) if ret is None: Error("Failed to upload page blob for status") return -1 start = end return 0 def UploadStatusBlob(url, data): LogIfVerbose("Upload status blob") LogIfVerbose("Status={0}".format(data)) blobType = GetBlobType(url) if blobType == "BlockBlob": return PutBlockBlob(url, data) elif blobType == 
"PageBlob": return PutPageBlob(url, data) else: Error("Unknown blob type: {0}".format(blobType)) return -1 class TCPHandler(SocketServer.BaseRequestHandler): """ Callback object for LoadBalancerProbeServer. Recv and send LB probe messages. """ def __init__(self,lb_probe): super(TCPHandler,self).__init__() self.lb_probe=lb_probe def GetHttpDateTimeNow(self): """ Return formatted gmtime "Date: Fri, 25 Mar 2011 04:53:10 GMT" """ return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) def handle(self): """ Log LB probe messages, read the socket buffer, send LB probe response back to server. """ self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000 log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)] strCounter = str(self.lb_probe.ProbeCounter) if self.lb_probe.ProbeCounter == 1: Log("Receiving LB probes.") log("Received LB probe # " + strCounter) self.request.recv(1024) self.request.send("HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/html\r\nDate: " + self.GetHttpDateTimeNow() + "\r\n\r\nOK") class LoadBalancerProbeServer(object): """ Threaded object to receive and send LB probe messages. Load Balancer messages but be recv'd by the load balancing server, or this node may be shut-down. 
""" def __init__(self, port): self.ProbeCounter = 0 self.server = SocketServer.TCPServer((self.get_ip(), port), TCPHandler) self.server_thread = threading.Thread(target = self.server.serve_forever) self.server_thread.setDaemon(True) self.server_thread.start() def shutdown(self): self.server.shutdown() def get_ip(self): for retry in range(1,6): ip = MyDistro.GetIpv4Address() if ip == None : Log("LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry " + str(retry+1) ) time.sleep(10) else: return ip class ConfigurationProvider(object): """ Parse amd store key:values in waagent.conf """ def __init__(self, walaConfigFile): self.values = dict() if 'MyDistro' not in globals(): global MyDistro MyDistro = GetMyDistro() if walaConfigFile is None: walaConfigFile = MyDistro.getConfigurationPath() if os.path.isfile(walaConfigFile) == False: raise Exception("Missing configuration in {0}".format(walaConfigFile)) try: for line in GetFileContents(walaConfigFile).split('\n'): if not line.startswith("#") and "=" in line: parts = line.split()[0].split('=') value = parts[1].strip("\" ") if value != "None": self.values[parts[0]] = value else: self.values[parts[0]] = None except: Error("Unable to parse {0}".format(walaConfigFile)) raise return def get(self, key): return self.values.get(key) class EnvMonitor(object): """ Montor changes to dhcp and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. """ def __init__(self): self.shutdown = False self.HostName = socket.gethostname() self.server_thread = threading.Thread(target = self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() self.published = False def monitor(self): """ Monitor dhcp client pid and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. 
""" publish = Config.get("Provisioning.MonitorHostName") dhcpcmd = MyDistro.getpidcmd+ ' ' + MyDistro.getDhcpClientName() dhcppid = RunGetOutput(dhcpcmd)[1] while not self.shutdown: for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Log("EnvMonitor: Moved " + a + " -> " + LibDir) MyDistro.setScsiDiskTimeout() if publish != None and publish.lower().startswith("y"): try: if socket.gethostname() != self.HostName: Log("EnvMonitor: Detected host name change: " + self.HostName + " -> " + socket.gethostname()) self.HostName = socket.gethostname() WaAgent.UpdateAndPublishHostName(self.HostName) dhcppid = RunGetOutput(dhcpcmd)[1] self.published = True except: pass else: self.published = True pid = "" if not os.path.isdir("/proc/" + dhcppid.strip()): pid = RunGetOutput(dhcpcmd)[1] if pid != "" and pid != dhcppid: Log("EnvMonitor: Detected dhcp client restart. Restoring routing table.") WaAgent.RestoreRoutes() dhcppid = pid for child in Children: if child.poll() != None: Children.remove(child) time.sleep(5) def SetHostName(self, name): """ Generic call to MyDistro.setHostname(name). Complian to Log on error. """ if socket.gethostname() == name: self.published = True elif MyDistro.setHostname(name): Error("Error: SetHostName: Cannot set hostname to " + name) return ("Error: SetHostName: Cannot set hostname to " + name) def IsHostnamePublished(self): """ Return self.published """ return self.published def ShutdownService(self): """ Stop server comminucation and join the thread to main thread. """ self.shutdown = True self.server_thread.join() class Certificates(object): """ Object containing certificates of host and provisioned user. Parses and splits certificates into files. """ # # 2010-12-15 # 2 # Pkcs7BlobWithPfxContents # MIILTAY... 
# # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset the Role, Incarnation """ self.Incarnation = None self.Role = None def Parse(self, xmlText): """ Parse multiple certificates into seperate files. """ self.reinitialize() SetFileContents("Certificates.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in [ "CertificateFile", "Version", "Incarnation", "Format", "Data", ]: if not dom.getElementsByTagName(a): Error("Certificates.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "CertificateFile": Error("Certificates.Parse: root not CertificateFile") return None SetFileContents("Certificates.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"Certificates.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"Certificates.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + GetNodeTextData(dom.getElementsByTagName("Data")[0])) if Run(Openssl + " cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | " + Openssl + " pkcs12 -nodes -password pass: -out Certificates.pem"): Error("Certificates.Parse: Failed to extract certificates from CMS message.") return self # There may be multiple certificates in this package. Split them. 
file = open("Certificates.pem") pindex = 1 cindex = 1 output = open("temp.pem", "w") for line in file.readlines(): output.write(line) if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$',line): output.close() if re.match(r'[-]+END .*?KEY[-]+$',line): os.rename("temp.pem", str(pindex) + ".prv") pindex += 1 else: os.rename("temp.pem", str(cindex) + ".crt") cindex += 1 output = open("temp.pem", "w") output.close() os.remove("temp.pem") keys = dict() index = 1 filename = str(index) + ".crt" while os.path.isfile(filename): thumbprint = (RunGetOutput(Openssl + " x509 -in " + filename + " -fingerprint -noout")[1]).rstrip().split('=')[1].replace(':', '').upper() pubkey=RunGetOutput(Openssl + " x509 -in " + filename + " -pubkey -noout")[1] keys[pubkey] = thumbprint os.rename(filename, thumbprint + ".crt") os.chmod(thumbprint + ".crt", 0600) MyDistro.setSelinuxContext(thumbprint + '.crt','unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".crt" index = 1 filename = str(index) + ".prv" while os.path.isfile(filename): pubkey = RunGetOutput(Openssl + " rsa -in " + filename + " -pubout 2> /dev/null ")[1] os.rename(filename, keys[pubkey] + ".prv") os.chmod(keys[pubkey] + ".prv", 0600) MyDistro.setSelinuxContext( keys[pubkey] + '.prv','unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".prv" return self class SharedConfig(object): """ Parse role endpoint server and goal state config. """ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.RdmaMacAddress = None self.RdmaIPv4Address = None self.xmlText = None def Parse(self, xmlText): """ Parse and write configuration to file SharedConfig.xml. 
""" LogIfVerbose(xmlText) self.reinitialize() self.xmlText = xmlText dom = xml.dom.minidom.parseString(xmlText) for a in [ "SharedConfig", "Deployment", "Service", "ServiceInstance", "Incarnation", "Role", ]: if not dom.getElementsByTagName(a): Error("SharedConfig.Parse: Missing " + a) node = dom.childNodes[0] if node.localName != "SharedConfig": Error("SharedConfig.Parse: root not SharedConfig") nodes = dom.getElementsByTagName("Instance") if nodes is not None and len(nodes) != 0: node = nodes[0] if node.hasAttribute("rdmaMacAddress"): addr = node.getAttribute("rdmaMacAddress") self.RdmaMacAddress = addr[0:2] for i in range(1, 6): self.RdmaMacAddress += ":" + addr[2 * i : 2 *i + 2] if node.hasAttribute("rdmaIPv4Address"): self.RdmaIPv4Address = node.getAttribute("rdmaIPv4Address") return self def Save(self): LogIfVerbose("Save SharedConfig.xml") SetFileContents("SharedConfig.xml", self.xmlText) def InvokeTopologyConsumer(self): program = Config.get("Role.TopologyConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/SharedConfig.xml"])) except OSError, e : ErrorWithPrefix('Agent.Run','Exception: '+ str(e) +' occured launching ' + program ) def Process(self): global rdma_configured if not rdma_configured and self.RdmaMacAddress is not None and self.RdmaIPv4Address is not None: handler = RdmaHandler(self.RdmaMacAddress, self.RdmaIPv4Address) handler.start() rdma_configured = True self.InvokeTopologyConsumer() rdma_configured = False class RdmaError(Exception): pass class RdmaHandler(object): """ Handle rdma configuration. 
""" def __init__(self, mac, ip_addr, dev="/dev/hvnd_rdma", dat_conf_files=['/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf']): self.mac = mac self.ip_addr = ip_addr self.dev = dev self.dat_conf_files = dat_conf_files self.data = ('rdmaMacAddress="{0}" rdmaIPv4Address="{1}"' '').format(self.mac, self.ip_addr) def start(self): """ Start a new thread to process rdma """ threading.Thread(target=self.process).start() def process(self): try: self.set_dat_conf() self.set_rdma_dev() self.set_rdma_ip() except RdmaError as e: Error("Failed to config rdma device: {0}".format(e)) def set_dat_conf(self): """ Agent needs to search all possible locations for dat.conf """ Log("Set dat.conf") for dat_conf_file in self.dat_conf_files: if not os.path.isfile(dat_conf_file): continue try: self.write_dat_conf(dat_conf_file) except IOError as e: raise RdmaError("Failed to write to dat.conf: {0}".format(e)) def write_dat_conf(self, dat_conf_file): Log("Write config to {0}".format(dat_conf_file)) old = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 " "dapl.2.0 \"\S+ 0\"") new = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 " "dapl.2.0 \"{0} 0\"").format(self.ip_addr) lines = GetFileContents(dat_conf_file) lines = re.sub(old, new, lines) SetFileContents(dat_conf_file, lines) def set_rdma_dev(self): """ Write config string to /dev/hvnd_rdma """ Log("Set /dev/hvnd_rdma") self.wait_rdma_dev() self.write_rdma_dev_conf() def write_rdma_dev_conf(self): Log("Write rdma config to {0}: {1}".format(self.dev, self.data)) try: with open(self.dev, "w") as c: c.write(self.data) except IOError, e: raise RdmaError("Error writing {0}, {1}".format(self.dev, e)) def wait_rdma_dev(self): Log("Wait for /dev/hvnd_rdma") retry = 0 while retry < 120: if os.path.exists(self.dev): return time.sleep(1) retry += 1 raise RdmaError("The device doesn't show up in 120 seconds") def set_rdma_ip(self): Log("Set ip addr for rdma") try: if_name = MyDistro.getInterfaceNameByMac(self.mac) 
#Azure is using 12 bits network mask for infiniband. MyDistro.configIpV4(if_name, self.ip_addr, 12) except Exception as e: raise RdmaError("Failed to config rdma device: {0}".format(e)) class ExtensionsConfig(object): """ Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if true, remove if it is set to false. """ # # # # # # # {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1", #"protectedSettings":"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR #Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6 #tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X #v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh #kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]} # # #https://ostcextensions.blob.core.test-cint.azure-test.net/vhds/eg-plugin7-vm.eg-plugin7-vm.eg-plugin7-vm.status?sr=b&sp=rw& #se=9999-01-01&sk=key1&sv=2012-02-12&sig=wRUIDN1x2GC06FWaetBP9sjjifOWvRzS2y2XBB4qoBU%3D def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.Extensions = None self.Plugins = None self.Util = None def Parse(self, xmlText): """ Write configuration to file ExtensionsConfig.xml. Log plugin specific activity to /var/log/azure/.//CommandExecution.log. If state is enabled: if the plugin is installed: if the new plugin's version is higher if DisallowMajorVersionUpgrade is false or if true, the version is a minor version do upgrade: download the new archive do the updateCommand. 
disable the old plugin and remove enable the new plugin if the new plugin's version is the same or lower: create the new .settings file from the configuration received do the enableCommand if the plugin is not installed: download/unpack archive and call the installCommand/Enable if state is disabled: call disableCommand if state is uninstall: call uninstallCommand remove old plugin directory. """ self.reinitialize() self.Util=Util() dom = xml.dom.minidom.parseString(xmlText) LogIfVerbose(xmlText) self.plugin_log_dir='/var/log/azure' if not os.path.exists(self.plugin_log_dir): os.mkdir(self.plugin_log_dir) try: self.Extensions=dom.getElementsByTagName("Extensions") pg = dom.getElementsByTagName("Plugins") if len(pg) > 0: self.Plugins = pg[0].getElementsByTagName("Plugin") else: self.Plugins = [] incarnation=self.Extensions[0].getAttribute("goalStateIncarnation") SetFileContents('ExtensionsConfig.'+incarnation+'.xml', xmlText) except Exception, e: Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e)) return None for p in self.Plugins: if len(p.getAttribute("location"))<1: # this plugin is inside the PluginSettings continue p.setAttribute('restricted','false') previous_version = None version=p.getAttribute("version") name=p.getAttribute("name") plog_dir=self.plugin_log_dir+'/'+name +'/'+ version if not os.path.exists(plog_dir): os.makedirs(plog_dir) p.plugin_log=plog_dir+'/CommandExecution.log' handler=name + '-' + version if p.getAttribute("isJson") != 'true': Error("Plugin " + name+" version: " +version+" is not a JSON Extension. 
Skipping.") continue Log("Found Plugin: " + name + ' version: ' + version) if p.getAttribute("state") == 'disabled' or p.getAttribute("state") == 'uninstall': #disable zip_dir=LibDir+"/" + name + '-' + version mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) if self.launchCommand(p.plugin_log,name,version,'disableCommand') == None : self.SetHandlerState(handler, 'Enabled') Error('Unable to disable '+name) SimpleLog(p.plugin_log,'ERROR: Unable to disable '+name) else : self.SetHandlerState(handler, 'Disabled') Log(name+' is disabled') SimpleLog(p.plugin_log,name+' is disabled') # uninstall if needed if p.getAttribute("state") == 'uninstall': if self.launchCommand(p.plugin_log,name,version,'uninstallCommand') == None : self.SetHandlerState(handler, 'Installed') Error('Unable to uninstall '+name) SimpleLog(p.plugin_log,'Unable to uninstall '+name) else : self.SetHandlerState(handler, 'NotInstalled') Log(name+' uninstallCommand completed .') # remove the plugin Run('rm -rf ' + LibDir + '/' + name +'-'+ version + '*') Log(name +'-'+ version + ' extension files deleted.') SimpleLog(p.plugin_log,name +'-'+ version + ' extension files deleted.') continue # state is enabled # if the same plugin exists and the version is newer or # does not exist then download and unzip the new plugin plg_dir=None latest_version_installed = LooseVersion("0.0") for item in os.listdir(LibDir): itemPath = os.path.join(LibDir, item) if os.path.isdir(itemPath) and name in item: try: #Split plugin dir name with '-' to get intalled plugin name and version sperator = item.rfind('-') if sperator < 0: continue installed_plg_name = item[0:sperator] installed_plg_version = LooseVersion(item[sperator + 1:]) #Check installed plugin name and compare installed 
version to get the latest version installed if installed_plg_name == name and installed_plg_version > latest_version_installed: plg_dir = itemPath previous_version = str(installed_plg_version) latest_version_installed = installed_plg_version except Exception as e: Warn("Invalid plugin dir name: {0} {1}".format(item, e)) continue if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version) : location=p.getAttribute("location") Log("Downloading plugin manifest: " + name + " from " + location) SimpleLog(p.plugin_log,"Downloading plugin manifest: " + name + " from " + location) self.Util.Endpoint=location.split('/')[2] Log("Plugin server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log,"Plugin server is: " + self.Util.Endpoint) manifest=self.Util.HttpGetWithoutHeaders(location, chkProxy=True) if manifest == None: Error("Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") SimpleLog(p.plugin_log,"Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") failoverlocation=p.getAttribute("failoverlocation") self.Util.Endpoint=failoverlocation.split('/')[2] Log("Plugin failover server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log,"Plugin failover server is: " + self.Util.Endpoint) manifest=self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True) #if failoverlocation also fail what to do then? if manifest == None: AddExtensionEvent(name,WALAEventOperation.Download,False,0,version,"Download mainfest fail "+failoverlocation) Log("Plugin manifest " + name + " downloading failed from failover location.") SimpleLog(p.plugin_log,"Plugin manifest " + name + " downloading failed from failover location.") filepath=LibDir+"/" + name + '.' 
+ incarnation + '.manifest' if os.path.splitext(location)[-1] == '.xml' : #if this is an xml file we may have a BOM if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128: manifest=manifest[3:] SetFileContents(filepath,manifest) #Get the bundle url from the manifest p.setAttribute('manifestdata',manifest) man_dom = xml.dom.minidom.parseString(manifest) bundle_uri = "" for mp in man_dom.getElementsByTagName("Plugin"): if GetNodeTextData(mp.getElementsByTagName("Version")[0]) == version: bundle_uri = GetNodeTextData(mp.getElementsByTagName("Uri")[0]) break if len(mp.getElementsByTagName("DisallowMajorVersionUpgrade")): if GetNodeTextData(mp.getElementsByTagName("DisallowMajorVersionUpgrade")[0]) == 'true' and previous_version !=None and previous_version.split('.')[0] != version.split('.')[0] : Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') SimpleLog(p.plugin_log,'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') p.setAttribute('restricted','true') continue if len(bundle_uri) < 1 : Error("Unable to fetch Bundle URI from manifest for " + name + " v " + version) SimpleLog(p.plugin_log,"Unable to fetch Bundle URI from manifest for " + name + " v " + version) continue Log("Bundle URI = " + bundle_uri) SimpleLog(p.plugin_log,"Bundle URI = " + bundle_uri) # Download the zipfile archive and save as '.zip' bundle=self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True) if bundle == None: AddExtensionEvent(name,WALAEventOperation.Download,True,0,version,"Download zip fail "+bundle_uri) Error("Unable to download plugin bundle" + bundle_uri ) SimpleLog(p.plugin_log,"Unable to download plugin bundle" + bundle_uri ) continue AddExtensionEvent(name,WALAEventOperation.Download,True,0,version,"Download Success") b=bytearray(bundle) filepath=LibDir+"/" + os.path.basename(bundle_uri) + '.zip' SetFileContents(filepath,b) Log("Plugin bundle" + bundle_uri + "downloaded successfully 
length = " + str(len(bundle))) SimpleLog(p.plugin_log,"Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle))) # unpack the archive z=zipfile.ZipFile(filepath) zip_dir=LibDir+"/" + name + '-' + version z.extractall(zip_dir) Log('Extracted ' + bundle_uri + ' to ' + zip_dir) SimpleLog(p.plugin_log,'Extracted ' + bundle_uri + ' to ' + zip_dir) # zip no file perms in .zip so set all the scripts to +x Run( "find " + zip_dir +" -type f | xargs chmod u+x ") #write out the base64 config data so the plugin can process it. mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log,'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) # create the status and config dirs Run('mkdir -p ' + root + '/status') Run('mkdir -p ' + root + '/config') # write out the configuration data to goalStateIncarnation.settings file in the config path. 
config='' seqNo='0' if len(dom.getElementsByTagName("PluginSettings")) != 0 : pslist=dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"Found RuntimeSettings for " + name + " V " + version) config=GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo=ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Log("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"No RuntimeSettings for " + name + " V " + version) SetFileContents(root +"/config/" + seqNo +".settings", config ) #create HandlerEnvironment.json handler_env='[{ "name": "'+name+'", "seqNo": "'+seqNo+'", "version": 1.0, "handlerEnvironment": { "logFolder": "'+os.path.dirname(p.plugin_log)+'", "configFolder": "' + root + '/config", "statusFolder": "' + root + '/status", "heartbeatFile": "'+ root + '/heartbeat.log"}}]' SetFileContents(root+'/HandlerEnvironment.json',handler_env) self.SetHandlerState(handler, 'NotInstalled') cmd = '' getcmd='installCommand' if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(previous_version): previous_handler=name+'-'+previous_version if self.GetHandlerState(previous_handler) != 'NotInstalled': getcmd='updateCommand' # disable the old plugin if it exists if self.launchCommand(p.plugin_log,name,previous_version,'disableCommand') == None : self.SetHandlerState(previous_handler, 'Enabled') Error('Unable to disable old plugin '+name+' version ' + previous_version) SimpleLog(p.plugin_log,'Unable to disable old plugin '+name+' version ' + previous_version) else : self.SetHandlerState(previous_handler, 'Disabled') Log(name+' version ' + previous_version + ' is disabled') SimpleLog(p.plugin_log,name+' version ' + previous_version + ' is disabled') try: Log("Copy status file 
from old plugin dir to new") old_plg_dir = plg_dir new_plg_dir = os.path.join(LibDir, "{0}-{1}".format(name, version)) old_ext_status_dir = os.path.join(old_plg_dir, "status") new_ext_status_dir = os.path.join(new_plg_dir, "status") if os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file_path = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file_path): shutil.copy2(status_file_path, new_ext_status_dir) mrseq_file = os.path.join(old_plg_dir, "mrseq") if os.path.isfile(mrseq_file): shutil.copy(mrseq_file, new_plg_dir) except Exception as e: Error("Failed to copy status file.") isupgradeSuccess = True if getcmd=='updateCommand': if self.launchCommand(p.plugin_log,name,version,getcmd,previous_version) == None : Error('Update failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Update failed for '+name+'-'+version) isupgradeSuccess=False else : Log('Update complete'+name+'-'+version) SimpleLog(p.plugin_log,'Update complete'+name+'-'+version) # if we updated - call unistall for the old plugin if self.launchCommand(p.plugin_log,name,previous_version,'uninstallCommand') == None : self.SetHandlerState(previous_handler, 'Installed') Error('Uninstall failed for '+name+'-'+previous_version) SimpleLog(p.plugin_log,'Uninstall failed for '+name+'-'+previous_version) isupgradeSuccess=False else : self.SetHandlerState(previous_handler, 'NotInstalled') Log('Uninstall complete'+ previous_handler ) SimpleLog(p.plugin_log,'Uninstall complete'+ name +'-' + previous_version) try: #rm old plugin dir if os.path.isdir(plg_dir): shutil.rmtree(plg_dir) Log(name +'-'+ previous_version + ' extension files deleted.') SimpleLog(p.plugin_log,name +'-'+ previous_version + ' extension files deleted.') except Exception as e: Error("Failed to remove old plugin directory") AddExtensionEvent(name,WALAEventOperation.Upgrade,isupgradeSuccess,0,previous_version) else : # run install if 
self.launchCommand(p.plugin_log,name,version,getcmd) == None : self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Installed') Log('Installation completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version) #end if plg_dir == none or version > = prev # change incarnation of settings file so it knows how to name status... zip_dir=LibDir+"/" + name + '-' + version mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log,'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata',manifest) config='' seqNo='0' if len(dom.getElementsByTagName("PluginSettings")) != 0 : try: pslist=dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") except: Error('Error parsing ExtensionsConfig.') SimpleLog(p.plugin_log,'Error parsing ExtensionsConfig.') continue for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"Found RuntimeSettings for " + name + " V " + version) config=GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo=ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Error("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log,"No RuntimeSettings for " + name + " V " + version) SetFileContents(root +"/config/" + seqNo +".settings", config ) # state is still enable if (self.GetHandlerState(handler) == 'NotInstalled'): # run install first if true if self.launchCommand(p.plugin_log,name,version,'installCommand') == None : 
self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Installed') Log('Installation completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version) if (self.GetHandlerState(handler) != 'NotInstalled'): if self.launchCommand(p.plugin_log,name,version,'enableCommand') == None : self.SetHandlerState(handler, 'Installed') Error('Enable failed for '+name+'-'+version) SimpleLog(p.plugin_log,'Enable failed for '+name+'-'+version) else : self.SetHandlerState(handler, 'Enabled') Log('Enable completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Enable completed for '+name+'-'+version) # this plugin processing is complete Log('Processing completed for '+name+'-'+version) SimpleLog(p.plugin_log,'Processing completed for '+name+'-'+version) #end plugin processing loop Log('Finished processing ExtensionsConfig.xml') try: SimpleLog(p.plugin_log,'Finished processing ExtensionsConfig.xml') except: pass return self def launchCommand(self,plugin_log,name,version,command,prev_version=None): commandToEventOperation={ "installCommand":WALAEventOperation.Install, "uninstallCommand":WALAEventOperation.UnIsntall, "updateCommand": WALAEventOperation.Upgrade, "enableCommand": WALAEventOperation.Enable, "disableCommand": WALAEventOperation.Disable, } isSuccess=True start = datetime.datetime.now() r=self.__launchCommandWithoutEventLog(plugin_log,name,version,command,prev_version) if r==None: isSuccess=False Duration = int((datetime.datetime.now() - start).seconds) if commandToEventOperation.get(command): AddExtensionEvent(name,commandToEventOperation[command],isSuccess,Duration,version) return r def __launchCommandWithoutEventLog(self,plugin_log,name,version,command,prev_version=None): # get the manifest and read the command mfile=None zip_dir=LibDir+"/" + name + '-' + version for root, dirs, files in 
os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('HandlerManifest.json not found.') SimpleLog(plugin_log,'HandlerManifest.json not found.') return None manifest = GetFileContents(mfile) try: jsn = json.loads(manifest) except: Error('Error parsing HandlerManifest.json.') SimpleLog(plugin_log,'Error parsing HandlerManifest.json.') return None if type(jsn)==list: jsn=jsn[0] if jsn.has_key('handlerManifest') : cmd = jsn['handlerManifest'][command] else : Error('Key handlerManifest not found. Handler cannot be installed.') SimpleLog(plugin_log,'Key handlerManifest not found. Handler cannot be installed.') if len(cmd) == 0 : Error('Unable to read ' + command ) SimpleLog(plugin_log,'Unable to read ' + command ) return None # for update we send the path of the old installation arg='' if prev_version != None : arg=' ' + LibDir+'/' + name + '-' + prev_version dirpath=os.path.dirname(mfile) LogIfVerbose('Command is '+ dirpath+'/'+ cmd) # launch pid=None try: child = subprocess.Popen(dirpath+'/'+cmd+arg,shell=True,cwd=dirpath,stdout=subprocess.PIPE) except Exception as e: Error('Exception launching ' + cmd + str(e)) SimpleLog(plugin_log,'Exception launching ' + cmd + str(e)) pid = child.pid if pid == None or pid < 1 : ExtensionChildren.append((-1,root)) Error('Error launching ' + cmd + '.') SimpleLog(plugin_log,'Error launching ' + cmd + '.') else : ExtensionChildren.append((pid,root)) Log("Spawned "+ cmd + " PID " + str(pid)) SimpleLog(plugin_log,"Spawned "+ cmd + " PID " + str(pid)) # wait until install/upgrade is finished timeout = 300 # 5 minutes retry = timeout/5 while retry > 0 and child.poll() == None: LogIfVerbose(cmd + ' still running with PID ' + str(pid)) time.sleep(5) retry-=1 if retry==0: Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid)) SimpleLog(plugin_log,'Process exceeded timeout of ' + str(timeout) + ' seconds. 
Terminating process ' + str(pid)) os.kill(pid,9) return None code = child.wait() if code == None or code != 0: Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') SimpleLog(plugin_log,'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') return None Log(command + ' completed.') SimpleLog(plugin_log,command + ' completed.') return 0 def ReportHandlerStatus(self): """ Collect all status reports. """ # { "version": "1.0", "timestampUTC": "2014-03-31T21:28:58Z", # "aggregateStatus": { # "guestAgentStatus": { "version": "2.0.4PRE", "status": "Ready", "formattedMessage": { "lang": "en-US", "message": "GuestAgent is running and accepting new configurations." } }, # "handlerAggregateStatus": [{ # "handlerName": "ExampleHandlerLinux", "handlerVersion": "1.0", "status": "Ready", "runtimeSettingsStatus": { # "sequenceNumber": "2", "settingsStatus": { "timestampUTC": "2014-03-31T23:46:00Z", "status": { "name": "ExampleHandlerLinux", "operation": "Command Execution Finished", "configurationAppliedTime": "2014-03-31T23:46:00Z", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Finished executing command" }, # "substatus": [ # { "name": "StdOut", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Goodbye world!" } }, # { "name": "StdErr", "status": "success", "formattedMessage": { "lang": "en-US", "message": "" } } # ] # } } } } # ] # }} try: incarnation=self.Extensions[0].getAttribute("goalStateIncarnation") except: Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports') return -1 status='' statuses='' for p in self.Plugins: if p.getAttribute("state") == 'uninstall' or p.getAttribute("restricted") == 'true' : continue version=p.getAttribute("version") name=p.getAttribute("name") if p.getAttribute("isJson") != 'true': LogIfVerbose("Plugin " + name+" version: " +version+" is not a JSON Extension. 
Skipping.") continue reportHeartbeat = False if len(p.getAttribute("manifestdata"))<1: Error("Failed to get manifestdata.") else: reportHeartbeat = json.loads(p.getAttribute("manifestdata"))[0]['handlerManifest']['reportHeartbeat'] if len(statuses)>0: statuses+=',' statuses+=self.GenerateAggStatus(name, version, reportHeartbeat) tstamp=time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) #header #agent state if provisioned == False: if provisionError == None : agent_state='Provisioning' agent_msg='Guest Agent is starting.' else: agent_state='Provisioning Error.' agent_msg=provisionError else: agent_state='Ready' agent_msg='GuestAgent is running and accepting new configurations.' status='{"version":"1.0","timestampUTC":"'+tstamp+'","aggregateStatus":{"guestAgentStatus":{"version":"'+GuestAgentVersion+'","status":"'+agent_state+'","formattedMessage":{"lang":"en-US","message":"'+agent_msg+'"}},"handlerAggregateStatus":['+statuses+']}}' try: uri=GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&','&') except: Error('Error parsing element "StatusUploadBlob". 
Unable to send status reports') return -1 LogIfVerbose('Status report '+status+' sent to ' + uri) return UploadStatusBlob(uri, status.encode("utf-8")) def GetCurrentSequenceNumber(self, plugin_base_dir): """ Get the settings file with biggest file number in config folder """ config_dir = os.path.join(plugin_base_dir, 'config') seq_no = 0 for subdir, dirs, files in os.walk(config_dir): for file in files: try: cur_seq_no = int(os.path.basename(file).split('.')[0]) if cur_seq_no > seq_no: seq_no = cur_seq_no except ValueError: continue return str(seq_no) def GenerateAggStatus(self, name, version, reportHeartbeat = False): """ Generate the status which Azure can understand by the status and heartbeat reported by extension """ plugin_base_dir = LibDir+'/'+name+'-'+version+'/' current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir) status_file=os.path.join(plugin_base_dir, 'status/', current_seq_no +'.status') heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log') handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState') agg_state = 'NotReady' handler_state = None status_obj = None status_code = None formatted_message = None localized_message = None if os.path.exists(handler_state_file): handler_state = GetFileContents(handler_state_file).lower() if HandlerStatusToAggStatus.has_key(handler_state): agg_state = HandlerStatusToAggStatus[handler_state] if reportHeartbeat: if os.path.exists(heartbeat_file): d=int(time.time()-os.stat(heartbeat_file).st_mtime) if d > 600 : # not updated for more than 10 min agg_state = 'Unresponsive' else: try: heartbeat = json.loads(GetFileContents(heartbeat_file))[0]["heartbeat"] agg_state = heartbeat.get("status") status_code = heartbeat.get("code") formatted_message = heartbeat.get("formattedMessage") localized_message = heartbeat.get("message") except: Error("Incorrect heartbeat file. Ignore it. 
") else: agg_state = 'Unresponsive' #get status file reported by extension if os.path.exists(status_file): # raw status generated by extension is an array, get the first item and remove the unnecessary element try: status_obj = json.loads(GetFileContents(status_file))[0] del status_obj["version"] except: Error("Incorrect status file. Will NOT settingsStatus in settings. ") agg_status_obj = {"handlerName": name, "handlerVersion": version, "status": agg_state, "runtimeSettingsStatus" : {"sequenceNumber": current_seq_no}} if status_obj: agg_status_obj["runtimeSettingsStatus"]["settingsStatus"] = status_obj if status_code != None: agg_status_obj["code"] = status_code if formatted_message: agg_status_obj["formattedMessage"] = formatted_message if localized_message: agg_status_obj["message"] = localized_message agg_status_string = json.dumps(agg_status_obj) LogIfVerbose("Handler Aggregated Status:" + agg_status_string) return agg_status_string def SetHandlerState(self, handler, state=''): zip_dir=LibDir+"/" + handler mfile=None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile=os.path.join(root,f) if mfile != None: break if mfile == None : Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.') return None Log("SetHandlerState: "+handler+", "+state) return SetFileContents(os.path.dirname(mfile)+'/config/HandlerState', state) def GetHandlerState(self, handler): handlerState = GetFileContents(handler+'/config/HandlerState') if (handlerState): return handlerState.rstrip('\r\n') else: return 'NotInstalled' class HostingEnvironmentConfig(object): """ Parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml """ # # # # # # # # # # # # # # # # # # # # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset Members. 
""" self.StoredCertificates = None self.Deployment = None self.Incarnation = None self.Role = None self.HostingEnvironmentSettings = None self.ApplicationSettings = None self.Certificates = None self.ResourceReferences = None def Parse(self, xmlText): """ Parse and create HostingEnvironmentConfig.xml. """ self.reinitialize() SetFileContents("HostingEnvironmentConfig.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in [ "HostingEnvironmentConfig", "Deployment", "Service", "ServiceInstance", "Incarnation", "Role", ]: if not dom.getElementsByTagName(a): Error("HostingEnvironmentConfig.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "HostingEnvironmentConfig": Error("HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig") return None self.ApplicationSettings = dom.getElementsByTagName("Setting") self.Certificates = dom.getElementsByTagName("StoredCertificate") return self def DecryptPassword(self, e): """ Return decrypted password. """ SetFileContents("password.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"password.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"password.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + textwrap.fill(e, 64)) return RunGetOutput(Openssl + " cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem")[1] def ActivateResourceDisk(self): return MyDistro.ActivateResourceDisk() def Process(self): """ Execute ActivateResourceDisk in separate thread. Create the user account. Launch ConfigurationConsumer if specified in the config. 
""" no_thread = False if DiskActivated == False: for m in inspect.getmembers(MyDistro): if 'ActivateResourceDiskNoThread' in m: no_thread = True break if no_thread == True : MyDistro.ActivateResourceDiskNoThread() else : diskThread = threading.Thread(target = self.ActivateResourceDisk) diskThread.start() User = None Pass = None Expiration = None Thumbprint = None for b in self.ApplicationSettings: sname = b.getAttribute("name") svalue = b.getAttribute("value") if User != None and Pass != None: if User != "root" and User != "" and Pass != "": CreateAccount(User, Pass, Expiration, Thumbprint) else: Error("Not creating user account: " + User) for c in self.Certificates: csha1 = c.getAttribute("certificateId").split(':')[1].upper() if os.path.isfile(csha1 + ".prv"): Log("Private key with thumbprint: " + csha1 + " was retrieved.") if os.path.isfile(csha1 + ".crt"): Log("Public cert with thumbprint: " + csha1 + " was retrieved.") program = Config.get("Role.ConfigurationConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/HostingEnvironmentConfig.xml"])) except OSError, e : ErrorWithPrefix('HostingEnvironmentConfig.Process','Exception: '+ str(e) +' occured launching ' + program ) class GoalState(Util): """ Primary container for all configuration except OvfXml. Encapsulates http communication with endpoint server. 
Initializes and populates: self.HostingEnvironmentConfig self.SharedConfig self.ExtensionsConfig self.Certificates """ # # # 2010-12-15 # 1 # # Started # # 16001 # # # # c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 # # # MachineRole_IN_0 # Started # # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=hostingEnvironmentConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=sharedConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=certificates&incarnation=1 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=extensionsConfig&incarnation=2 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=fullConfig&incarnation=2 # # # # # # # There is only one Role for VM images. # # Of primary interest is: # LBProbePorts -- an http server needs to run here # We also note Container/ContainerID and RoleInstance/InstanceId to form the health report. 
# And of course, Incarnation # def __init__(self, Agent): self.Agent = Agent self.Endpoint = Agent.Endpoint self.TransportCert = Agent.TransportCert self.reinitialize() def reinitialize(self): self.Incarnation = None # integer self.ExpectedState = None # "Started" self.HostingEnvironmentConfigUrl = None self.HostingEnvironmentConfigXml = None self.HostingEnvironmentConfig = None self.SharedConfigUrl = None self.SharedConfigXml = None self.SharedConfig = None self.CertificatesUrl = None self.CertificatesXml = None self.Certificates = None self.ExtensionsConfigUrl = None self.ExtensionsConfigXml = None self.ExtensionsConfig = None self.RoleInstanceId = None self.ContainerId = None self.LoadBalancerProbePort = None # integer, ?list of integers def Parse(self, xmlText): """ Request configuration data from endpoint server. Parse and populate contained configuration objects. Calls Certificates().Parse() Calls SharedConfig().Parse Calls ExtensionsConfig().Parse Calls HostingEnvironmentConfig().Parse """ self.reinitialize() LogIfVerbose(xmlText) node = xml.dom.minidom.parseString(xmlText).childNodes[0] if node.localName != "GoalState": Error("GoalState.Parse: root not GoalState") return None for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE: if a.localName == "Incarnation": self.Incarnation = GetNodeTextData(a) elif a.localName == "Machine": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ExpectedState": self.ExpectedState = GetNodeTextData(b) Log("ExpectedState: " + self.ExpectedState) elif b.localName == "LBProbePorts": for c in b.childNodes: if c.nodeType == node.ELEMENT_NODE and c.localName == "Port": self.LoadBalancerProbePort = int(GetNodeTextData(c)) elif a.localName == "Container": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ContainerId": self.ContainerId = GetNodeTextData(b) Log("ContainerId: " + self.ContainerId) elif b.localName == "RoleInstanceList": for c in b.childNodes: if c.localName 
== "RoleInstance": for d in c.childNodes: if d.nodeType == node.ELEMENT_NODE: if d.localName == "InstanceId": self.RoleInstanceId = GetNodeTextData(d) Log("RoleInstanceId: " + self.RoleInstanceId) elif d.localName == "State": pass elif d.localName == "Configuration": for e in d.childNodes: if e.nodeType == node.ELEMENT_NODE: LogIfVerbose(e.localName) if e.localName == "HostingEnvironmentConfig": self.HostingEnvironmentConfigUrl = GetNodeTextData(e) LogIfVerbose("HostingEnvironmentConfigUrl:" + self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfigXml = self.HttpGetWithHeaders(self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfig = HostingEnvironmentConfig().Parse(self.HostingEnvironmentConfigXml) elif e.localName == "SharedConfig": self.SharedConfigUrl = GetNodeTextData(e) LogIfVerbose("SharedConfigUrl:" + self.SharedConfigUrl) self.SharedConfigXml = self.HttpGetWithHeaders(self.SharedConfigUrl) self.SharedConfig = SharedConfig().Parse(self.SharedConfigXml) self.SharedConfig.Save() elif e.localName == "ExtensionsConfig": self.ExtensionsConfigUrl = GetNodeTextData(e) LogIfVerbose("ExtensionsConfigUrl:" + self.ExtensionsConfigUrl) self.ExtensionsConfigXml = self.HttpGetWithHeaders(self.ExtensionsConfigUrl) elif e.localName == "Certificates": self.CertificatesUrl = GetNodeTextData(e) LogIfVerbose("CertificatesUrl:" + self.CertificatesUrl) self.CertificatesXml = self.HttpSecureGetWithHeaders(self.CertificatesUrl, self.TransportCert) self.Certificates = Certificates().Parse(self.CertificatesXml) if self.Incarnation == None: Error("GoalState.Parse: Incarnation missing") return None if self.ExpectedState == None: Error("GoalState.Parse: ExpectedState missing") return None if self.RoleInstanceId == None: Error("GoalState.Parse: RoleInstanceId missing") return None if self.ContainerId == None: Error("GoalState.Parse: ContainerId missing") return None SetFileContents("GoalState." 
+ self.Incarnation + ".xml", xmlText) return self def Process(self): """ Calls HostingEnvironmentConfig.Process() """ LogIfVerbose("Process goalstate") self.HostingEnvironmentConfig.Process() self.SharedConfig.Process() class OvfEnv(object): """ Read, and process provisioning info from provisioning file OvfEnv.xml """ # # # # # 1.0 # # LinuxProvisioningConfiguration # HostName # UserName # UserPassword # false # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/authorized_keys # # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/id_rsa # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.WaNs = "http://schemas.microsoft.com/windowsazure" self.OvfNs = "http://schemas.dmtf.org/ovf/environment/1" self.MajorVersion = 1 self.MinorVersion = 0 self.ComputerName = None self.AdminPassword = None self.UserName = None self.UserPassword = None self.CustomData = None self.DisableSshPasswordAuthentication = True self.SshPublicKeys = [] self.SshKeyPairs = [] def Parse(self, xmlText, isDeprovision = False): """ Parse xml tree, retreiving user and ssh key information. Return self. """ self.reinitialize() LogIfVerbose(re.sub(".*?<", "*<", xmlText)) dom = xml.dom.minidom.parseString(xmlText) if len(dom.getElementsByTagNameNS(self.OvfNs, "Environment")) != 1: Error("Unable to parse OVF XML.") section = None newer = False for p in dom.getElementsByTagNameNS(self.WaNs, "ProvisioningSection"): for n in p.childNodes: if n.localName == "Version": verparts = GetNodeTextData(n).split('.') major = int(verparts[0]) minor = int(verparts[1]) if major > self.MajorVersion: newer = True if major != self.MajorVersion: break if minor > self.MinorVersion: newer = True section = p if newer == True: Warn("Newer provisioning configuration detected. 
Please consider updating waagent.") if section == None: Error("Could not find ProvisioningSection with major version=" + str(self.MajorVersion)) return None self.ComputerName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "HostName")[0]) self.UserName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserName")[0]) if isDeprovision == True: return self try: self.UserPassword = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserPassword")[0]) except: pass CDSection=None try: CDSection=section.getElementsByTagNameNS(self.WaNs, "CustomData") if len(CDSection) > 0 : self.CustomData=GetNodeTextData(CDSection[0]) if len(self.CustomData)>0: SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData), 'utf-8')) Log('Wrote ' + LibDir + '/CustomData') else : Error(' contains no data!') except Exception, e: Error( str(e)+' occured creating ' + LibDir + '/CustomData') disableSshPass = section.getElementsByTagNameNS(self.WaNs, "DisableSshPasswordAuthentication") if len(disableSshPass) != 0: self.DisableSshPasswordAuthentication = (GetNodeTextData(disableSshPass[0]).lower() == "true") for pkey in section.getElementsByTagNameNS(self.WaNs, "PublicKey"): LogIfVerbose(repr(pkey)) fp = None path = None for c in pkey.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshPublicKeys += [[fp, path]] for keyp in section.getElementsByTagNameNS(self.WaNs, "KeyPair"): fp = None path = None LogIfVerbose(repr(keyp)) for c in keyp.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshKeyPairs += [[fp, path]] return self def PrepareDir(self, filepath): """ Create home dir for self.UserName Change owner and return path. 
""" home = MyDistro.GetHome() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) if (path.startswith("/") == False) or (path.endswith("/") == True): return None dir = path.rsplit('/', 1)[0] if dir != "": CreateDir(dir, "root", 0700) if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(dir, self.UserName) return path def NumberToBytes(self, i): """ Pack number into bytes. Retun as string. """ result = [] while i: result.append(chr(i & 0xFF)) i >>= 8 result.reverse() return ''.join(result) def BitsToString(self, a): """ Return string representation of bits in a. """ index=7 s = "" c = 0 for bit in a: c = c | (bit << index) index = index - 1 if index == -1: s = s + struct.pack('>B', c) c = 0 index = 7 return s def OpensslToSsh(self, file): """ Return base-64 encoded key appropriate for ssh. """ from pyasn1.codec.der import decoder as der_decoder try: f = open(file).read().replace('\n','').split("KEY-----")[1].split('-')[0] k=der_decoder.decode(self.BitsToString(der_decoder.decode(base64.b64decode(f))[0][1]))[0] n=k[0] e=k[1] keydata="" keydata += struct.pack('>I',len("ssh-rsa")) keydata += "ssh-rsa" keydata += struct.pack('>I',len(self.NumberToBytes(e))) keydata += self.NumberToBytes(e) keydata += struct.pack('>I',len(self.NumberToBytes(n)) + 1) keydata += "\0" keydata += self.NumberToBytes(n) except Exception, e: print("OpensslToSsh: Exception " + str(e)) return None return "ssh-rsa " + base64.b64encode(keydata) + "\n" def Process(self): """ Process all certificate and key info. DisableSshPasswordAuthentication if configured. CreateAccount(user) Wait for WaAgent.EnvMonitor.IsHostnamePublished(). Restart ssh service. 
""" error = None if self.ComputerName == None : return "Error: Hostname missing" error=WaAgent.EnvMonitor.SetHostName(self.ComputerName) if error: return error if self.DisableSshPasswordAuthentication: filepath = "/etc/ssh/sshd_config" # Disable RFC 4252 and RFC 4256 authentication schemes. ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not (a.startswith("PasswordAuthentication") or a.startswith("ChallengeResponseAuthentication")), GetFileContents(filepath).split('\n'))) + "\nPasswordAuthentication no\nChallengeResponseAuthentication no\n") Log("Disabled SSH password-based authentication methods.") if self.AdminPassword != None: MyDistro.changePass('root',self.AdminPassword) if self.UserName != None: error = MyDistro.CreateAccount(self.UserName, self.UserPassword, None, None) sel = MyDistro.isSelinuxRunning() if sel : MyDistro.setSelinuxEnforce(0) home = MyDistro.GetHome() for pkey in self.SshPublicKeys: Log("Deploy public key:{0}".format(pkey[0])) if not os.path.isfile(pkey[0] + ".crt"): Error("PublicKey not found: " + pkey[0]) error = "Failed to deploy public key (0x09)." continue path = self.PrepareDir(pkey[1]) if path == None: Error("Invalid path: " + pkey[1] + " for PublicKey: " + pkey[0]) error = "Invalid path for public key (0x03)." continue Run(Openssl + " x509 -in " + pkey[0] + ".crt -noout -pubkey > " + pkey[0] + ".pub") MyDistro.setSelinuxContext(pkey[0] + '.pub','unconfined_u:object_r:ssh_home_t:s0') MyDistro.sshDeployPublicKey(pkey[0] + '.pub',path) MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) for keyp in self.SshKeyPairs: Log("Deploy key pair:{0}".format(keyp[0])) if not os.path.isfile(keyp[0] + ".prv"): Error("KeyPair not found: " + keyp[0]) error = "Failed to deploy key pair (0x0A)." 
continue path = self.PrepareDir(keyp[1]) if path == None: Error("Invalid path: " + keyp[1] + " for KeyPair: " + keyp[0]) error = "Invalid path for key pair (0x05)." continue SetFileContents(path, GetFileContents(keyp[0] + ".prv")) os.chmod(path, 0600) Run("ssh-keygen -y -f " + keyp[0] + ".prv > " + path + ".pub") MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') MyDistro.setSelinuxContext(path + '.pub','unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) ChangeOwner(path + ".pub", self.UserName) if sel : MyDistro.setSelinuxEnforce(1) while not WaAgent.EnvMonitor.IsHostnamePublished(): time.sleep(1) MyDistro.restartSshService() return error class WALAEvent(object): def __init__(self): self.providerId="" self.eventId=1 self.OpcodeName="" self.KeywordName="" self.TaskName="" self.TenantName="" self.RoleName="" self.RoleInstanceName="" self.ContainerId="" self.ExecutionMode="IAAS" self.OSVersion="" self.GAVersion="" self.RAM=0 self.Processors=0 def ToXml(self): strEventid=u''.format(self.eventId) strProviderid=u''.format(self.providerId) strRecordFormat = u'' strRecordNoQuoteFormat = u'' strMtStr=u'mt:wstr' strMtUInt64=u'mt:uint64' strMtBool=u'mt:bool' strMtFloat=u'mt:float64' strEventsData=u"" for attName in self.__dict__: if attName in ["eventId","filedCount","providerId"]: continue attValue = self.__dict__[attName] if type(attValue) is int: strEventsData+=strRecordFormat.format(attName,attValue,strMtUInt64) continue if type(attValue) is str: attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if str(type(attValue)).count("'unicode'") >0 : attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if type(attValue) is bool: strEventsData+=strRecordFormat.format(attName,attValue,strMtBool) continue if 
type(attValue) is float: strEventsData+=strRecordFormat.format(attName,attValue,strMtFloat) continue Log("Warning: property "+attName+":"+str(type(attValue))+":type"+str(type(attValue))+"Can't convert to events data:"+":type not supported") return u"{0}{1}{2}".format(strProviderid,strEventid,strEventsData) def Save(self): eventfolder = LibDir+"/events" if not os.path.exists(eventfolder): os.mkdir(eventfolder) os.chmod(eventfolder,0700) if len(os.listdir(eventfolder)) > 1000: raise Exception("WriteToFolder:Too many file under "+eventfolder+" exit") filename = os.path.join(eventfolder,str(int(time.time()*1000000))) with open(filename+".tmp",'wb+') as hfile: hfile.write(self.ToXml().encode("utf-8")) os.rename(filename+".tmp",filename+".tld") class WALAEventOperation: HeartBeat="HeartBeat" Provision = "Provision" Install = "Install" UnIsntall = "UnInstall" Disable = "Disable" Enable = "Enable" Download = "Download" Upgrade = "Upgrade" Update = "Update" def AddExtensionEvent(name,op,isSuccess,duration=0,version="1.0",message="",type="",isInternal=False): event = ExtensionEvent() event.Name=name event.Version=version event.IsInternal=isInternal event.Operation=op event.OperationSuccess=isSuccess event.Message=message event.Duration=duration event.ExtensionType=type try: event.Save() except: Error("Error "+traceback.format_exc()) class ExtensionEvent(WALAEvent): def __init__(self): WALAEvent.__init__(self) self.eventId=1 self.providerId="69B669B9-4AF8-4C50-BDC4-6006FA76E975" self.Name="" self.Version="" self.IsInternal=False self.Operation="" self.OperationSuccess=True self.ExtensionType="" self.Message="" self.Duration=0 class WALAEventMonitor(WALAEvent): def __init__(self,postMethod): WALAEvent.__init__(self) self.post = postMethod self.sysInfo={} self.eventdir = LibDir+"/events" self.issysteminfoinitilized = False def StartEventsLoop(self): eventThread = threading.Thread(target = self.EventsLoop) eventThread.setDaemon(True) eventThread.start() def EventsLoop(self): 
LastReportHeartBeatTime = datetime.datetime.min try: while True: if (datetime.datetime.now()-LastReportHeartBeatTime) > \ datetime.timedelta(minutes=30): LastReportHeartBeatTime = datetime.datetime.now() AddExtensionEvent(op=WALAEventOperation.HeartBeat,name="WALA",isSuccess=True) self.postNumbersInOneLoop=0 self.CollectAndSendWALAEvents() time.sleep(60) except: Error("Exception in events loop:"+traceback.format_exc()) def SendEvent(self,providerid,events): dataFormat = u'{1}'\ '' data = dataFormat.format(providerid,events) self.post("/machine/?comp=telemetrydata", data) def CollectAndSendWALAEvents(self): if not os.path.exists(self.eventdir): return #Throtting, can't send more than 3 events in 15 seconds eventSendNumber=0 eventFiles = os.listdir(self.eventdir) events = {} for file in eventFiles: if not file.endswith(".tld"): continue with open(os.path.join(self.eventdir,file),"rb") as hfile: #if fail to open or delete the file, throw exception xmlStr = hfile.read().decode("utf-8",'ignore') os.remove(os.path.join(self.eventdir,file)) params="" eventid="" providerid="" #if exception happen during process an event, catch it and continue try: xmlStr = self.AddSystemInfo(xmlStr) for node in xml.dom.minidom.parseString(xmlStr.encode("utf-8")).childNodes[0].childNodes: if node.tagName == "Param": params+=node.toxml() if node.tagName == "Event": eventid=node.getAttribute("id") if node.tagName == "Provider": providerid = node.getAttribute("id") except: Error(traceback.format_exc()) continue if len(params)==0 or len(eventid)==0 or len(providerid)==0: Error("Empty filed in params:"+params+" event id:"+eventid+" provider id:"+providerid) continue eventstr = u''.format(eventid,params) if not events.get(providerid): events[providerid]="" if len(events[providerid]) >0 and len(events.get(providerid)+eventstr)>= 63*1024: eventSendNumber+=1 self.SendEvent(providerid,events.get(providerid)) if eventSendNumber %3 ==0: time.sleep(15) events[providerid]="" if len(eventstr) >= 63*1024: 
Error("Signle event too large abort "+eventstr[:300]) continue events[providerid]=events.get(providerid)+eventstr for key in events.keys(): if len(events[key]) > 0: eventSendNumber+=1 self.SendEvent(key,events[key]) if eventSendNumber%3 == 0: time.sleep(15) def AddSystemInfo(self,eventData): if not self.issysteminfoinitilized: self.issysteminfoinitilized=True try: self.sysInfo["OSVersion"]=platform.system()+":"+"-".join(DistInfo(1))+":"+platform.release() self.sysInfo["GAVersion"]=GuestAgentVersion self.sysInfo["RAM"]=MyDistro.getTotalMemory() self.sysInfo["Processors"]=MyDistro.getProcessorCores() sharedConfig = xml.dom.minidom.parse("/var/lib/waagent/SharedConfig.xml").childNodes[0] hostEnvConfig= xml.dom.minidom.parse("/var/lib/waagent/HostingEnvironmentConfig.xml").childNodes[0] gfiles = RunGetOutput("ls -t /var/lib/waagent/GoalState.*.xml")[1] goalStateConfi = xml.dom.minidom.parse(gfiles.split("\n")[0]).childNodes[0] self.sysInfo["TenantName"]=hostEnvConfig.getElementsByTagName("Deployment")[0].getAttribute("name") self.sysInfo["RoleName"]=hostEnvConfig.getElementsByTagName("Role")[0].getAttribute("name") self.sysInfo["RoleInstanceName"]=sharedConfig.getElementsByTagName("Instance")[0].getAttribute("id") self.sysInfo["ContainerId"]=goalStateConfi.getElementsByTagName("ContainerId")[0].childNodes[0].nodeValue except: Error(traceback.format_exc()) eventObject = xml.dom.minidom.parseString(eventData.encode("utf-8")).childNodes[0] for node in eventObject.childNodes: if node.tagName == "Param": name = node.getAttribute("Name") if self.sysInfo.get(name): node.setAttribute("Value",xml.sax.saxutils.escape(str(self.sysInfo[name]))) return eventObject.toxml() class Agent(Util): """ Primary object container for the provisioning process. 
""" def __init__(self): self.GoalState = None self.Endpoint = None self.LoadBalancerProbeServer = None self.HealthReportCounter = 0 self.TransportCert = "" self.EnvMonitor = None self.SendData = None self.DhcpResponse = None def CheckVersions(self): """ Query endpoint server for wire protocol version. Fail if our desired protocol version is not seen. """ # # # # 2010-12-15 # # # 2010-12-15 # 2010-28-10 # # global ProtocolVersion protocolVersionSeen = False node = xml.dom.minidom.parseString(self.HttpGetWithoutHeaders("/?comp=versions")).childNodes[0] if node.localName != "Versions": Error("CheckVersions: root not Versions") return False for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE and a.localName == "Supported": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE and b.localName == "Version": v = GetNodeTextData(b) LogIfVerbose("Fabric supported wire protocol version: " + v) if v == ProtocolVersion: protocolVersionSeen = True if a.nodeType == node.ELEMENT_NODE and a.localName == "Preferred": v = GetNodeTextData(a.getElementsByTagName("Version")[0]) Log("Fabric preferred wire protocol version: " + v) if not protocolVersionSeen: Warn("Agent supported wire protocol version: " + ProtocolVersion + " was not advertised by Fabric.") else: Log("Negotiated wire protocol version: " + ProtocolVersion) return True def Unpack(self, buffer, offset, range): """ Unpack bytes into python values. """ result = 0 for i in range: result = (result << 8) | Ord(buffer[offset + i]) return result def UnpackLittleEndian(self, buffer, offset, length): """ Unpack little endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(length - 1, -1, -1))) def UnpackBigEndian(self, buffer, offset, length): """ Unpack big endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(0, length))) def HexDump3(self, buffer, offset, length): """ Dump range of buffer in formatted hex. 
""" return ''.join(['%02X' % Ord(char) for char in buffer[offset:offset + length]]) def HexDump2(self, buffer): """ Dump buffer in formatted hex. """ return self.HexDump3(buffer, 0, len(buffer)) def BuildDhcpRequest(self): """ Build DHCP request string. """ # # typedef struct _DHCP { # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ # UINT8 HardwareAddressType; /* htype: ethernet */ # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ # UINT8 Hops; /* hops: 0 */ # UINT8 TransactionID[4]; /* xid: random */ # UINT8 Seconds[2]; /* secs: 0 */ # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast */ # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte ethernet MAC address */ # UINT8 ServerName[64]; /* sname: 0 */ # UINT8 BootFileName[128]; /* file: 0 */ # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # # UINT8 MessageTypeCode; /* 53 */ # UINT8 MessageTypeLength; /* 1 */ # UINT8 MessageType; /* 1 for DISCOVER */ # UINT8 End; /* 255 */ # } DHCP; # # tuple of 244 zeros # (struct.pack_into would be good here, but requires Python 2.5) sendData = [0] * 244 transactionID = os.urandom(4) macAddress = MyDistro.GetMacAddress() # Opcode = 1 # HardwareAddressType = 1 (ethernet/MAC) # HardwareAddressLength = 6 (ethernet/MAC/48 bits) for a in range(0, 3): sendData[a] = [1, 1, 6][a] # fill in transaction id (random number to ensure response matches request) for a in range(0, 4): sendData[4 + a] = Ord(transactionID[a]) LogIfVerbose("BuildDhcpRequest: transactionId:%s,%04X" % (self.HexDump2(transactionID), self.UnpackBigEndian(sendData, 4, 4))) # fill in ClientHardwareAddress for a in range(0, 6): sendData[0x1C + a] = Ord(macAddress[a]) # DHCP Magic Cookie: 99, 130, 83, 99 # MessageTypeCode = 53 DHCP Message Type # MessageTypeLength = 1 # 
MessageType = DHCPDISCOVER # End = 255 DHCP_END for a in range(0, 8): sendData[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] return array.array("B", sendData) def IntegerToIpAddressV4String(self, a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, a & 0xFF) def RouteAdd(self, net, mask, gateway): """ Add specified route using /sbin/route add -net. """ net = self.IntegerToIpAddressV4String(net) mask = self.IntegerToIpAddressV4String(mask) gateway = self.IntegerToIpAddressV4String(gateway) Log("Route add: net={0}, mask={1}, gateway={2}".format(net, mask, gateway)) MyDistro.routeAdd(net, mask, gateway) def SetDefaultGateway(self, gateway): """ Set default gateway """ gateway = self.IntegerToIpAddressV4String(gateway) Log("Set default gateway: {0}".format(gateway)) MyDistro.setDefaultGateway(gateway) def HandleDhcpResponse(self, sendData, receiveBuffer): """ Parse DHCP response: Set default gateway. Set default routes. Retrieve endpoint server. Returns endpoint server or None on error. 
""" LogIfVerbose("HandleDhcpResponse") bytesReceived = len(receiveBuffer) if bytesReceived < 0xF6: Error("HandleDhcpResponse: Too few bytes received " + str(bytesReceived)) return None LogIfVerbose("BytesReceived: " + hex(bytesReceived)) LogWithPrefixIfVerbose("DHCP response:", HexDump(receiveBuffer, bytesReceived)) # check transactionId, cookie, MAC address # cookie should never mismatch # transactionId and MAC address may mismatch if we see a response meant from another machine for offsets in [list(range(4, 4 + 4)), list(range(0x1C, 0x1C + 6)), list(range(0xEC, 0xEC + 4))]: for offset in offsets: sentByte = Ord(sendData[offset]) receivedByte = Ord(receiveBuffer[offset]) if sentByte != receivedByte: LogIfVerbose("HandleDhcpResponse: sent cookie:" + self.HexDump3(sendData, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: rcvd cookie:" + self.HexDump3(receiveBuffer, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: sent transactionID:" + self.HexDump3(sendData, 4, 4)) LogIfVerbose("HandleDhcpResponse: rcvd transactionID:" + self.HexDump3(receiveBuffer, 4, 4)) LogIfVerbose("HandleDhcpResponse: sent ClientHardwareAddress:" + self.HexDump3(sendData, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: rcvd ClientHardwareAddress:" + self.HexDump3(receiveBuffer, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: transactionId, cookie, or MAC address mismatch") return None endpoint = None # # Walk all the returned options, parsing out what we need, ignoring the others. # We need the custom option 245 to find the the endpoint we talk to, # as well as, to handle some Linux DHCP client incompatibilities, # options 3 for default gateway and 249 for routes. And 255 is end. 
# i = 0xF0 # offset to first option while i < bytesReceived: option = Ord(receiveBuffer[i]) length = 0 if (i + 1) < bytesReceived: length = Ord(receiveBuffer[i + 1]) LogIfVerbose("DHCP option " + hex(option) + " at offset:" + hex(i) + " with length:" + hex(length)) if option == 255: LogIfVerbose("DHCP packet ended at offset " + hex(i)) break elif option == 249: # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx LogIfVerbose("Routes at offset:" + hex(i) + " with length:" + hex(length)) if length < 5: Error("Data too small for option " + str(option)) j = i + 2 while j < (i + length + 2): maskLengthBits = Ord(receiveBuffer[j]) maskLengthBytes = (((maskLengthBits + 7) & ~7) >> 3) mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - maskLengthBits)) j += 1 net = self.UnpackBigEndian(receiveBuffer, j, maskLengthBytes) net <<= (32 - maskLengthBytes * 8) net &= mask j += maskLengthBytes gateway = self.UnpackBigEndian(receiveBuffer, j, 4) j += 4 self.RouteAdd(net, mask, gateway) if j != (i + length + 2): Error("HandleDhcpResponse: Unable to parse routes") elif option == 3 or option == 245: if i + 5 < bytesReceived: if length != 4: Error("HandleDhcpResponse: Endpoint or Default Gateway not 4 bytes") return None gateway = self.UnpackBigEndian(receiveBuffer, i + 2, 4) IpAddress = self.IntegerToIpAddressV4String(gateway) if option == 3: self.SetDefaultGateway(gateway) name = "DefaultGateway" else: endpoint = IpAddress name = "Azure wire protocol endpoint" LogIfVerbose(name + ": " + IpAddress + " at " + hex(i)) else: Error("HandleDhcpResponse: Data too small for option " + str(option)) else: LogIfVerbose("Skipping DHCP option " + hex(option) + " at " + hex(i) + " with length " + hex(length)) i += length + 2 return endpoint def DoDhcpWork(self): """ Discover the wire server via DHCP option 245. And workaround incompatibility with Azure DHCP servers. """ ShortSleep = False # Sleep 1 second before retrying DHCP queries. 
ifname=None sleepDurations = [0, 10, 30, 60, 60] maxRetry = len(sleepDurations) lastTry = (maxRetry - 1) for retry in range(0, maxRetry): try: #Open DHCP port if iptables is enabled. Run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. Run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. strRetry = str(retry) prefix = "DoDhcpWork: try=" + strRetry LogIfVerbose(prefix) sendData = self.BuildDhcpRequest() LogWithPrefixIfVerbose("DHCP request:", HexDump(sendData, len(sendData))) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) missingDefaultRoute = True try: if DistInfo()[0] == 'FreeBSD': missingDefaultRoute = True else: routes = RunGetOutput("route -n")[1] for line in routes.split('\n'): if line.startswith("0.0.0.0 ") or line.startswith("default "): missingDefaultRoute = False except: pass if missingDefaultRoute: # This is required because sending after binding to 0.0.0.0 fails with # network unreachable when the default gateway is not set up. 
ifname=MyDistro.GetInterfaceName() Log("DoDhcpWork: Missing default route - adding broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route add -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route add 255.255.255.255 dev " + ifname,chk_err=False) if MyDistro.isDHCPEnabled(): MyDistro.stopDHCP() sock.bind(("0.0.0.0", 68)) sock.sendto(sendData, ("", 67)) sock.settimeout(10) Log("DoDhcpWork: Setting socket.timeout=10, entering recv") receiveBuffer = sock.recv(1024) endpoint = self.HandleDhcpResponse(sendData, receiveBuffer) if endpoint == None: LogIfVerbose("DoDhcpWork: No endpoint found") if endpoint != None or retry == lastTry: if endpoint != None: self.SendData = sendData self.DhcpResponse = receiveBuffer if retry == lastTry: LogIfVerbose("DoDhcpWork: try=" + strRetry) return endpoint sleepDuration = [sleepDurations[retry % len(sleepDurations)], 1][ShortSleep] LogIfVerbose("DoDhcpWork: sleep=" + str(sleepDuration)) time.sleep(sleepDuration) except Exception, e: ErrorWithPrefix(prefix, str(e)) ErrorWithPrefix(prefix, traceback.format_exc()) finally: sock.close() if missingDefaultRoute: #We added this route - delete it Log("DoDhcpWork: Removing broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route del -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route del 255.255.255.255 dev " + ifname,chk_err=False) # We supress error logging on error. if MyDistro.isDHCPEnabled(): MyDistro.startDHCP() return None def UpdateAndPublishHostName(self, name): """ Set hostname locally and publish to iDNS """ Log("Setting host name: " + name) MyDistro.publishHostname(name) ethernetInterface = MyDistro.GetInterfaceName() MyDistro.RestartInterface(ethernetInterface) self.RestoreRoutes() def RestoreRoutes(self): """ If there is a DHCP response, then call HandleDhcpResponse. 
""" if self.SendData != None and self.DhcpResponse != None: self.HandleDhcpResponse(self.SendData, self.DhcpResponse) def UpdateGoalState(self): """ Retreive goal state information from endpoint server. Parse xml and initialize Agent.GoalState object. Return object or None on error. """ goalStateXml = None maxRetry = 9 log = NoLog for retry in range(1, maxRetry + 1): strRetry = str(retry) log("retry UpdateGoalState,retry=" + strRetry) goalStateXml = self.HttpGetWithHeaders("/machine/?comp=goalstate") if goalStateXml != None: break log = Log time.sleep(retry) if not goalStateXml: Error("UpdateGoalState failed.") return Log("Retrieved GoalState from Azure Fabric.") self.GoalState = GoalState(self).Parse(goalStateXml) return self.GoalState def ReportReady(self): """ Send health report 'Ready' to server. This signals the fabric that our provosion is completed, and the host is ready for operation. """ counter = (self.HealthReportCounter + 1) % 1000000 self.HealthReportCounter = counter healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "Ready") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportNotReady(self, status, desc): """ Send health report 'Provisioning' to server. This signals the fabric that our provosion is starting. """ healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "NotReady" + "
" + status + "" + desc + "
" + "
") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportRoleProperties(self, thumbprint): """ Send roleProperties and thumbprint to server. """ roleProperties = ("" + "" + self.GoalState.ContainerId + "" + "" + "" + self.GoalState.RoleInstanceId + "" + "" + "") a = self.HttpPostWithHeaders("/machine?comp=roleProperties", roleProperties) Log("Posted Role Properties. CertificateThumbprint=" + thumbprint) return a def LoadBalancerProbeServer_Shutdown(self): """ Shutdown the LoadBalancerProbeServer. """ if self.LoadBalancerProbeServer != None: self.LoadBalancerProbeServer.shutdown() self.LoadBalancerProbeServer = None def GenerateTransportCert(self): """ Create ssl certificate for https communication with endpoint server. """ Run(Openssl + " req -x509 -nodes -subj /CN=LinuxTransport -days 32768 -newkey rsa:2048 -keyout TransportPrivate.pem -out TransportCert.pem") cert = "" for line in GetFileContents("TransportCert.pem").split('\n'): if not "CERTIFICATE" in line: cert += line.rstrip() return cert def DoVmmStartup(self): """ Spawn the VMM startup script. """ Log("Starting Microsoft System Center VMM Initialization Process") pid = subprocess.Popen(["/bin/bash","/mnt/cdrom/secure/"+VMM_STARTUP_SCRIPT_NAME,"-p /mnt/cdrom/secure/ "]).pid time.sleep(5) sys.exit(0) def TryUnloadAtapiix(self): """ If global modloaded is True, then we loaded the ata_piix kernel module, unload it. """ if modloaded: Run("rmmod ata_piix.ko",chk_err=False) Log("Unloaded ata_piix.ko driver for ATAPI CD-ROM") def TryLoadAtapiix(self): """ Load the ata_piix kernel module if it exists. If successful, set global modloaded to True. If unable to load module leave modloaded False. 
""" global modloaded modloaded=False retcode,krn=RunGetOutput('uname -r') krn_pth='/lib/modules/'+krn.strip('\n')+'/kernel/drivers/ata/ata_piix.ko' if Run("lsmod | grep ata_piix",chk_err=False) == 0 : Log("Module " + krn_pth + " driver for ATAPI CD-ROM is already present.") return 0 if retcode: Error("Unable to provision: Failed to call uname -r") return "Unable to provision: Failed to call uname" if os.path.isfile(krn_pth): retcode,output=RunGetOutput("insmod " + krn_pth,chk_err=False) else: Log("Module " + krn_pth + " driver for ATAPI CD-ROM does not exist.") return 1 if retcode != 0: Error('Error calling insmod for '+ krn_pth + ' driver for ATAPI CD-ROM') return retcode time.sleep(1) # check 3 times if the mod is loaded for i in range(3): if Run('lsmod | grep ata_piix'): continue else : modloaded=True break if not modloaded: Error('Unable to load '+ krn_pth + ' driver for ATAPI CD-ROM') return 1 Log("Loaded " + krn_pth + " driver for ATAPI CD-ROM") # we have succeeded loading the ata_piix mod if it can be done. def SearchForVMMStartup(self): """ Search for a DVD/CDROM containing VMM's VMM_CONFIG_FILE_NAME. Call TryLoadAtapiix in case we must load the ata_piix module first. If VMM_CONFIG_FILE_NAME is found, call DoVmmStartup. Else, return to Azure Provisioning process. """ self.TryLoadAtapiix() if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) mounted=False for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None: continue dvd = '/dev/'+dvds.group(0) if Run("LC_ALL=C fdisk -l " + dvd + " | grep Disk",chk_err=False): continue # Not mountable else: for retry in range(1,6): retcode,output=RunGetOutput("mount -v " + dvd + " /mnt/cdrom/secure") Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) mounted=True break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." 
+ str(retry) ) mounted=True break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not mounted: # unable to mount continue if not os.path.isfile("/mnt/cdrom/secure/"+VMM_CONFIG_FILE_NAME): #nope - mount the next drive if mounted: Run("umount "+dvd,chk_err=False) mounted=False continue else : # it is the vmm startup self.DoVmmStartup() Log("VMM Init script not found. Provisioning for Azure") return def Provision(self): """ Responible for: Regenerate ssh keys, Mount, read, and parse ovfenv.xml from provisioning dvd rom Process the ovfenv.xml info Call ReportRoleProperties If configured, delete root password. Return None on success, error string on error. """ enabled = Config.get("Provisioning.Enabled") if enabled != None and enabled.lower().startswith("n"): return Log("Provisioning image started.") type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") Run("ssh-keygen -N '' -t " + type + " -f /etc/ssh/ssh_host_" + type + "_key") MyDistro.restartSshService() #SetFileContents(LibDir + "/provisioned", "") dvd = None for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None : continue dvd = '/dev/'+dvds.group(0) if dvd == None: # No DVD device detected Error("No DVD device detected, unable to provision.") return "No DVD device detected, unable to provision." 
if MyDistro.mediaHasFilesystem(dvd) is False : out=MyDistro.load_ata_piix() if out: return out for i in range(10): # we may have to wait if os.path.exists(dvd): break Log("Waiting for DVD - sleeping 1 - "+str(i+1)+" try...") time.sleep(1) if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) #begin mount loop - 5 tries - 5 sec wait between for retry in range(1,6): location='/mnt/cdrom/secure' retcode,output=MyDistro.mountDVD(dvd,location) Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." + str(retry) ) break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not os.path.isfile("/mnt/cdrom/secure/ovf-env.xml"): Error("Unable to provision: Missing ovf-env.xml on DVD.") return "Failed to retrieve provisioning data (0x02)." ovfxml = (GetFileContents(u"/mnt/cdrom/secure/ovf-env.xml",asbin=False)) # use unicode here to ensure correct codec gets used. if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. ovfxml=ovfxml.strip(chr(0x00)) # we may have NULLs. 
ovfxml=ovfxml[ovfxml.find('.*?<", "*<", ovfxml)) Run("umount " + dvd,chk_err=False) MyDistro.unload_ata_piix() error = None if ovfxml != None: Log("Provisioning image using OVF settings in the DVD.") ovfobj = OvfEnv().Parse(ovfxml) if ovfobj != None: error = ovfobj.Process() if error : Error ("Provisioning image FAILED " + error) return ("Provisioning image FAILED " + error) Log("Ovf XML process finished") # This is done here because regenerated SSH host key pairs may be potentially overwritten when processing the ovfxml fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() Log("Provisioning image completed.") return error def Run(self): """ Called by 'waagent -daemon.' Main loop to process the goal state. State is posted every 25 seconds when provisioning has been completed. Search for VMM enviroment, start VMM script if found. Perform DHCP and endpoint server discovery by calling DoDhcpWork(). Check wire protocol versions. Set SCSI timeout on root device. Call GenerateTransportCert() to create ssl certs for server communication. Call UpdateGoalState(). If not provisioned, call ReportNotReady("Provisioning", "Starting") Call Provision(), set global provisioned = True if successful. Call goalState.Process() Start LBProbeServer if indicated in waagent.conf. Start the StateConsumer if indicated in waagent.conf. ReportReady if provisioning is complete. If provisioning failed, call ReportNotReady("ProvisioningFailed", provisionError) """ SetFileContents("/var/run/waagent.pid", str(os.getpid()) + "\n") reportHandlerStatusCount = 0 # Determine if we are in VMM. Spawn VMM_STARTUP_SCRIPT_NAME if found. 
self.SearchForVMMStartup() ipv4='' while ipv4 == '' or ipv4 == '0.0.0.0' : ipv4=MyDistro.GetIpv4Address() if ipv4 == '' or ipv4 == '0.0.0.0' : Log("Waiting for network.") time.sleep(10) Log("IPv4 address: " + ipv4) mac='' mac=MyDistro.GetMacAddress() if len(mac)>0 : Log("MAC address: " + ":".join(["%02X" % Ord(a) for a in mac])) # Consume Entropy in ACPI table provided by Hyper-V try: SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0")) except: pass Log("Probing for Azure environment.") self.Endpoint = self.DoDhcpWork() while self.Endpoint == None: Log("Azure environment not detected.") Log("Retry environment detection in 60 seconds") time.sleep(60) self.Endpoint = self.DoDhcpWork() Log("Discovered Azure endpoint: " + self.Endpoint) if not self.CheckVersions(): Error("Agent.CheckVersions failed") sys.exit(1) self.EnvMonitor = EnvMonitor() # Set SCSI timeout on SCSI disks MyDistro.initScsiDiskTimeout() global provisioned global provisionError global Openssl Openssl = Config.get("OS.OpensslPath") if Openssl == None: Openssl = "openssl" self.TransportCert = self.GenerateTransportCert() eventMonitor = None incarnation = None # goalStateIncarnationFromHealthReport currentPort = None # loadBalancerProbePort goalState = None # self.GoalState, instance of GoalState provisioned = os.path.exists(LibDir + "/provisioned") program = Config.get("Role.StateConsumer") provisionError = None lbProbeResponder = True setting = Config.get("LBProbeResponder") if setting != None and setting.lower().startswith("n"): lbProbeResponder = False while True: if (goalState == None) or (incarnation == None) or (goalState.Incarnation != incarnation): try: goalState = self.UpdateGoalState() except HttpResourceGoneError as e: Warn("Incarnation is out of date:{0}".format(e)) incarnation = None continue if goalState == None : Warn("Failed to fetch goalstate") continue if provisioned == False: self.ReportNotReady("Provisioning", "Starting") goalState.Process() if 
provisioned == False: provisionError = self.Provision() if provisionError == None : provisioned = True SetFileContents(LibDir + "/provisioned", "") lastCtime = "NOTFIND" try: walaConfigFile = MyDistro.getConfigurationPath() lastCtime = time.ctime(os.path.getctime(walaConfigFile)) except: pass #Get Ctime of wala config, can help identify the base image of this VM AddExtensionEvent(name="WALA",op=WALAEventOperation.Provision,isSuccess=True, message="WALA Config Ctime:"+lastCtime) executeCustomData = Config.get("Provisioning.ExecuteCustomData") if executeCustomData != None and executeCustomData.lower().startswith("y"): if os.path.exists(LibDir + '/CustomData'): Run('chmod +x ' + LibDir + '/CustomData') Run(LibDir + '/CustomData') else: Error(LibDir + '/CustomData does not exist.') # # only one port supported # restart server if new port is different than old port # stop server if no longer a port # goalPort = goalState.LoadBalancerProbePort if currentPort != goalPort: try: self.LoadBalancerProbeServer_Shutdown() currentPort = goalPort if currentPort != None and lbProbeResponder == True: self.LoadBalancerProbeServer = LoadBalancerProbeServer(currentPort) if self.LoadBalancerProbeServer == None : lbProbeResponder = False Log("Unable to create LBProbeResponder.") except Exception, e: Error("Failed to launch LBProbeResponder: {0}".format(e)) currentPort = None # Report SSH key fingerprint type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" host_key_path = "/etc/ssh/ssh_host_" + type + "_key.pub" if(MyDistro.waitForSshHostKey(host_key_path)): fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) if program != None and DiskActivated == True: try: Children.append(subprocess.Popen([program, "Ready"])) except OSError, e : ErrorWithPrefix('SharedConfig.Parse','Exception: '+ str(e) +' occured launching ' + program ) program = None 
sleepToReduceAccessDenied = 3 time.sleep(sleepToReduceAccessDenied) if provisionError != None: incarnation = self.ReportNotReady("ProvisioningFailed", provisionError) else: incarnation = self.ReportReady() # Process our extensions. if goalState.ExtensionsConfig == None and goalState.ExtensionsConfigXml != None : reportHandlerStatusCount = 0 #Reset count when new goal state comes goalState.ExtensionsConfig = ExtensionsConfig().Parse(goalState.ExtensionsConfigXml) # report the status/heartbeat results of extension processing if goalState.ExtensionsConfig != None : ret = goalState.ExtensionsConfig.ReportHandlerStatus() if ret != 0: Error("Failed to report handler status") elif reportHandlerStatusCount % 1000 == 0: #Agent report handler status every 25 seconds. Reduce the log entries by adding a count Log("Successfully reported handler status") reportHandlerStatusCount += 1 if not eventMonitor: eventMonitor = WALAEventMonitor(self.HttpPostWithHeaders) eventMonitor.StartEventsLoop() time.sleep(25 - sleepToReduceAccessDenied) WaagentLogrotate = """\ /var/log/waagent.log { monthly rotate 6 notifempty missingok } """ def GetMountPoint(mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def FindInLinuxKernelCmdline(option): """ Return match object if 'option' is present in the kernel boot options of the grub configuration. 
""" m=None matchs=r'^.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?'+option+r'.*$' try: m=FindStringInFile(MyDistro.grubKernelBootOptionsFile,matchs) except IOError, e: Error('FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return m def AppendToLinuxKernelCmdline(option): """ Add 'option' to the kernel boot options of the grub configuration. """ if not FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r')(.*?)("?)$' rep=r'\1\2 '+ option + r'\3' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def RemoveFromLinuxKernelCmdline(option): """ Remove 'option' to the kernel boot options of the grub configuration. """ if FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?)('+option+r')(.*?)("?)$' rep=r'\1\3\4' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def FindStringInFile(fname,matchs): """ Return match object if found in file. """ try: ms=re.compile(matchs) for l in (open(fname,'r')).readlines(): m=re.search(ms,l) if m: return m except: raise return None def ReplaceStringInFile(fname,src,repl): """ Replace 'src' with 'repl' in file. """ try: sr=re.compile(src) if FindStringInFile(fname,src): updated='' for l in (open(fname,'r')).readlines(): n=re.sub(sr,repl,l) updated+=n ReplaceFileContentsAtomic(fname,updated) except : raise return def ApplyVNUMAWorkaround(): """ If kernel version has NUMA bug, add 'numa=off' to kernel boot options. 
""" VersionParts = platform.release().replace('-', '.').split('.') if int(VersionParts[0]) > 2: return if int(VersionParts[1]) > 6: return if int(VersionParts[2]) > 37: return if AppendToLinuxKernelCmdline("numa=off") == 0 : Log("Your kernel version " + platform.release() + " has a NUMA-related bug: NUMA has been disabled.") else : "Error adding 'numa=off'. NUMA has not been disabled." def RevertVNUMAWorkaround(): """ Remove 'numa=off' from kernel boot options. """ if RemoveFromLinuxKernelCmdline("numa=off") == 0 : Log('NUMA has been re-enabled') else : Log('NUMA has not been re-enabled') def Install(): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) ) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", MyDistro.waagent_conf_file) SetFileContents("/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") ApplyVNUMAWorkaround() return 0 def GetMyDistro(dist_class_name=''): """ Return MyDistro object. NOTE: Logging is not initialized at this point. 
""" if dist_class_name == '': if 'Linux' in platform.system(): Distro=DistInfo()[0] else : # I know this is not Linux! if 'FreeBSD' in platform.system(): Distro=platform.system() Distro=Distro.strip('"') Distro=Distro.strip(' ') dist_class_name=Distro+'Distro' else: Distro=dist_class_name if not globals().has_key(dist_class_name): print Distro+' is not a supported distribution.' return None return globals()[dist_class_name]() # the distro class inside this module. def DistInfo(fullname=0): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', str(platform.release())) distinfo = ['FreeBSD', release] return distinfo if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=fullname)) distinfo[0] = distinfo[0].strip() # remove trailing whitespace in distro name if os.path.exists("/etc/euleros-release"): distinfo[0] = "euleros" return distinfo else: return platform.dist() def PackagedInstall(buildroot): """ Called from setup.py for use by RPM. Generic implementation Creates directories and files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent, /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot. Copies generated files waagent.conf, into place and exits. """ MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) MyDistro.packagedInstall(buildroot) def LibraryInstall(buildroot): pass def Uninstall(): """ Uninstall the agent service. Copy RulesFiles back to original locations. Delete agent-related files. Call RevertVNUMAWorkaround(). """ SwitchCwd() for a in RulesFiles: if os.path.isfile(GetLastPathElement(a)): try: shutil.move(GetLastPathElement(a), a) Warn("Moved " + LibDir + "/" + GetLastPathElement(a) + " -> " + a ) except: pass MyDistro.unregisterAgentService() MyDistro.uninstallDeleteFiles() RevertVNUMAWorkaround() return 0 def Deprovision(force, deluser): """ Remove user accounts created by provisioning. 
Disables root password if Provisioning.DeleteRootPassword = 'y' Stop agent service. Remove SSH host keys if they were generated by the provision. Set hostname to 'localhost.localdomain'. Delete cached system configuration files in /var/lib and /var/lib/waagent. """ #Append blank line at the end of file, so the ctime of this file is changed every time Run("echo ''>>"+ MyDistro.getConfigurationPath()) SwitchCwd() ovfxml = GetFileContents(LibDir+"/ovf-env.xml") ovfobj = None if ovfxml != None: ovfobj = OvfEnv().Parse(ovfxml, True) print("WARNING! The waagent service will be stopped.") print("WARNING! All SSH host key pairs will be deleted.") print("WARNING! Cached DHCP leases will be deleted.") MyDistro.deprovisionWarnUser() delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): print("WARNING! root password will be disabled. You will not be able to login as root.") if ovfobj != None and deluser == True: print("WARNING! " + ovfobj.UserName + " account and entire home directory will be deleted.") if force == False and not raw_input('Do you want to proceed (y/n)? ').startswith('y'): return 1 MyDistro.stopAgentService() # Remove SSH host keys regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") # Remove root password if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() # Remove distribution specific networking configuration MyDistro.publishHostname('localhost.localdomain') MyDistro.deprovisionDeleteFiles() if deluser == True: MyDistro.DeleteAccount(ovfobj.UserName) return 0 def SwitchCwd(): """ Switch to cwd to /var/lib/waagent. Create if not present. """ CreateDir(LibDir, "root", 0700) os.chdir(LibDir) def Usage(): """ Print the arguments to waagent. 
""" print("usage: " + sys.argv[0] + " [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]") return 0 def main(): """ Instantiate MyDistro, exit if distro class is not defined. Parse command-line arguments, exit with usage() on error. Instantiate ConfigurationProvider. Call appropriate non-daemon methods and exit. If daemon mode, enter Agent.Run() loop. """ if GuestAgentVersion == "": print("WARNING! This is a non-standard agent that does not include a valid version string.") if len(sys.argv) == 1: sys.exit(Usage()) LoggerInit('/var/log/waagent.log','/dev/console') global LinuxDistro LinuxDistro=DistInfo()[0] global MyDistro MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) args = [] conf_file = None global force force = False for a in sys.argv[1:]: if re.match("^([-/]*)(help|usage|\?)", a): sys.exit(Usage()) elif re.match("^([-/]*)version", a): print(GuestAgentVersion + " running on " + LinuxDistro) sys.exit(0) elif re.match("^([-/]*)verbose", a): myLogger.verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^(?:[-/]*)conf=.+", a): conf_file = re.match("^(?:[-/]*)conf=(.+)", a).groups()[0] elif re.match("^([-/]*)(setup|install)", a): sys.exit(MyDistro.Install()) elif re.match("^([-/]*)(uninstall)", a): sys.exit(Uninstall()) else: args.append(a) global Config Config = ConfigurationProvider(conf_file) logfile = Config.get("Logs.File") if logfile is not None: myLogger.file_path = logfile logconsole = Config.get("Logs.Console") if logconsole is not None and logconsole.lower().startswith("n"): myLogger.con_path = None verbose = Config.get("Logs.Verbose") if verbose != None and verbose.lower().startswith("y"): myLogger.verbose=True global daemon daemon = False for a in args: if re.match("^([-/]*)deprovision\+user", a): sys.exit(Deprovision(force, True)) elif re.match("^([-/]*)deprovision", a): sys.exit(Deprovision(force, False)) elif re.match("^([-/]*)daemon", a): daemon = True elif 
re.match("^([-/]*)serialconsole", a): AppendToLinuxKernelCmdline("console=ttyS0 earlyprintk=ttyS0") Log("Configured kernel to use ttyS0 as the boot console.") sys.exit(0) else: print("Invalid command line parameter:" + a) sys.exit(1) if daemon == False: sys.exit(Usage()) global modloaded modloaded = False while True: try: SwitchCwd() Log(GuestAgentLongName + " Version: " + GuestAgentVersion) if IsLinux(): Log("Linux Distribution Detected : " + LinuxDistro) global WaAgent WaAgent = Agent() WaAgent.Run() except Exception, e: Error(traceback.format_exc()) Error("Exception: " + str(e)) Log("Restart agent in 15 seconds") time.sleep(15) if __name__ == '__main__' : main() WALinuxAgent-2.2.32/config/000077500000000000000000000000001335416306700153635ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/66-azure-storage.rules000066400000000000000000000031451335416306700214630ustar00rootroot00000000000000ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk" GOTO="azure_end" LABEL="azure_disk" # Root has a GUID of 0000 as the second value # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" ATTRS{device_id}=="?00000001-0001-*", ENV{fabric_name}="BEK", GOTO="azure_names" # Wellknown SCSI controllers ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="azure_datadisk" GOTO="azure_end" # Retrieve LUN number for datadisks LABEL="azure_datadisk" 
ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" GOTO="azure_end" # Create the symlinks LABEL="azure_names" ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" LABEL="azure_end" WALinuxAgent-2.2.32/config/99-azure-product-uuid.rules000066400000000000000000000005271335416306700224520ustar00rootroot00000000000000SUBSYSTEM!="dmi", GOTO="product_uuid-exit" ATTR{sys_vendor}!="Microsoft Corporation", GOTO="product_uuid-exit" ATTR{product_name}!="Virtual Machine", GOTO="product_uuid-exit" TEST!="/sys/devices/virtual/dmi/id/product_uuid", GOTO="product_uuid-exit" RUN+="/bin/chmod 0444 /sys/devices/virtual/dmi/id/product_uuid" LABEL="product_uuid-exit" WALinuxAgent-2.2.32/config/alpine/000077500000000000000000000000001335416306700166335ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/alpine/waagent.conf000066400000000000000000000053171335416306700211360ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. 
Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable logging to serial console (y|n) # When stdout is not enough... # 'y' if not set Logs.Console=y # Enable verbose logging (y|n) Logs.Verbose=n # Preferred network interface to communicate with Azure platform Network.Interface=eth0 # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/arch/000077500000000000000000000000001335416306700163005ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/arch/waagent.conf000066400000000000000000000055141335416306700206020ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. 
ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.32/config/bigip/000077500000000000000000000000001335416306700164555ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/bigip/waagent.conf000066400000000000000000000056201335416306700207550ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. 
Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. # waagent cannot do this on BIG-IP VE Provisioning.MonitorHostName=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Specify location of waagent lib dir on BIG-IP Lib.Dir=/shared/vadc/azure/waagent/ # Specify location of sshd config file on BIG-IP OS.SshdConfigPath=/config/ssh/sshd_config # Disable RDMA management and set up OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/clearlinux/000077500000000000000000000000001335416306700175315ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/clearlinux/waagent.conf000066400000000000000000000046231335416306700220330ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. 
Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable self-update, default is enabled AutoUpdate.Enabled=y AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.32/config/coreos/000077500000000000000000000000001335416306700166555ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/coreos/waagent.conf000066400000000000000000000062611335416306700211570ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. 
ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks OS.AllowHTTP=y # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/debian/000077500000000000000000000000001335416306700166055ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/debian/waagent.conf000066400000000000000000000063441335416306700211110ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/freebsd/000077500000000000000000000000001335416306700167755ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/freebsd/waagent.conf000066400000000000000000000061371335416306700213010ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs' here. 
ResourceDisk.Filesystem=ufs # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd OS.SudoersDir=/usr/local/etc/sudoers.d # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/gaia/000077500000000000000000000000001335416306700162645ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/gaia/waagent.conf000066400000000000000000000056631335416306700205730ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. Provisioning.PasswordCryptId=1 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext3 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=y # Size of the swapfile. ResourceDisk.SwapSizeMB=1024 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=/var/lib/waagent/openssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=n # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.32/config/iosxe/000077500000000000000000000000001335416306700165125ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/iosxe/waagent.conf000066400000000000000000000055761335416306700210240ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=n # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. 
OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.32/config/nsbsd/000077500000000000000000000000001335416306700164745ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/nsbsd/waagent.conf000066400000000000000000000055161335416306700210000ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. 
Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs' here. ResourceDisk.Filesystem=ufs # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) TODO set n Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd OS.SudoersDir=/usr/local/etc/sudoers.d # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # Lib.Dir=/usr/Firewall/var/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # Extension.LogDir=/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=n # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. 
If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n WALinuxAgent-2.2.32/config/openbsd/000077500000000000000000000000001335416306700170155ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/openbsd/waagent.conf000066400000000000000000000055031335416306700213150ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. OpenBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ufs2 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. 
ResourceDisk.EnableSwap=y # Max size of the swap partition in MB ResourceDisk.SwapSizeMB=65536 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=/usr/local/bin/eopenssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.32/config/suse/000077500000000000000000000000001335416306700163425ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/suse/waagent.conf000066400000000000000000000057701335416306700206500ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. 
Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.32/config/ubuntu/000077500000000000000000000000001335416306700167055ustar00rootroot00000000000000WALinuxAgent-2.2.32/config/ubuntu/waagent.conf000066400000000000000000000061401335416306700212030ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=n # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=y # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. 
Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable RDMA kernel update, this value is effective on Ubuntu # OS.UpdateRdmaDriver=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. 
If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/waagent.conf000066400000000000000000000062351335416306700176660ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/config/waagent.logrotate000066400000000000000000000001341335416306700207310ustar00rootroot00000000000000/var/log/waagent.log { compress monthly rotate 6 notifempty missingok } WALinuxAgent-2.2.32/init/000077500000000000000000000000001335416306700150615ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/arch/000077500000000000000000000000001335416306700157765ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/arch/waagent.service000066400000000000000000000005371335416306700210130ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.32/init/clearlinux/000077500000000000000000000000001335416306700172275ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/clearlinux/waagent.service000066400000000000000000000005661335416306700222460ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/usr/share/defaults/waagent/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target 
WALinuxAgent-2.2.32/init/coreos/000077500000000000000000000000001335416306700163535ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/coreos/cloud-config.yml000066400000000000000000000023511335416306700214500ustar00rootroot00000000000000#cloud-config coreos: units: - name: etcd.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_PEER_ELECTION_TIMEOUT=1200 - name: etcd2.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_ELECTION_TIMEOUT=1200 - name: waagent.service command: start runtime: true content: | [Unit] Description=Microsoft Azure Agent Wants=network-online.target sshd-keygen.service After=network-online.target sshd-keygen.service [Service] Type=simple Restart=always RestartSec=5s ExecStart=/usr/share/oem/python/bin/python /usr/share/oem/bin/waagent -daemon - name: oem-cloudinit.service command: restart runtime: yes content: | [Unit] Description=Cloudinit from Azure metadata [Service] Type=oneshot ExecStart=/usr/bin/coreos-cloudinit --oem=azure oem: id: azure name: Microsoft Azure version-id: 2.1.4 home-url: https://azure.microsoft.com/ bug-report-url: https://github.com/coreos/bugs/issues WALinuxAgent-2.2.32/init/freebsd/000077500000000000000000000000001335416306700164735ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/freebsd/waagent000077500000000000000000000005151335416306700200500ustar00rootroot00000000000000#!/bin/sh # PROVIDE: waagent # REQUIRE: sshd netif dhclient # KEYWORD: nojail . 
/etc/rc.subr PATH=$PATH:/usr/local/bin:/usr/local/sbin name="waagent" rcvar="waagent_enable" pidfile="/var/run/waagent.pid" command="/usr/local/sbin/${name}" command_interpreter="python" command_args="start" load_rc_config $name run_rc_command "$1" WALinuxAgent-2.2.32/init/gaia/000077500000000000000000000000001335416306700157625ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/gaia/waagent000077500000000000000000000014561335416306700173440ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent.sh start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start & success echo } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.32/init/openbsd/000077500000000000000000000000001335416306700165135ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/openbsd/waagent000066400000000000000000000002311335416306700200600ustar00rootroot00000000000000#!/bin/sh daemon="python2.7 /usr/local/sbin/waagent -start" . /etc/rc.d/rc.subr pexp="python /usr/local/sbin/waagent -daemon" rc_reload=NO rc_cmd $1 WALinuxAgent-2.2.32/init/suse/000077500000000000000000000000001335416306700160405ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/suse/waagent000077500000000000000000000062011335416306700174130ustar00rootroot00000000000000#! /bin/sh # # Microsoft Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: MicrosoftAzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the MicrosoftAzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting MicrosoftAzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -start rc_status -v ;; stop) echo -n "Shutting down MicrosoftAzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. 
the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service MicrosoftAzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit WALinuxAgent-2.2.32/init/ubuntu/000077500000000000000000000000001335416306700164035ustar00rootroot00000000000000WALinuxAgent-2.2.32/init/ubuntu/walinuxagent000066400000000000000000000001321335416306700210300ustar00rootroot00000000000000# To disable the Microsoft Azure Agent, set WALINUXAGENT_ENABLED=0 WALINUXAGENT_ENABLED=1 WALinuxAgent-2.2.32/init/ubuntu/walinuxagent.conf000066400000000000000000000007321335416306700217620ustar00rootroot00000000000000description "Microsoft Azure Linux agent" author "Ben Howard " start on runlevel [2345] stop on runlevel [!2345] pre-start script [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then stop ; exit 0 fi if [ ! -x /usr/sbin/waagent ]; then stop ; exit 0 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon respawn WALinuxAgent-2.2.32/init/ubuntu/walinuxagent.service000077500000000000000000000010251335416306700224740ustar00rootroot00000000000000# # NOTE: # This file hosted on WALinuxAgent repository only for reference purposes. # Please refer to a recent image to find out the up-to-date systemd unit file. 
# [Unit] Description=Azure Linux Agent After=network-online.target cloud-init.service Wants=network-online.target sshd.service sshd-keygen.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python3 -u /usr/sbin/waagent -daemon Restart=always [Install] WantedBy=multi-user.target WALinuxAgent-2.2.32/init/waagent000077500000000000000000000014761335416306700164450ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start RETVAL=$? echo return $RETVAL } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? 
;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.32/init/waagent.service000066400000000000000000000005411335416306700200710ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/sbin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.32/makepkg.py000077500000000000000000000067601335416306700161230ustar00rootroot00000000000000#!/usr/bin/env python import glob import os import os.path import shutil import subprocess import sys from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_LONG_VERSION from azurelinuxagent.ga.update import AGENT_MANIFEST_FILE MANIFEST = '''[{{ "name": "{0}", "version": 1.0, "handlerManifest": {{ "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "python -u {1} -run-exthandlers", "disableCommand": "", "rebootAfterInstall": false, "reportHeartbeat": false }} }}]''' PUBLISH_MANIFEST = ''' Microsoft.OSTCLinuxAgent {1} {0} VmRole Microsoft Azure Guest Agent for Linux IaaS true https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent true Microsoft Linux ''' PUBLISH_MANIFEST_FILE = 'manifest.xml' output_path = os.path.join(os.getcwd(), "eggs") target_path = os.path.join(output_path, AGENT_LONG_VERSION) bin_path = os.path.join(target_path, "bin") egg_path = os.path.join(bin_path, AGENT_LONG_VERSION + ".egg") manifest_path = os.path.join(target_path, AGENT_MANIFEST_FILE) publish_manifest_path = os.path.join(target_path, PUBLISH_MANIFEST_FILE) pkg_name = os.path.join(output_path, AGENT_LONG_VERSION + ".zip") family = 'Test' if len(sys.argv) > 1: family = sys.argv[1] def 
do(*args): try: subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("ERROR: {0}".format(str(e))) print("\t{0}".format(" ".join(args))) print(e.output) sys.exit(1) if os.path.isdir(target_path): shutil.rmtree(target_path) elif os.path.isfile(target_path): os.remove(target_path) if os.path.isfile(pkg_name): os.remove(pkg_name) os.makedirs(bin_path) print("Created {0} directory".format(target_path)) args = ["python", "setup.py", "bdist_egg", "--dist-dir={0}".format(bin_path)] print("Creating egg {0}".format(egg_path)) do(*args) egg_name = os.path.join("bin", os.path.basename( glob.glob(os.path.join(bin_path, "*"))[0])) print("Writing {0}".format(manifest_path)) with open(manifest_path, mode='w') as manifest: manifest.write(MANIFEST.format(AGENT_NAME, egg_name)) print("Writing {0}".format(publish_manifest_path)) with open(publish_manifest_path, mode='w') as publish_manifest: publish_manifest.write(PUBLISH_MANIFEST.format(AGENT_VERSION, family)) cwd = os.getcwd() os.chdir(target_path) print("Creating package {0}".format(pkg_name)) do("zip", "-r", pkg_name, egg_name) do("zip", "-j", pkg_name, AGENT_MANIFEST_FILE) do("zip", "-j", pkg_name, PUBLISH_MANIFEST_FILE) os.chdir(cwd) print("Package {0} successfully created".format(pkg_name)) sys.exit(0) WALinuxAgent-2.2.32/setup.py000077500000000000000000000217431335416306700156420ustar00rootroot00000000000000#!/usr/bin/env python # # Microsoft Azure Linux Agent setup.py # # Copyright 2013 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import os from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_DESCRIPTION, \ DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil import get_osutil import setuptools from setuptools import find_packages from setuptools.command.install import install as _install import sys root_dir = os.path.dirname(os.path.abspath(__file__)) os.chdir(root_dir) def set_files(data_files, dest=None, src=None): data_files.append((dest, src)) def set_bin_files(data_files, dest="/usr/sbin", src=["bin/waagent", "bin/waagent2.0"]): data_files.append((dest, src)) def set_conf_files(data_files, dest="/etc", src=["config/waagent.conf"]): data_files.append((dest, src)) def set_logrotate_files(data_files, dest="/etc/logrotate.d", src=["config/waagent.logrotate"]): data_files.append((dest, src)) def set_sysv_files(data_files, dest="/etc/rc.d/init.d", src=["init/waagent"]): data_files.append((dest, src)) def set_systemd_files(data_files, dest="/lib/systemd/system", src=["init/waagent.service"]): data_files.append((dest, src)) def set_freebsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/freebsd/waagent"]): data_files.append((dest, src)) def set_openbsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/openbsd/waagent"]): data_files.append((dest, src)) def set_udev_files(data_files, dest="/etc/udev/rules.d/", src=["config/66-azure-storage.rules", "config/99-azure-product-uuid.rules"]): data_files.append((dest, src)) def get_data_files(name, version, fullname): """ Determine data_files according to distro name, version and init system type """ data_files = [] if name == 'redhat' or name == 'centos': set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("6"): set_sysv_files(data_files) else: # redhat7.0+ use systemd set_systemd_files(data_files, 
dest="/usr/lib/systemd/system") if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 7.1 set_sysv_files(data_files) elif name == 'arch': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, src=["config/arch/waagent.conf"]) set_udev_files(data_files) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/arch/waagent.service"]) elif name == 'coreos': set_bin_files(data_files, dest="/usr/share/oem/bin") set_conf_files(data_files, dest="/usr/share/oem", src=["config/coreos/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) set_files(data_files, dest="/usr/share/oem", src=["init/coreos/cloud-config.yml"]) elif name == 'clear linux os for intel architecture' \ or name == 'clear linux software for intel architecture': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, dest="/usr/share/defaults/waagent", src=["config/clearlinux/waagent.conf"]) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/clearlinux/waagent.service"]) elif name == 'ubuntu': set_bin_files(data_files) set_conf_files(data_files, src=["config/ubuntu/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("12") or version.startswith("14"): # Ubuntu12.04/14.04 - uses upstart set_files(data_files, dest="/etc/init", src=["init/ubuntu/walinuxagent.conf"]) set_files(data_files, dest='/etc/default', src=['init/ubuntu/walinuxagent']) elif fullname == 'Snappy Ubuntu Core': set_files(data_files, dest="", src=["init/ubuntu/snappy/walinuxagent.yml"]) else: # Ubuntu15.04+ uses systemd set_systemd_files(data_files, src=["init/ubuntu/walinuxagent.service"]) elif name == 'suse' or name == 'opensuse': set_bin_files(data_files) set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if fullname == 'SUSE Linux Enterprise Server' and \ version.startswith('11') or \ fullname == 'openSUSE' and 
version.startswith( '13.1'): set_sysv_files(data_files, dest='/etc/init.d', src=["init/suse/waagent"]) else: # sles 12+ and openSUSE 13.2+ use systemd set_systemd_files(data_files, dest='/usr/lib/systemd/system') elif name == 'freebsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/freebsd/waagent.conf"]) set_freebsd_rc_files(data_files) elif name == 'openbsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/openbsd/waagent.conf"]) set_openbsd_rc_files(data_files) elif name == 'debian': set_bin_files(data_files) set_conf_files(data_files, src=["config/debian/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files, dest="/lib/udev/rules.d") elif name == 'iosxe': set_bin_files(data_files) set_conf_files(data_files, src=["config/iosxe/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) set_systemd_files(data_files, dest="/usr/lib/systemd/system") if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 7.1 set_sysv_files(data_files) else: # Use default setting set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) set_sysv_files(data_files) return data_files class install(_install): user_options = _install.user_options + [ ('lnx-distro=', None, 'target Linux distribution'), ('lnx-distro-version=', None, 'target Linux distribution version'), ('lnx-distro-fullname=', None, 'target Linux distribution full name'), ('register-service', None, 'register as startup service and start'), ('skip-data-files', None, 'skip data files installation'), ] def initialize_options(self): _install.initialize_options(self) self.lnx_distro = DISTRO_NAME self.lnx_distro_version = DISTRO_VERSION self.lnx_distro_fullname = DISTRO_FULL_NAME self.register_service = False self.skip_data_files = False def finalize_options(self): _install.finalize_options(self) if self.skip_data_files: return 
data_files = get_data_files(self.lnx_distro, self.lnx_distro_version, self.lnx_distro_fullname) self.distribution.data_files = data_files self.distribution.reinitialize_command('install_data', True) def run(self): _install.run(self) if self.register_service: osutil = get_osutil() osutil.register_agent_service() osutil.stop_agent_service() osutil.start_agent_service() # Note to packagers and users from source. # In version 3.5 of Python distribution information handling in the platform # module was deprecated. Depending on the Linux distribution the # implementation may be broken prior to Python 3.7 wher the functionality # will be removed from Python 3 requires = [] if float(sys.version[:3]) >= 3.7: requires = ['distro'] setuptools.setup( name=AGENT_NAME, version=AGENT_VERSION, long_description=AGENT_DESCRIPTION, author='Microsoft Corporation', author_email='walinuxagent@microsoft.com', platforms='Linux', url='https://github.com/Azure/WALinuxAgent', license='Apache License Version 2.0', packages=find_packages(exclude=["tests*"]), py_modules=["__main__"], install_requires=requires, cmdclass={ 'install': install } ) WALinuxAgent-2.2.32/tests/000077500000000000000000000000001335416306700152605ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/__init__.py000066400000000000000000000011651335416306700173740ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/common/000077500000000000000000000000001335416306700165505ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/common/__init__.py000066400000000000000000000011651335416306700206640ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/common/dhcp/000077500000000000000000000000001335416306700174665ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/common/dhcp/__init__.py000066400000000000000000000011651335416306700216020ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/common/dhcp/test_dhcp.py000066400000000000000000000104161335416306700220170ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import mock import azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.osutil.default as osutil from tests.tools import * class TestDHCP(AgentTestCase): def test_wireserver_route_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) # execute routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric " \ "Mask MTU Window IRTT \n\ eth0 00000000 10813FA8 0003 0 0 5 " \ "00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 " \ "00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 " \ "00FCFFFF 0 0 0 \n" with patch("os.path.exists", return_value=True): mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is not None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_wireserver_route_not_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) 
self.assertTrue(dhcp_handler.gateway is None) # execute self.assertFalse(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_dhcp_cache_exists(self): dhcp_handler = dhcp.get_dhcp_handler() dhcp_handler.osutil = osutil.DefaultOSUtil() with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value=None): self.assertFalse(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, None) with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value="foo"): self.assertTrue(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, "foo") def test_dhcp_skip_cache(self): handler = dhcp.get_dhcp_handler() handler.osutil = osutil.DefaultOSUtil() with patch('os.path.exists', return_value=False): with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint')\ as patch_dhcp_cache: with patch.object(dhcp.DhcpHandler, 'send_dhcp_req') \ as patch_dhcp_send: endpoint = 'foo' patch_dhcp_cache.return_value = endpoint # endpoint comes from cache self.assertFalse(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 0) self.assertTrue(handler.endpoint == endpoint) # reset handler.skip_cache = True handler.endpoint = None # endpoint comes from dhcp request self.assertTrue(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 1) WALinuxAgent-2.2.32/tests/common/osutil/000077500000000000000000000000001335416306700200675ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/common/osutil/__init__.py000066400000000000000000000011651335416306700222030ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/common/osutil/mock_osutil.py000066400000000000000000000037701335416306700230000ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.default import DefaultOSUtil class MockOSUtil(DefaultOSUtil): def __init__(self): self.all_users = {} self.sudo_users = set() self.jit_enabled = True def useradd(self, username, expiration=None, comment=None): if username == "": raise Exception("test exception for bad username") if username in self.all_users: raise Exception("test exception, user already exists") self.all_users[username] = (username, None, None, None, comment, None, None, expiration) def conf_sudoer(self, username, nopasswd=False, remove=False): if not remove: self.sudo_users.add(username) else: self.sudo_users.remove(username) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if password == "": raise Exception("test exception for bad password") user = self.all_users[username] self.all_users[username] = (user[0], password, user[2], user[3], user[4], user[5], user[6], user[7]) def del_account(self, username): if username == "": raise Exception("test exception, bad data") if username not in self.all_users: raise Exception("test exception, user does not exist to delete") self.all_users.pop(username) def get_users(self): return self.all_users.values()WALinuxAgent-2.2.32/tests/common/osutil/test_bigip.py000066400000000000000000000257541335416306700226070ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os import socket import time import azurelinuxagent.common.osutil.bigip as osutil import azurelinuxagent.common.osutil.default as default import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from tests.tools import * class TestBigIpOSUtil_wait_until_mcpd_is_initialized(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "info", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._wait_until_mcpd_is_initialized( osutil.BigIpOSUtil() ) self.assertEqual(result, True) # There are two logger calls in the mcpd wait function. The second # occurs after mcpd is found to be "up" self.assertEqual(args[0].call_count, 2) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "info", return_value=None) @patch.object(time, "sleep", return_value=None) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil._wait_until_mcpd_is_initialized, osutil.BigIpOSUtil() ) class TestBigIpOSUtil_save_sys_config(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "error", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 0) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "error", return_value=None) def test_failure(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 1) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_get_dhcp_pid(AgentTestCase): @patch.object(shellutil, "run_get_output", return_value=(0, 8623)) def test_success(self, *args): result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) self.assertEqual(result, 8623) @patch.object(shellutil, "run_get_output", return_value=(1, 'foo')) def test_failure(self, *args): 
result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) self.assertEqual(result, None) class TestBigIpOSUtil_useradd(AgentTestCase): @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) @patch.object(shellutil, "run_get_output") def test_success(self, *args): args[0].return_value = (0, None) result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'foo', expiration=None ) self.assertEqual(result, 0) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_user_already_exists(self, *args): args[0].return_value = 'admin' result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'admin', expiration=None ) self.assertEqual(result, None) @patch.object(shellutil, "run", return_value=1) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.useradd, osutil.BigIpOSUtil(), 'foo', expiration=None ) class TestBigIpOSUtil_chpasswd(AgentTestCase): @patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=True) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) @patch.object(osutil.BigIpOSUtil, '_save_sys_config', return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil.chpasswd( osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[0].call_count, 1) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=True) def test_is_sys_user(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) @patch.object(shellutil, "run_get_output", return_value=(1, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) def test_failed_to_set_user_password(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) 
@patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_failed_to_get_user_entry(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) class TestBigIpOSUtil_get_dvd_device(AgentTestCase): @patch.object(os, "listdir", return_value=['tty1','cdrom0']) def test_success(self, *args): result = osutil.BigIpOSUtil.get_dvd_device( osutil.BigIpOSUtil(), '/dev' ) self.assertEqual(result, '/dev/cdrom0') @patch.object(os, "listdir", return_value=['foo', 'bar']) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.get_dvd_device, osutil.BigIpOSUtil(), '/dev' ) class TestBigIpOSUtil_restart_ssh_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.restart_ssh_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_stop_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.stop_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_start_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.start_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_register_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.register_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_unregister_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.unregister_agent_service( osutil.BigIpOSUtil() 
) self.assertEqual(result, 0) class TestBigIpOSUtil_set_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_set_dhcp_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_dhcp_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_get_first_if(AgentTestCase): @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'eth0') def test_success(self, *args): ifname, ipaddr = osutil.BigIpOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'loenp0s3') def test_success(self, *args): ifname, ipaddr = osutil.BigIpOSUtil().get_first_if() self.assertFalse(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") class TestBigIpOSUtil_mount_dvd(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(time, "sleep", return_value=None) @patch.object(osutil.BigIpOSUtil, '_wait_until_mcpd_is_initialized', return_value=None) @patch.object(default.DefaultOSUtil, 'mount_dvd', return_value=None) def test_success(self, *args): osutil.BigIpOSUtil.mount_dvd( osutil.BigIpOSUtil(), max_retry=6, chk_err=True ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 1) class TestBigIpOSUtil_route_add(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): osutil.BigIpOSUtil.route_add( osutil.BigIpOSUtil(), 
'10.10.10.0', '255.255.255.0', '10.10.10.1' ) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_device_for_ide_port(AgentTestCase): @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=False) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_waiting(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 99) self.assertEqual(args[2].call_count, 99) @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=True) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_immediate(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 1) self.assertEqual(args[2].call_count, 0) if __name__ == '__main__': unittest.main()WALinuxAgent-2.2.32/tests/common/osutil/test_default.py000066400000000000000000001101051335416306700231220ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import socket import glob import mock import traceback import azurelinuxagent.common.osutil.default as osutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from tests.tools import * actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route' def fake_is_loopback(_, iface): return iface.startswith('lo') def running_under_travis(): return 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' class TestOSUtil(AgentTestCase): def test_restart(self): # setup retries = 3 ifname = 'dummy' with patch.object(shellutil, "run") as run_patch: run_patch.return_value = 1 # execute osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0) # assert self.assertEqual(run_patch.call_count, retries) self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname)) def test_get_dvd_device_success(self): with patch.object(os, 'listdir', return_value=['cpu', 'cdrom0']): osutil.DefaultOSUtil().get_dvd_device() def test_get_dvd_device_failure(self): with patch.object(os, 'listdir', return_value=['cpu', 'notmatching']): try: osutil.DefaultOSUtil().get_dvd_device() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue('notmatching' in ustr(ose)) @patch('time.sleep') def test_mount_dvd_success(self, _): msg = 'message' with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(0, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() except OSUtilError: self.fail("mounting failed") @patch('time.sleep') def test_mount_dvd_failure(self, _): msg = 'message' with 
patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(1, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue(msg in ustr(ose)) self.assertTrue(patch_run.call_count == 6) def test_empty_proc_net_route(self): routing_table = "" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0) def test_no_routes(self): routing_table = 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) def test_bogus_proc_net_route(self): routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) def test_valid_routes(self): routing_table = \ 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \ 'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \ 'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \ 'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ 'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ 'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n' known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() 
self.assertEqual(len(raw_route_list), 6) self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash) route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list) self.assertEqual(len(route_list), 5) self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193') self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') self.assertEqual(route_list[1].flags, 1) self.assertEqual(route_list[2].flags, 15) self.assertEqual(route_list[3].flags, 7) self.assertEqual(route_list[3].metric, 0) self.assertEqual(route_list[4].metric, 10) self.assertEqual(route_list[0].interface, 'eth0') self.assertEqual(route_list[4].interface, 'docker0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1'}) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock): """ Validate that the agent can find the first active non-loopback interface. This test case used to run live, but not all developers have an eth* interface. It is perfectly valid to have a br*, but this test does not account for that. 
""" ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertEqual(ifname, 'eth0') self.assertEqual(ipaddr, '10.0.0.1') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1', 'lo': '127.0.0.1'}) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock): ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") def test_get_first_if_all_loopback(self): fake_ifaces = {'lo':'127.0.0.1'} with patch.object(osutil.DefaultOSUtil, 'get_primary_interface', return_value='bogus0'): with patch.object(osutil.DefaultOSUtil, '_get_all_interfaces', return_value=fake_ifaces): self.assertEqual(('', ''), osutil.DefaultOSUtil().get_first_if()) def test_get_all_interfaces(self): loopback_count = 0 non_loopback_count = 0 for iface in osutil.DefaultOSUtil()._get_all_interfaces(): if iface == 'lo': loopback_count += 1 else: non_loopback_count += 1 self.assertEqual(loopback_count, 1, 'Exactly 1 loopback network interface should exist') self.assertGreater(loopback_count, 0, 'At least 1 non-loopback network interface should exist') def test_isloopback(self): for iface in osutil.DefaultOSUtil()._get_all_interfaces(): if iface == 'lo': self.assertTrue(osutil.DefaultOSUtil().is_loopback(iface)) else: self.assertFalse(osutil.DefaultOSUtil().is_loopback(iface)) def test_isprimary(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ eth0 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = 
mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('lo')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('eth0')) def test_sriov(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n" \ "bond0 00000000 0100000A 0003 0 0 0 00000000 0 0 0 \n" \ "bond0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "eth0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "bond0 10813FA8 0100000A 0007 0 0 0 00000000 0 0 0 \n" \ "bond0 FEA9FEA9 0100000A 0007 0 0 0 00000000 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('eth0')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('bond0')) def test_multiple_default_routes(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ high 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ low1 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('low1')) def test_multiple_interfaces(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ first 00000000 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ secnd 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('first')) def test_interface_flags(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ nflg 00000000 01345B0A 0001 0 0 1 00000000 0 0 0 \n\ flgs 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('flgs')) def test_no_interface(self): 
routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ ndst 00000001 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ nflg 00000000 01345B0A 0001 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('ndst')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('nflg')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('invalid')) def test_no_primary_does_not_throw(self): with patch.object(osutil.DefaultOSUtil, 'get_primary_interface') \ as patch_primary: exception = False patch_primary.return_value = '' try: osutil.DefaultOSUtil().get_first_if()[0] except Exception as e: print(traceback.format_exc()) exception = True self.assertFalse(exception) def test_dhcp_lease_default(self): self.assertTrue(osutil.DefaultOSUtil().get_dhcp_lease_endpoint() is None) def test_dhcp_lease_ubuntu(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") def test_dhcp_lease_custom_dns(self): """ Validate that the wireserver address is coming from option 245 (on default configurations the address is also available in the domain-name-servers option, but users may set up a custom dns server on their vnet) """ with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with 
patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.custom.dns"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() self.assertEqual(endpoint, "168.63.129.16") def test_dhcp_lease_multi(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.2") def test_get_total_mem(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep MemTotal /proc/meminfo |awk '{print $2}'" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]) / 1024, get_osutil().get_total_mem()) else: self.fail("Cannot retrieve total memory using shell command.") def test_get_processor_cores(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep 'processor.*:' /proc/cpuinfo |wc -l" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]), get_osutil().get_processor_cores()) else: self.fail("Cannot retrieve number of process cores using shell command.") def test_conf_sshd(self): new_file = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication yes\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ " expected_output = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication no\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match(self): new_file = "\ Port 
22\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ ChallengeResponseAuthentication no\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_last(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_middle(self): new_file = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ 
ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple_first_last(self): new_file = "\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_correct_instance_id(self): util = osutil.DefaultOSUtil() self.assertEqual( "12345678-1234-1234-1234-123456789012", util._correct_instance_id("78563412-3412-3412-1234-123456789012")) self.assertEqual( "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8", util._correct_instance_id("544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8")) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="33C2F3B9-1399-429F-8EB3-BA656DF32502") def test_get_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") 
@patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="") def test_get_instance_id_empty_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="Value") def test_get_instance_id_malformed_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "Value", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, '33C2F3B9-1399-429F-8EB3-BA656DF32502']) def test_get_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[1, 'Error Value']) def test_get_instance_id_missing(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, 'Unexpected Value']) def test_get_instance_id_unexpected(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file') def test_is_current_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() mock_read.return_value = "B9F3C233-9913-9F42-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = "33C2F3B9-1399-429F-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) 
@patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_is_current_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() mock_shell.return_value = [0, 'B9F3C233-9913-9F42-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_shell.return_value = [0, '33C2F3B9-1399-429F-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) @patch('azurelinuxagent.common.conf.get_sudoers_dir') def test_conf_sudoer(self, mock_dir): tmp_dir = tempfile.mkdtemp() mock_dir.return_value = tmp_dir util = osutil.DefaultOSUtil() # Assert the sudoer line is added if missing util.conf_sudoer("FooBar") waagent_sudoers = os.path.join(tmp_dir, 'waagent') self.assertTrue(os.path.isfile(waagent_sudoers)) count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) self.assertEqual(1, count) # Assert the line does not get added a second time util.conf_sudoer("FooBar") count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) print("WRITING TO {0}".format(waagent_sudoers)) self.assertEqual(1, count) def test_get_firewall_dropped_packets_returns_zero_if_firewall_disabled(self): osutil._enable_firewall = False util = osutil.DefaultOSUtil() self.assertEqual(0, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_error(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, "not used")] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_exception(self, mock_output): osutil._enable_firewall = True util = 
osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, Exception)] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_transient_error_ignored(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (3, "can't initialize iptables table `security': iptables who? (do you need to insmod?)")] self.assertEqual(0, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (0, ''' Chain OUTPUT (policy ACCEPT 104 packets, 43628 bytes) pkts bytes target prot opt in out source destination 0 0 ACCEPT tcp -- any any anywhere 168.63.129.16 owner UID match daemon 32 1920 DROP tcp -- any any anywhere 168.63.129.16 ''')] dst = '168.63.129.16' self.assertEqual(32, util.get_firewall_dropped_packets(dst)) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) 
mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_no_wait(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION-1) wait = "" mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_drop_exists(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 0, 0] mock_output.return_value = (0, version) self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_ignores_exceptions(self, mock_run, 
mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, Exception] mock_output.return_value = (0, version) self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertFalse(osutil._enable_firewall) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_checks_for_invalid_iptables_options(self, mock_run, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" # iptables uses the following exit codes # 0 - correct function # 1 - other errors # 2 - errors which appear to be caused by invalid or abused command # line parameters mock_run.side_effect = [2] mock_output.return_value = (0, version) self.assertFalse(util.enable_firewall(dst_ip='1.2.3.4', uid=42)) self.assertFalse(osutil._enable_firewall) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_disabled(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = False util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) 
mock_run.assert_not_called() mock_output.assert_not_called() mock_uid.assert_not_called() self.assertFalse(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 1, 0, 1, 0, 1] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.remove_firewall(dst, uid)) mock_run.assert_has_calls([ # delete rules < 2.2.26 call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), # delete rules >= 2.2.26 call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall_does_not_repeat(self, mock_run, mock_output, _): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst_ip='1.2.3.4' uid=42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [2] mock_output.side_effect = [(0, version), (1, "Output")] self.assertFalse(util.remove_firewall(dst_ip, uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip), chk_err=False), ]) mock_output.assert_has_calls([ 
call(osutil.IPTABLES_VERSION) ]) self.assertFalse(osutil._enable_firewall) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) self.assertFalse(util.remove_firewall()) self.assertFalse(util.remove_firewall()) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) @skip_if_predicate_true(running_under_travis, "The ip command isn't available in Travis") def test_get_nic_state(self): state = osutil.DefaultOSUtil().get_nic_state() self.assertNotEqual(state, {}) self.assertGreater(len(state.keys()), 1) another_state = osutil.DefaultOSUtil().get_nic_state() name = list(another_state.keys())[0] another_state[name].add_ipv4("xyzzy") self.assertNotEqual(state, another_state) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/common/test_cgroups.py000066400000000000000000000251151335416306700216470ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # from __future__ import print_function from azurelinuxagent.common.cgroups import CGroupsTelemetry, CGroups, CGroupsException, BASE_CGROUPS, Cpu, Memory, \ DEFAULT_MEM_LIMIT_MIN_MB from azurelinuxagent.common.version import AGENT_NAME from tests.tools import * import os import random import time def consume_cpu_time(): waste = 0 for x in range(1, 200000): waste += random.random() return waste def make_self_cgroups(): """ Build a CGroups object for the cgroup to which this process already belongs :return: CGroups containing this process :rtype: CGroups """ def path_maker(hierarchy, __): suffix = CGroups.get_my_cgroup_path(CGroups.get_hierarchy_id('cpu')) return os.path.join(BASE_CGROUPS, hierarchy, suffix) return CGroups("inplace", path_maker) def make_root_cgroups(): """ Build a CGroups object for the topmost cgroup :return: CGroups for most-encompassing cgroup :rtype: CGroups """ def path_maker(hierarchy, _): return os.path.join(BASE_CGROUPS, hierarchy) return CGroups("root", path_maker) def i_am_root(): return os.geteuid() == 0 @skip_if_predicate_false(CGroups.enabled, "CGroups not supported in this environment") class TestCGroups(AgentTestCase): @classmethod def setUpClass(cls): CGroups.setup(True) super(AgentTestCase, cls).setUpClass() def test_cgroup_utilities(self): """ Test utilities for querying cgroup metadata """ cpu_id = CGroups.get_hierarchy_id('cpu') self.assertGreater(int(cpu_id), 0) memory_id = CGroups.get_hierarchy_id('memory') self.assertGreater(int(memory_id), 0) self.assertNotEqual(cpu_id, memory_id) def test_telemetry_inplace(self): """ Test raw measures and basic statistics for the cgroup in which this process is currently running. 
""" cg = make_self_cgroups() self.assertIn('cpu', cg.cgroups) self.assertIn('memory', cg.cgroups) ct = CGroupsTelemetry("test", cg) cpu = Cpu(ct) self.assertGreater(cpu.current_system_cpu, 0) consume_cpu_time() # Eat some CPU cpu.update() self.assertGreater(cpu.current_cpu_total, cpu.previous_cpu_total) self.assertGreater(cpu.current_system_cpu, cpu.previous_system_cpu) percent_used = cpu.get_cpu_percent() self.assertGreater(percent_used, 0) def test_telemetry_in_place_leaf_cgroup(self): """ Ensure this leaf (i.e. not root of cgroup tree) cgroup has distinct metrics from the root cgroup. """ # Does nothing on systems where the default cgroup for a randomly-created process (like this test invocation) # is the root cgroup. cg = make_self_cgroups() root = make_root_cgroups() if cg.cgroups['cpu'] != root.cgroups['cpu']: ct = CGroupsTelemetry("test", cg) cpu = Cpu(ct) self.assertLess(cpu.current_cpu_total, cpu.current_system_cpu) consume_cpu_time() # Eat some CPU time.sleep(1) # Generate some idle time cpu.update() self.assertLess(cpu.current_cpu_total, cpu.current_system_cpu) def exercise_telemetry_instantiation(self, test_cgroup): test_extension_name = test_cgroup.name CGroupsTelemetry.track_cgroup(test_cgroup) self.assertIn('cpu', test_cgroup.cgroups) self.assertIn('memory', test_cgroup.cgroups) self.assertTrue(CGroupsTelemetry.is_tracked(test_extension_name)) consume_cpu_time() time.sleep(1) metrics = CGroupsTelemetry.collect_all_tracked() my_metrics = metrics[test_extension_name] self.assertEqual(len(my_metrics), 2) for item in my_metrics: metric_family, metric_name, metric_value = item if metric_family == "Process": self.assertEqual(metric_name, "% Processor Time") self.assertGreater(metric_value, 0.0) elif metric_family == "Memory": self.assertEqual(metric_name, "Total Memory Usage") self.assertGreater(metric_value, 100000) else: self.fail("Unknown metric {0}/{1} value {2}".format(metric_family, metric_name, metric_value)) @skip_if_predicate_false(i_am_root, 
"Test does not run when non-root") def test_telemetry_instantiation_as_superuser(self): """ Tracking a new cgroup for an extension; collect all metrics. """ # Record initial state initial_cgroup = make_self_cgroups() # Put the process into a different cgroup, consume some resources, ensure we see them end-to-end test_cgroup = CGroups.for_extension("agent_unittest") test_cgroup.add(os.getpid()) self.assertNotEqual(initial_cgroup.cgroups['cpu'], test_cgroup.cgroups['cpu']) self.assertNotEqual(initial_cgroup.cgroups['memory'], test_cgroup.cgroups['memory']) self.exercise_telemetry_instantiation(test_cgroup) # Restore initial state CGroupsTelemetry.stop_tracking("agent_unittest") initial_cgroup.add(os.getpid()) @skip_if_predicate_true(i_am_root, "Test does not run when root") def test_telemetry_instantiation_as_normal_user(self): """ Tracking an existing cgroup for an extension; collect all metrics. """ self.exercise_telemetry_instantiation(make_self_cgroups()) def test_cpu_telemetry(self): """ Test Cpu telemetry class """ cg = make_self_cgroups() self.assertIn('cpu', cg.cgroups) ct = CGroupsTelemetry('test', cg) self.assertIs(cg, ct.cgroup) cpu = Cpu(ct) self.assertIs(cg, cpu.cgt.cgroup) ticks_before = cpu.current_cpu_total consume_cpu_time() time.sleep(1) cpu.update() ticks_after = cpu.current_cpu_total self.assertGreater(ticks_after, ticks_before) p2 = cpu.get_cpu_percent() self.assertGreater(p2, 0) # when running under PyCharm, this is often > 100 # on a multi-core machine self.assertLess(p2, 200) def test_memory_telemetry(self): """ Test Memory telemetry class """ cg = make_self_cgroups() raw_usage_file_contents = cg.get_file_contents('memory', 'memory.usage_in_bytes') self.assertIsNotNone(raw_usage_file_contents) self.assertGreater(len(raw_usage_file_contents), 0) self.assertIn('memory', cg.cgroups) ct = CGroupsTelemetry('test', cg) self.assertIs(cg, ct.cgroup) memory = Memory(ct) usage_in_bytes = memory.get_memory_usage() self.assertGreater(usage_in_bytes, 
100000) def test_format_memory_value(self): """ Test formatting of memory amounts into human-readable units """ self.assertEqual(-1, CGroups._format_memory_value('bytes', None)) self.assertEqual(2048, CGroups._format_memory_value('kilobytes', 2)) self.assertEqual(0, CGroups._format_memory_value('kilobytes', 0)) self.assertEqual(2048000, CGroups._format_memory_value('kilobytes', 2000)) self.assertEqual(2048*1024, CGroups._format_memory_value('megabytes', 2)) self.assertEqual((1024 + 512) * 1024 * 1024, CGroups._format_memory_value('gigabytes', 1.5)) self.assertRaises(CGroupsException, CGroups._format_memory_value, 'KiloBytes', 1) @patch('azurelinuxagent.common.event.add_event') @patch('azurelinuxagent.common.conf.get_cgroups_enforce_limits') @patch('azurelinuxagent.common.cgroups.CGroups.set_memory_limit') @patch('azurelinuxagent.common.cgroups.CGroups.set_cpu_limit') @patch('azurelinuxagent.common.cgroups.CGroups._try_mkdir') def assert_limits(self, _, patch_set_cpu, patch_set_memory_limit, patch_get_enforce, patch_add_event, ext_name, expected_cpu_limit, limits_enforced=True, exception_raised=False): should_limit = expected_cpu_limit > 0 patch_get_enforce.return_value = limits_enforced if exception_raised: patch_set_memory_limit.side_effect = CGroupsException('set_memory_limit error') try: cg = CGroups.for_extension(ext_name) cg.set_limits() if exception_raised: self.fail('exception expected') except CGroupsException: if not exception_raised: self.fail('exception not expected') self.assertEqual(should_limit, patch_set_cpu.called) self.assertEqual(should_limit, patch_set_memory_limit.called) self.assertEqual(should_limit, patch_add_event.called) if should_limit: actual_cpu_limit = patch_set_cpu.call_args[0][0] actual_memory_limit = patch_set_memory_limit.call_args[0][0] event_kw_args = patch_add_event.call_args[1] self.assertEqual(expected_cpu_limit, actual_cpu_limit) self.assertTrue(actual_memory_limit >= DEFAULT_MEM_LIMIT_MIN_MB) 
self.assertEqual(event_kw_args['op'], 'SetCGroupsLimits') self.assertEqual(event_kw_args['is_success'], not exception_raised) self.assertTrue('{0}%'.format(expected_cpu_limit) in event_kw_args['message']) self.assertTrue(ext_name in event_kw_args['message']) self.assertEqual(exception_raised, 'set_memory_limit error' in event_kw_args['message']) def test_limits(self): self.assert_limits(ext_name="normal_extension", expected_cpu_limit=40) self.assert_limits(ext_name="customscript_extension", expected_cpu_limit=-1) self.assert_limits(ext_name=AGENT_NAME, expected_cpu_limit=10) self.assert_limits(ext_name="normal_extension", expected_cpu_limit=-1, limits_enforced=False) self.assert_limits(ext_name=AGENT_NAME, expected_cpu_limit=-1, limits_enforced=False) self.assert_limits(ext_name="normal_extension", expected_cpu_limit=40, exception_raised=True) WALinuxAgent-2.2.32/tests/common/test_conf.py000066400000000000000000000150771335416306700211200ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import mock import os.path from azurelinuxagent.common.conf import * from tests.tools import * class TestConf(AgentTestCase): # Note: # -- These values *MUST* match those from data/test_waagent.conf EXPECTED_CONFIGURATION = { "Extensions.Enabled": True, "Provisioning.Enabled": True, "Provisioning.UseCloudInit": True, "Provisioning.DeleteRootPassword": True, "Provisioning.RegenerateSshHostKeyPair": True, "Provisioning.SshHostKeyPairType": "rsa", "Provisioning.MonitorHostName": True, "Provisioning.DecodeCustomData": False, "Provisioning.ExecuteCustomData": False, "Provisioning.PasswordCryptId": '6', "Provisioning.PasswordCryptSaltLength": 10, "Provisioning.AllowResetSysUser": False, "ResourceDisk.Format": True, "ResourceDisk.Filesystem": "ext4", "ResourceDisk.MountPoint": "/mnt/resource", "ResourceDisk.EnableSwap": False, "ResourceDisk.SwapSizeMB": 0, "ResourceDisk.MountOptions": None, "Logs.Verbose": False, "OS.EnableFIPS": True, "OS.RootDeviceScsiTimeout": '300', "OS.OpensslPath": '/usr/bin/openssl', "OS.SshClientAliveInterval": 42, "OS.SshDir": "/notareal/path", "HttpProxy.Host": None, "HttpProxy.Port": None, "DetectScvmmEnv": False, "Lib.Dir": "/var/lib/waagent", "DVD.MountPoint": "/mnt/cdrom/secure", "Pid.File": "/var/run/waagent.pid", "Extension.LogDir": "/var/log/azure", "OS.HomeDir": "/home", "OS.EnableRDMA": False, "OS.UpdateRdmaDriver": False, "OS.CheckRdmaDriver": False, "AutoUpdate.Enabled": True, "AutoUpdate.GAFamily": "Prod", "EnableOverProvisioning": True, "OS.AllowHTTP": False, "OS.EnableFirewall": False, "CGroups.EnforceLimits": False, "CGroups.Excluded": "customscript,runcommand", } def setUp(self): AgentTestCase.setUp(self) self.conf = ConfigurationProvider() load_conf_from_file( os.path.join(data_dir, "test_waagent.conf"), self.conf) def test_key_value_handling(self): self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad")) self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", "Bad")) 
self.assertEqual("delalloc,rw,noatime,nobarrier,users,mode=777", self.conf.get("FauxKey3", "Bad")) def test_get_ssh_dir(self): self.assertTrue(get_ssh_dir(self.conf).startswith("/notareal/path")) def test_get_sshd_conf_file_path(self): self.assertTrue(get_sshd_conf_file_path( self.conf).startswith("/notareal/path")) def test_get_ssh_key_glob(self): self.assertTrue(get_ssh_key_glob( self.conf).startswith("/notareal/path")) def test_get_ssh_key_private_path(self): self.assertTrue(get_ssh_key_private_path( self.conf).startswith("/notareal/path")) def test_get_ssh_key_public_path(self): self.assertTrue(get_ssh_key_public_path( self.conf).startswith("/notareal/path")) def test_get_fips_enabled(self): self.assertTrue(get_fips_enabled(self.conf)) def test_get_provision_cloudinit(self): self.assertTrue(get_provision_cloudinit(self.conf)) def test_get_configuration(self): configuration = conf.get_configuration(self.conf) self.assertTrue(len(configuration.keys()) > 0) for k in TestConf.EXPECTED_CONFIGURATION.keys(): self.assertEqual( TestConf.EXPECTED_CONFIGURATION[k], configuration[k], k) def test_get_agent_disabled_file_path(self): self.assertEqual(get_disable_agent_file_path(self.conf), os.path.join(self.tmp_dir, DISABLE_AGENT_FILE)) def test_write_agent_disabled(self): """ Test writing disable_agent is empty """ from azurelinuxagent.pa.provision.default import ProvisionHandler disable_file_path = get_disable_agent_file_path(self.conf) self.assertFalse(os.path.exists(disable_file_path)) ProvisionHandler.write_agent_disabled() self.assertTrue(os.path.exists(disable_file_path)) self.assertEqual('', fileutil.read_file(disable_file_path)) def test_get_extensions_enabled(self): self.assertTrue(get_extensions_enabled(self.conf)) @patch('azurelinuxagent.common.conf.ConfigurationProvider.get') def assert_get_cgroups_excluded(self, patch_get, config, expected_value): patch_get.return_value = config self.assertEqual(expected_value, conf.get_cgroups_excluded(self.conf)) def 
test_get_cgroups_excluded(self): self.assert_get_cgroups_excluded(config=None, expected_value=[]) self.assert_get_cgroups_excluded(config='', expected_value=[]) self.assert_get_cgroups_excluded(config=' ', expected_value=[]) self.assert_get_cgroups_excluded(config=' , ,, ,', expected_value=[]) standard_values = ['customscript', 'runcommand'] self.assert_get_cgroups_excluded(config='CustomScript, RunCommand', expected_value=standard_values) self.assert_get_cgroups_excluded(config='customScript, runCommand , , ,,', expected_value=standard_values) self.assert_get_cgroups_excluded(config=' customscript,runcommand ', expected_value=standard_values) self.assert_get_cgroups_excluded(config='customscript,, runcommand', expected_value=standard_values) self.assert_get_cgroups_excluded(config=',,customscript ,runcommand', expected_value=standard_values) WALinuxAgent-2.2.32/tests/common/test_errorstate.py000066400000000000000000000073241335416306700223610ustar00rootroot00000000000000from datetime import timedelta from azurelinuxagent.common.errorstate import * from tests.tools import * class TestErrorState(unittest.TestCase): def test_errorstate00(self): """ If ErrorState is never incremented, it will never trigger. """ test_subject = ErrorState(timedelta(seconds=10000)) self.assertFalse(test_subject.is_triggered()) self.assertEqual(0, test_subject.count) self.assertEqual('unknown', test_subject.fail_time) def test_errorstate01(self): """ If ErrorState is never incremented, and the timedelta is zero it will not trigger. """ test_subject = ErrorState(timedelta(seconds=0)) self.assertFalse(test_subject.is_triggered()) self.assertEqual(0, test_subject.count) self.assertEqual('unknown', test_subject.fail_time) def test_errorstate02(self): """ If ErrorState is triggered, and the current time is within timedelta of now it will trigger. 
""" test_subject = ErrorState(timedelta(seconds=0)) test_subject.incr() self.assertTrue(test_subject.is_triggered()) self.assertEqual(1, test_subject.count) self.assertEqual('0.0 min', test_subject.fail_time) @patch('azurelinuxagent.common.errorstate.datetime') def test_errorstate03(self, mock_time): """ ErrorState will not trigger until 1. ErrorState has been incr() at least once. 2. The timedelta from the first incr() has elapsed. """ test_subject = ErrorState(timedelta(minutes=15)) for x in range(1, 10): mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=x)) test_subject.incr() self.assertFalse(test_subject.is_triggered()) mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=30)) test_subject.incr() self.assertTrue(test_subject.is_triggered()) self.assertEqual('29.0 min', test_subject.fail_time) def test_errorstate04(self): """ If ErrorState is reset the timestamp of the last incr() is reset to None. """ test_subject = ErrorState(timedelta(minutes=15)) self.assertTrue(test_subject.timestamp is None) test_subject.incr() self.assertTrue(test_subject.timestamp is not None) test_subject.reset() self.assertTrue(test_subject.timestamp is None) def test_errorstate05(self): """ Test the fail_time for various scenarios """ test_subject = ErrorState(timedelta(minutes=15)) self.assertEqual('unknown', test_subject.fail_time) test_subject.incr() self.assertEqual('0.0 min', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60) self.assertEqual('1.0 min', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=73) self.assertEqual('1.22 min', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=120) self.assertEqual('2.0 min', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 59) self.assertEqual('59.0 min', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - 
timedelta(seconds=60 * 60) self.assertEqual('1.0 hr', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 95) self.assertEqual('1.58 hr', test_subject.fail_time) test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60 * 3) self.assertEqual('3.0 hr', test_subject.fail_time) WALinuxAgent-2.2.32/tests/common/test_event.py000066400000000000000000000234341335416306700213100ustar00rootroot00000000000000# Copyright 2017 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function from datetime import datetime, timedelta from azurelinuxagent.common.event import add_event, \ WALAEventOperation, elapsed_milliseconds from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import CURRENT_VERSION from tests.tools import * import azurelinuxagent.common.event as event class TestEvent(AgentTestCase): def test_event_status_event_marked(self): es = event.__event_status__ self.assertFalse(es.event_marked("Foo", "1.2", "FauxOperation")) es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation")) event.__event_status__ = event.EventStatus() event.init_event_status(self.tmp_dir) es = event.__event_status__ self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation")) def test_event_status_defaults_to_success(self): es = event.__event_status__ self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_records_status(self): es = event.EventStatus() es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) es.mark_event_status("Foo", "1.2", "FauxOperation", False) self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_preserves_state(self): es = event.__event_status__ es.mark_event_status("Foo", "1.2", "FauxOperation", False) self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) event.__event_status__ = event.EventStatus() event.init_event_status(self.tmp_dir) es = event.__event_status__ self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_should_emit_event_ignores_unknown_operations(self): event.__event_status__ = event.EventStatus() self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) # Marking the event has no effect 
event.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) def test_should_emit_event_handles_known_operations(self): event.__event_status__ = event.EventStatus() # Known operations always initially "fire" for op in event.__event_status_operations__: self.assertTrue(event.should_emit_event("Foo", "1.2", op, True)) self.assertTrue(event.should_emit_event("Foo", "1.2", op, False)) # Note a success event... for op in event.__event_status_operations__: event.mark_event_status("Foo", "1.2", op, True) # Subsequent success events should not fire, but failures will for op in event.__event_status_operations__: self.assertFalse(event.should_emit_event("Foo", "1.2", op, True)) self.assertTrue(event.should_emit_event("Foo", "1.2", op, False)) # Note a failure event... for op in event.__event_status_operations__: event.mark_event_status("Foo", "1.2", op, False) # Subsequent success events fire and failure do not for op in event.__event_status_operations__: self.assertTrue(event.should_emit_event("Foo", "1.2", op, True)) self.assertFalse(event.should_emit_event("Foo", "1.2", op, False)) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_if_not_previously_sent(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_does_not_emit_if_previously_sent(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_if_forced(self, mock_event): 
event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent", force=True) self.assertEqual(2, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_after_elapsed_delta(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) h = hash("FauxEvent"+WALAEventOperation.Unknown+ustr(True)) event.__event_logger__.periodic_events[h] = \ datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(2, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_forwards_args(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") mock_event.assert_called_once_with( "FauxEvent", duration=0, evt_type='', is_internal=False, is_success=True, log_event=True, message='', op=WALAEventOperation.Unknown, version=str(CURRENT_VERSION)) def test_save_event(self): add_event('test', message='test event') self.assertTrue(len(os.listdir(self.tmp_dir)) == 1) def test_save_event_rollover(self): add_event('test', message='first event') for i in range(0, 999): add_event('test', message='test event {0}'.format(i)) events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == 1000) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertTrue('first event' in first_event_text) add_event('test', message='last event') events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events))) first_event = os.path.join(self.tmp_dir, 
events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertFalse('first event' in first_event_text) self.assertTrue('test event 0' in first_event_text) last_event = os.path.join(self.tmp_dir, events[-1]) with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) def test_save_event_cleanup(self): for i in range(0, 2000): evt = os.path.join(self.tmp_dir, '{0}.tld'.format(ustr(1491004920536531 + i))) with open(evt, 'w') as fh: fh.write('test event {0}'.format(i)) events = os.listdir(self.tmp_dir) self.assertTrue(len(events) == 2000, "{0} events found, 2000 expected".format(len(events))) add_event('test', message='last event') events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events))) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertTrue('test event 1001' in first_event_text) last_event = os.path.join(self.tmp_dir, events[-1]) with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) def test_elapsed_milliseconds(self): utc_start = datetime.utcnow() + timedelta(days=1) self.assertEqual(0, elapsed_milliseconds(utc_start)) @patch('azurelinuxagent.common.event.EventLogger.save_event') def test_report_metric(self, mock_event): event.report_metric("cpu", "%idle", "_total", 10.0) self.assertEqual(1, mock_event.call_count) event_json = mock_event.call_args[0][0] self.assertIn("69B669B9-4AF8-4C50-BDC4-6006FA76E975", event_json) self.assertIn("%idle", event_json) import json event_dictionary = json.loads(event_json) self.assertEqual(event_dictionary['providerId'], "69B669B9-4AF8-4C50-BDC4-6006FA76E975") for parameter in event_dictionary["parameters"]: if parameter['name'] == 'Counter': self.assertEqual(parameter['value'], '%idle') break else: self.fail("Counter 
'%idle' not found in event parameters: {0}".format(repr(event_dictionary))) WALinuxAgent-2.2.32/tests/common/test_logger.py000066400000000000000000000075121335416306700214450ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import json from datetime import datetime import azurelinuxagent.common.logger as logger from azurelinuxagent.common.event import add_log_event from azurelinuxagent.common.version import CURRENT_AGENT, CURRENT_VERSION from tests.tools import * _MSG = "This is our test logging message {0} {1}" _DATA = ["arg1", "arg2"] class TestLogger(AgentTestCase): @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_emits_if_not_previously_sent(self, mock_info): logger.reset_periodic() logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) self.assertEqual(1, mock_info.call_count) @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_does_not_emit_if_previously_sent(self, mock_info): logger.reset_periodic() logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) self.assertEqual(1, mock_info.call_count) logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) self.assertEqual(1, mock_info.call_count) @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_emits_after_elapsed_delta(self, mock_info): logger.reset_periodic() logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) self.assertEqual(1, mock_info.call_count) logger.periodic(logger.EVERY_DAY, 
_MSG, *_DATA) self.assertEqual(1, mock_info.call_count) logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG)] = \ datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) self.assertEqual(2, mock_info.call_count) @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_forwards_message_and_args(self, mock_info): logger.reset_periodic() logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) mock_info.assert_called_once_with(_MSG, *_DATA) def test_telemetry_logger(self): mock = MagicMock() appender = logger.TelemetryAppender(logger.LogLevel.WARNING, mock) appender.write(logger.LogLevel.WARNING, "--unit-test--") mock.assert_called_once_with(logger.LogLevel.WARNING, "--unit-test--") @patch('azurelinuxagent.common.event.EventLogger.save_event') def test_telemetry_logger1(self, mock_save): appender = logger.TelemetryAppender(logger.LogLevel.WARNING, add_log_event) appender.write(logger.LogLevel.WARNING, "--unit-test--") self.assertEqual(1, mock_save.call_count) telemetry_json = json.loads(mock_save.call_args[0][0]) self.assertEqual('FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F', telemetry_json['providerId']) self.assertEqual(7, telemetry_json['eventId']) self.assertEqual(5, len(telemetry_json['parameters'])) for x in telemetry_json['parameters']: if x['name'] == 'EventName': self.assertEqual(x['value'], 'Log') elif x['name'] == 'CapabilityUsed': self.assertEqual(x['value'], 'WARNING') elif x['name'] == 'Context1': self.assertEqual(x['value'], '--unit-test--') elif x['name'] == 'Context2': self.assertEqual(x['value'], '') elif x['name'] == 'Context3': self.assertEqual(x['value'], '') WALinuxAgent-2.2.32/tests/common/test_version.py000066400000000000000000000177651335416306700216660ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function import textwrap import mock from azurelinuxagent.common.version import set_current_agent, \ AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \ get_f5_platform, get_distro from tests.tools import * def freebsd_system(): return ["FreeBSD"] def freebsd_system_release(x, y, z): return "10.0" def openbsd_system(): return ["OpenBSD"] def openbsd_system_release(x, y, z): return "20.0" def default_system(): return [""] def default_system_no_linux_distro(): return '', '', '' def default_system_exception(): raise Exception class TestAgentVersion(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) return @mock.patch('platform.system', side_effect=freebsd_system) @mock.patch('re.sub', side_effect=freebsd_system_release) def test_distro_is_correct_format_when_freebsd(self, platform_system_name, mock_variable): osinfo = get_distro() freebsd_list = ['freebsd', "10.0", '', 'freebsd'] self.assertListEqual(freebsd_list, osinfo) return @mock.patch('platform.system', side_effect=openbsd_system) @mock.patch('re.sub', side_effect=openbsd_system_release) def test_distro_is_correct_format_when_openbsd(self, platform_system_name, mock_variable): osinfo = get_distro() openbsd_list = ['openbsd', "20.0", '', 'openbsd'] self.assertListEqual(openbsd_list, osinfo) return @mock.patch('platform.system', side_effect=default_system) @mock.patch('platform.dist', side_effect=default_system_no_linux_distro) def test_distro_is_correct_format_when_default_case(self, platform_system_name, 
default_system_no_linux): osinfo = get_distro() default_list = ['', '', '', ''] self.assertListEqual(default_list, osinfo) return @mock.patch('platform.system', side_effect=default_system) @mock.patch('platform.dist', side_effect=default_system_exception) def test_distro_is_correct_for_exception_case(self, platform_system_name, default_system_no_linux): osinfo = get_distro() default_list = ['unknown', 'FFFF', '', ''] self.assertListEqual(default_list, osinfo) return class TestCurrentAgentName(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) return @patch("os.getcwd", return_value="/default/install/directory") def test_extract_name_finds_installed(self, mock_cwd): current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd", return_value="/") def test_extract_name_root_finds_installed(self, mock_cwd): current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd") def test_extract_name_in_path_finds_installed(self, mock_cwd): path = os.path.join(conf.get_lib_dir(), "events") mock_cwd.return_value = path current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd") def test_extract_name_finds_latest_agent(self, mock_cwd): path = os.path.join(conf.get_lib_dir(), "{0}-{1}".format( AGENT_NAME, "1.2.3")) mock_cwd.return_value = path agent = os.path.basename(path) version = AGENT_NAME_PATTERN.match(agent).group(1) current_agent, current_version = set_current_agent() self.assertEqual(agent, current_agent) self.assertEqual(version, str(current_version)) return class TestGetF5Platforms(AgentTestCase): def test_get_f5_platform_bigip_12_1_1(self): version_file = textwrap.dedent(""" Product: BIG-IP 
Version: 12.1.1 Build: 0.0.184 Sequence: 12.1.1.0.0.184.0 BaseBuild: 0.0.184 Edition: Final Date: Thu Aug 11 17:09:01 PDT 2016 Built: 160811170901 Changelist: 1874858 JobID: 705993""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.1') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_bigip_12_1_0_hf1(self): version_file = textwrap.dedent(""" Product: BIG-IP Version: 12.1.0 Build: 1.0.1447 Sequence: 12.1.0.1.0.1447.0 BaseBuild: 0.0.1434 Edition: Hotfix HF1 Date: Wed Jun 8 13:41:59 PDT 2016 Built: 160608134159 Changelist: 1773831 JobID: 673467""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.0') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_bigip_12_0_0(self): version_file = textwrap.dedent(""" Product: BIG-IP Version: 12.0.0 Build: 0.0.606 Sequence: 12.0.0.0.0.606.0 BaseBuild: 0.0.606 Edition: Final Date: Fri Aug 21 13:29:22 PDT 2015 Built: 150821132922 Changelist: 1486072 JobID: 536212""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.0.0') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_iworkflow_2_0_1(self): version_file = textwrap.dedent(""" Product: iWorkflow Version: 2.0.1 Build: 0.0.9842 Sequence: 2.0.1.0.0.9842.0 BaseBuild: 0.0.9842 Edition: Final Date: Sat Oct 1 22:52:08 PDT 2016 Built: 161001225208 Changelist: 1924048 JobID: 734712""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'iworkflow') 
self.assertTrue(platform[1] == '2.0.1') self.assertTrue(platform[2] == 'iworkflow') self.assertTrue(platform[3] == 'iWorkflow') def test_get_f5_platform_bigiq_5_1_0(self): version_file = textwrap.dedent(""" Product: BIG-IQ Version: 5.1.0 Build: 0.0.631 Sequence: 5.1.0.0.0.631.0 BaseBuild: 0.0.631 Edition: Final Date: Thu Sep 15 19:55:43 PDT 2016 Built: 160915195543 Changelist: 1907534 JobID: 726344""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigiq') self.assertTrue(platform[1] == '5.1.0') self.assertTrue(platform[2] == 'bigiq') self.assertTrue(platform[3] == 'BIG-IQ') WALinuxAgent-2.2.32/tests/daemon/000077500000000000000000000000001335416306700165235ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/daemon/__init__.py000066400000000000000000000011651335416306700206370ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/daemon/test_daemon.py000066400000000000000000000120431335416306700213770ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from multiprocessing import Process from azurelinuxagent.daemon import * from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * class MockDaemonCall(object): def __init__(self, daemon_handler, count): self.daemon_handler = daemon_handler self.count = count def __call__(self, *args, **kw): self.count = self.count - 1 # Stop daemon after restarting for n times if self.count <= 0: self.daemon_handler.running = False raise Exception("Mock unhandled exception") class TestDaemon(AgentTestCase): @patch("time.sleep") def test_daemon_restart(self, mock_sleep): # Mock daemon function daemon_handler = get_daemon_handler() mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2)) daemon_handler.daemon = mock_daemon daemon_handler.check_pid = Mock() daemon_handler.run() mock_sleep.assert_any_call(15) self.assertEquals(2, daemon_handler.daemon.call_count) @patch("time.sleep") @patch("azurelinuxagent.daemon.main.conf") @patch("azurelinuxagent.daemon.main.sys.exit") def test_check_pid(self, mock_exit, mock_conf, _): daemon_handler = get_daemon_handler() mock_pid_file = os.path.join(self.tmp_dir, "pid") mock_conf.get_agent_pid_file_path = Mock(return_value=mock_pid_file) daemon_handler.check_pid() self.assertTrue(os.path.isfile(mock_pid_file)) daemon_handler.check_pid() mock_exit.assert_any_call(0) @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True) 
def test_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertTrue(OPENSSL_FIPS_ENVIRONMENT in os.environ) self.assertEqual('1', os.environ[OPENSSL_FIPS_ENVIRONMENT]) @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False) def test_does_not_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ) @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest') @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run') @patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()) def test_daemon_agent_enabled(self, _, patch_run_provision, patch_run_latest): """ Agent should run normally when no disable_agent is found """ self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) daemon_handler = get_daemon_handler() def stop_daemon(child_args): daemon_handler.running = False patch_run_latest.side_effect = stop_daemon daemon_handler.run() self.assertEqual(1, patch_run_provision.call_count) self.assertEqual(1, patch_run_latest.call_count) @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest', side_effect=AgentTestCase.fail) @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run', side_effect=ProvisionHandler.write_agent_disabled) @patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()) def test_daemon_agent_disabled(self, _, __, patch_run_latest): """ Agent should provision, then sleep forever when disable_agent is found """ # file is created by provisioning handler self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) daemon_handler = get_daemon_handler() # we need to assert this thread will sleep forever, so fork it 
daemon = Process(target=daemon_handler.run) daemon.start() daemon.join(timeout=5) self.assertTrue(daemon.is_alive()) daemon.terminate() # disable_agent was written, run_latest was not called self.assertTrue(os.path.exists(conf.get_disable_agent_file_path())) self.assertEqual(0, patch_run_latest.call_count) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/daemon/test_resourcedisk.py000066400000000000000000000031341335416306700226370ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import * from azurelinuxagent.daemon import * from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class TestResourceDisk(AgentTestCase): def test_mount_flags_empty(self): partition = '/dev/sdb1' mountpoint = '/mnt/resource' options = None expected = 'mount /dev/sdb1 /mnt/resource' rdh = ResourceDiskHandler() mount_string = rdh.get_mount_string(options, partition, mountpoint) self.assertEqual(expected, mount_string) def test_mount_flags_many(self): partition = '/dev/sdb1' mountpoint = '/mnt/resource' options = 'noexec,noguid,nodev' expected = 'mount -o noexec,noguid,nodev /dev/sdb1 /mnt/resource' rdh = ResourceDiskHandler() mount_string = rdh.get_mount_string(options, partition, mountpoint) self.assertEqual(expected, mount_string) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/data/000077500000000000000000000000001335416306700161715ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/dhcp000066400000000000000000000005101335416306700170260ustar00rootroot00000000000000ƪ] >` >* >]88RD008CFA06B61CcSc56 >* > >"test-cs12.h1.internal.cloudapp.net:;3 >WALinuxAgent-2.2.32/tests/data/dhcp.leases000066400000000000000000000035721335416306700203140ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers invalid; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 never; } lease { interface "eth0"; 
fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 4 2015/06/16 16:58:54; rebind 4 2015/06/16 16:58:54; expire 4 2015/06/16 16:58:54; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers 168.63.129.16; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } WALinuxAgent-2.2.32/tests/data/dhcp.leases.custom.dns000066400000000000000000000035641335416306700224110ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers invalid; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 never; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name 
"RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 4 2015/06/16 16:58:54; rebind 4 2015/06/16 16:58:54; expire 4 2015/06/16 16:58:54; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers 8.8.8.8; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } WALinuxAgent-2.2.32/tests/data/dhcp.leases.multi000066400000000000000000000037161335416306700214450ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers first; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; 
option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers second; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:03; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2012/07/23 23:27:10; } WALinuxAgent-2.2.32/tests/data/events/000077500000000000000000000000001335416306700174755ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/events/1478123456789000.tld000066400000000000000000000006271335416306700217670ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Test Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Some Operation"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": ""}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}WALinuxAgent-2.2.32/tests/data/events/1478123456789001.tld000066400000000000000000000006701335416306700217660ustar00rootroot00000000000000{"eventId": 1, "providerId": 
"69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Linux Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Linux Operation"}, {"name": "OperationSuccess", "value": false}, {"name": "Message", "value": "Linux Message"}, {"name": "Duration", "value": 42}, {"name": "ExtensionType", "value": "Linux Event Type"}]}WALinuxAgent-2.2.32/tests/data/events/1479766858966718.tld000066400000000000000000000007571335416306700220340ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "WALinuxAgent"}, {"name": "Version", "value": "2.3.0.1"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Enable"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": "Agent WALinuxAgent-2.3.0.1 launched with command 'python install.py' is successfully running"}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}WALinuxAgent-2.2.32/tests/data/ext/000077500000000000000000000000001335416306700167715ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/ext/event.xml000077500000000000000000000022201335416306700206330ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/ext/sample_ext-1.3.0.zip000066400000000000000000000015561335416306700223220ustar00rootroot00000000000000PKR-G.h`HandlerManifest.jsonUT OVlVux }M 0}N֢D=)G2$J]}4)MQ1̘'&bЃA<^u`cWFт 3 MH@U*mvNM{vmBJo,fj=8wpN37on2<)d}Ixi (hzOە\7$r )"5BVj#Ј>q(V28wq.k1 \V 0 and item[separator + 1:] == "settings": sequence = int(item[0: separator]) if sequence > latest_seq: latest_seq = sequence return latest_seq succeed_status = """ [{ "status": { "status": "success" } }] """ if __name__ == "__main__": seq = get_seq() if seq >= 0: status_path = os.path.join(os.getcwd(), "status") if not os.path.exists(status_path): os.makedirs(status_path) status_file = os.path.join(status_path, 
"{0}.status".format(seq)) with open(status_file, "w+") as status: status.write(succeed_status) WALinuxAgent-2.2.32/tests/data/ga/000077500000000000000000000000001335416306700165605ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/ga/WALinuxAgent-2.2.32.2.zip000066400000000000000000015726701335416306700225160ustar00rootroot00000000000000PK"3>Mx 8#bin/WALinuxAgent-2.2.32.2-py2.7.eggUT ΰ[ΰ[ux c0M(m۶m۶m۶m۶l۶m{n$?j.M(H@T@׫okhiOi$h-zEDATQu*.*h#YҵS9k.dN6 weܿspNj+W6"^v2]%> 3w=N ,mK|1_ 7 pk;fXJ-4w/@Vգ:ء;p}sPøW2 ^;Exwh>.7^Ŋ<%GFj@dw@w*q>]}+VP +FDOɉ u2:ݦy"ʶqޣm/ Cԝbܪ_1w@0'4*~G?,`:S|2m' ? {縙3#wdwc[chlE2 s%N+ wr:acɤ55Gt'W dk8æUcR>s_&`F`)H#qCq## Sq## 3(0_p ~X3f TST5 $ 2Sˀk!JfrQ~JxPH?+N? mn=ASHpFY;?)K\ qogG-^QqqZI91yz{C5~JnA7#(Vh1Y'.<)LⅨwrSS}@Q VKwp7ijϵڴmgٻ H4Fb j*c|襭yD,1u=Fɥ6rzo@FM۪UzQ  ̔Q`No6&Nj||<&lv0Kƴ>10,WXӷŊ7ﳑ*ICش$qyo2YԶ4t;k֑ ^\A,hZg4뤈Dw 9fݞUsE!E-Y*h8Ӓ6T ]VhGQ;SHDSG C P6pUU]E4xw0>1!hX2@15i s!;؊b+'@}V򹹍Kv0)ڃ.?2 QG5ZO(! Z$"%fpFؘE*) ײDzFk_Cc׮ux#B™d5y\MyWEإNQlu*,`0۾pkt"'P DF)K5%ul>qfVtPH ™^}0ܫqQ.xUKtCg$LJ|E tĩGE) F K@ Z8IS8M k@Җ4HZ'S=xLF/Omԋ֍76|jRuA؜t+`<+4$lY!,'D`5P=Rz'Ll|~j~'f(zMm ?~`\b_sxe"Q `"$b)$K39)?%)3GH83=KQ;Q>+/.>#UKE+ ~ߧ=z& ̲SsTPGZS҆ϗ$CqD7@C<Vc213 ָDu:p2H(&ٿ]]KBvr GU11h j%pZvLO;NtJth"ҤPy!26CELXHZw)锥/p }΍E$U[43)vKtSz1 [%*ڡ=3ҚOՐc#'|YtG36~M9e1e}Ko 0]RbbhEsh1( 2ERM2$pbH=% l*ley:]Isb왥$Əܾ)qG8p|TiTsDĕI0bMI_{%AF$,,;FRNSW ^8&-}l6vώ9t#N-8_2%t:fy&B@qe (<ߧh&=̦2j 9HwRh F.Co Y|Yay C2@߈2ȉVCtH<Ea9%0ꑱaiۻ yB&hrDX[cOja1FFP?!4N3WnS:P3Y͓v/{Np;!r|Da&QC W ?Tjhz;V4Ly69#~<NB҄T|UKը4ȎV[`uCVÏ[ZTT?x=||K~j1hnx16 a /Hic(}TMNe;jy0Q|P綃r 4woҁ_`oOUL:LxjW XRU~N(sfN|1{Pb_ύ{zT h >$uֻn'iC^raηM[mӵNwv.DS- 7QoUz~&$IY+KI A. 
B@IT}I eUA۾M~M@_xP7V]3(rh9~NѣJdO"Qz-4RVX1sN#ms汰˄u>ԻLu&pѫjm[@؃/_g2&ٌ/m͗f[4ǀW0rW>©NFMgx{@nϠm"gyЛ*s>+N/ŷ_DU +ۓkō=~4ƅkĮٖjs܋Krj/Q-)voeBͻg5w„mO!}W3Y|&nwtKis9[[y9#DЛfe 񁞧AE݃=YAНNf*:Z_*~OrٕP$b9 RQ ҝ|ҹT#/EGb>Bi`"6RiaiSQJ6vҋr)zBB)BJTJVJQ Elgtb\'م@0[+\|ТsPl| .,D iaz].b=5HNt*TCHTwRUWcEŧ'%B(sWb[T BXFwN{T[ZQդCK-g+SI+6W|WOCÏȑS3kVhWu4gRkAWA9*{wUzݪn5.?v[Ҹ$0~^-gtwY ahKIbQuW"7u1E1e[j5(5)28a` (Vn r%2лKg`uNOŮ1lfCEJXi^M̕x= B P+j? j|kR栴3՞/7e w<\AYeoh=Nlz|Xn|x^{4մ);('j1nXM;~^ ݡJ0;``.;#A\7 cBGF 0ͽ LQn1 cB_AFIv4ՓîUe);6ݹD"?M~N0@+]dzE[qﰷ# =g`~߷HwVŝ=)Wgܠ XK2 X!|t׊]ʾLQܖ0=Vhw^ ']צG\zDa,=ΖuNJRiW,]|fvsGT:TTDMڴV:NT;U[^Y6J tL+i+x4}`Xe\a˚һ)ʕTNr=83S-0?q&֍cI  @ c `P#p%k46xHRKK W3w xbn| Xo=Ҫ;rVO듹0o!i}YKAipULf3KmYݻ:X=@-)iNWO ]C,@|pGqNLE8IɑKlI*p["A4P#F1T|tf~Ăg {8\nD?,; M³-Gx*xN'3EqM02raUH#&\^֠2r*dlx-@1 8舓V wп. 4C½^E6ōb`1E:_KSJ$8ֈۅ̝ct`^Z>/ E ֈ68fc\R$q} 281 _uM-ge۞v"(:J׹%.9#q9y &RJZ6cՖy˧—[_T8]lٶ^L^W֩g}0T fؾpTN_3]ۇ|P?@G֒0:-'@ؒ+sVӇ[H`+:çЍ9267Bqkv ZAsF_Ai8}h ǚNZu7a6Мe+nG~vz[t<Ɵ34Ze7kq.4n:OM8"[si?1h5ۧp#x:v*On m+[CT(VY7utsL#ťD,ߴG#@xGlA DwMC30v\k_Dہ ſw)W1LPM]Hd阐G3n'=EJMFPTu^, Dmay< ߍ941$;%V8O?;߬,Y6P(@^\"? _&k81b2H"i+!d;Y/lX^p R^ 1z, ^"X1@9 aF{%:2x2^zIMo F1oIdF2I%IGӼD#%̔ Yq}IF%I8*'|0nBƆ/l94P|5<>Ψ?Cn@dzLOkf@+5tmc%c8+[.+@coV)/P T κ]0_G0CЪ2O ;|wfxnǤ¾ӡ||Cu@4?S[a){'];9܋4y(!VĖGwMlv#|.VRkrzggI[J?8 -[-̨)f\b[L Q׋]9v֣Ƙ]>/I.~&dRklQjľ] ĸʧ+ 8ђ#/)@KO_I+~'-A%XP=͇ȹ>/W6ne۩( 2( +JVa׿wʿ^yT)-f{޽]<&Q)ptg4;s_[{9. 
֯P+P`v:uxP;@ϓO0B?q{syȎn7w+8xLGdA9k詺y>y9ڢ 'KGRpg(z}Ӟ^^M1a9V ]o*?o Pe)rb*2B=,}i3I}Ȓa`$EeyQS DKg)~Vr.`UƅFQ}l К wodn#z٨k^ƅ؋ײF,@ 1\ oyo|/W ZBx FIcS4&]ؖRv4=_$} ȺKM0{xE&Z"N:!+' N"IcL~~"MBWb5tFX|ÙIʁz{2(ps]؋]o^8Nq e?a)b '51{'Lr$+L6ߴP1EBE%uʅ0# `B2[q6c˯?D!pkO D10H8aXYt!A8y$%F@f |sX !6g}|AAуBܴѨ͘cdJUDqIՐr>SpNJWQLO"r#NK*Q l6J YW$JG) i'93!>BtX2$q$zTFJTè1b" d㘜L}Teh%CjǠE!"+# ʰ|Qn_2x"+y$xIio{g6AVH,,ƉrWJhvSiV#Ī N)'{a K[|toF=!qu-#%7GLS0ƴ 'VCѰ*ZKʩ=pA: j.IEs HW߉UXtV@O)H4J)I ӰmV̗RVqyPw\]G6mAv`ʕed C-)OVE~Tħ"ۢF!K@oqGEG`S ǹ>GH4PCl/1IZg;jlμg3Gؤ@Ѡ.eÔO4МMT io~5{ÖQ,ϙtmLNbS^9|c\9ZfY-`T=و!Qu!i )4wc?d&Zg'32R%m&dyj\ '=@sa]Pt`Jqܒ OqkGphR6N1?G mԒKsלZNE@ɛukV,1gx]wf[ovE{Bk,zQ~l߰kL%MlY_R˶Ϳ3iH4xj0dߎG7dـJPjf+7O,V"\P phB kYDY *(J/ة>ƼFfuSL܋b81:42\JLUR_m-,f[-*칄"/Mr2!$j_ ]vcT DsDd҇UW)iA{̗{21[zl޻A`" TW<DHchO<݅QG—lIElZ YPv$@Fy5d涟/m% 畠7~Ua[<̮[̼RzBrj_Tڡk|-4)P,fgivyӪGjݤiQR*Id\0^$z]&ʈH;j D ĺr %JO&6}DX>{+3p"Mscb;Ԝy&)+Y.PmL)@C+Kv@6U% DLANЇe?BrkQ0X+.hp _6zzC`w{aF=4ƚ+FG-6+SV.4ivՕG%^ƭ>#EjIR,Qg)hw7=ZЊ7ά&@&q )ZYedZAPnOq#SX&:p/t_"[&b}EpX %t4Ҍ@꥽{u"տ? {NRpשf"0`4yUnE#R!0 KD.|K(Uh(H D|z-P&krݲyhŷ'hƫ,"u^n*9{W/t7QIݶ#c3Xme '6]˰7f(6ZS~v C*^iqaz\6Z#3 Ok+r%{ͱ|Uy_\0u>\7b=?+M_c@H$ |&7N\Ī' keOQZgq~zzc?R{=e~#rFI<ֱNBS %q]jH#[\Pce m;̧OӏpX7Xtj_+Ì]'6mO[lfm/ک薥v7m;5L?&ț nWlHvj^,$5FP,Fkxir!QV~ }ò9WId̘B p!Ԩ ]j75̧Ww4(c$k /DCq FyУ aڈz;lF1STʠ  _Xsί0"켡|ؒ-79Xr*>^K&H4򳚎a[]Chbcq"VqLvK@zRZıd uWyA;mߕwˊ^x:`j?Q:+ş-61KJ~ $|n٭mƋK))8*:#ۂ:*tiqo/<6i]Wd+bw(L;QTJtt5&qM MG #xZvOKW5 n;\uOB҂ Sd$9S˘ wNW%q咑\52XQbe14W{w%-uo9Y/m!Ig+2Ld_U$L ۋ +C<&Gف{yq ]rv'#t)Ueu4+YcnP͝G\z䵍\d,9p$ڙVh2-co_z16MZנQ;S<⑮⢚tE 66A[,EG,h*F>ȕƻ/"3ԓyavԓ-! bVkD*KP1TUZp8::gͤ:Kf.QN}b+NQZ뀜2+*cwJ:CN[kOLq WZvzq0\Lc@/9~yB" _!=.z+ yN~$͈rxs"QO+6I UE??)TP{GM9ꛘ*iPcFKLM/Pcd$ YoӥJq_ %E9x۸uuWx_VbJ!m;Rd=:د<(e&1SyJ}]F8}[KJz3vM GN;;C]VLGpl%\9#.;9t7՞8PQa|bR>d`Ni^ 2YYwtk43-{oQ` ab[4̺z YTwvE,+bj_a1{RTQLx<涽X}Ov!;FCvH.풨NWE.6c$eQC1F(i`‰kNawz{Б<1e1 %7+(cOvwⱐG.كH5WMq-D\5]xGwT.*^>9+S\(;g ;oO ”jXefoC+#3rb #{~2հ_tjNލ Fs_7 6I<2͉Knr*Ge80|^;$Kbi+X'/Z|$z)L0ol4N8%Ѽ? 
kf -a;w{I뼴' ~g{kyWK\)βE9fSHv.}KrMk~$AV5!g0|C ePGo/{$I:Q맄TiCfkv2ڹ6B=TTt=8:S3GyfKǛ_V ך7"dvET|hP9&ٗ Lz/,vqnۏ_oߞ;twqKʯrq˔dՂr1шO]]aʊ.g rNel#'p{!qw]gr-^ʓm.a \ȐK*ww T1E^㉲dJ;S? ,O}^Jz.$_(_4O巪SZ}|9nBd͏ȅu7!`U O-o qbI%$c ˰z}C;M뿡:ݝٳeLgxUFͷv !LaiUURe _rlB-v? ?(Er4N+׶D{~چq /s<zRYWz&&qĢ7oN7{€eەB)\ԧb^DFqkF~f\y#%@lq2WQ,oe]4Rܒj6!<5dqUBN9A#墁_iy˃WStvE~rtmg;4o޼Q_0կ9:_eSƸ6c,n #FſO[*u~ ٲ{3/]Z9Sf-[.PR`~"Lr@xWG:8ե_T<`7hZ/5jՋ5$Vuqh_mw/c kԝSnۆ"wgAjH "b;n3o]&5С'='R˱y4)6s[0-|&=Se{VmbEǓ̢#]*&A$:T Fᦶ PQplr@Ңr f FWHKҁ?q/М׶prS1!e*{ٟ5M|В Lb8l| 0$,dС2T=_oDCSmL)6cFwųtL| qṻtؖb(thcr*g9Têz,YxIHG$满oE 6QGI$G]ɞkK+(ω^1QxJ,.ɞb̑H|gh_x1^,pcaͪ9"^}̐ޡ\;&/~Q~_F7']rS)[L3N8?"G<[w*If{%-63)"3{,os54yRF@TBS3t7,C2 j-qY{磆SiKtORV%hvKE%7w-pU>#?L6 Yƈ TBJD=ZZJ1(:9|A[`>p -aI;~/*mr0|?SYM֋dk5 όɷ;MY0^D%/M~#P+ȗHXl[ͦ_)QF(=s IGV`j_"wZÑ(R9}p7@4Y8AoSH08 TQlXD̤]G:nJAe>.o=p#O0}A0E[!)hY(mie~&Y FW#2D e{h`tJcf.i:-K&t9vV۞e4 )ʔ65z (j#h a/1p\-9C'xI^l^=`WiZ {1D5sDsW*3rTfvLL S:|o3A~\Y>^tktdC"mƒ\3|09<sEĿ a+K[%1P%+܄Wᴤ[Fw2$jn]heXlbV[ `scXWTү2H '\42,ݳ\Bqև[NgITBT!vm'5ߜ:x2Xڷq{;x5|#M/[hASx|]_& $1{Qg5Wr:[sZfv@z;_"5_[3ꏞN1}ʨwy'Y$諷``ڭNyG+g`;!`M>zh qwX8YX 2Q"Lz(fhޅ"T$e qNo#HxH. F>1kf. ךܸК޳Ĥf8zQxP`%[C+F{RNQ3]5BYrq)v8G讅&M?}Ű1 !P?RDČJ &!e)^'}v?R%X{Q:yHoa<>oS xlBHǧ^JdT! ]Umcs'kG8Xgc. Gz[VS]eKKVb&L][$5q?6ٗ\%`^)s2mCMxЊXV1}ļsC{e VWc`ѠIG9 Czz;P`Iن)6HXF 9Q#P9sDTԨ] 6C0vcdN hԷl=r&Hzd:nZWJVRE!*m9$h X@]cY,<d:wȷaAY]E)PQl0VhnEdDMLuOL[%:VGEHIv.rZ.& uɲf-B\5۱YR놪Ќ3?1TÍ,}\.2? 
ޮ\ i疀;x== 8VSs+ 8+Vo2fm7B{nW|Ž()?,Dh @bdD7ɭ`CJ5ئj^b͚O=Ѩ u蓈?{cUL^:719.b5TYCȋH>̭˹ܥCNI6׍|V/v 9n";n`w&ɥn혞┺gob {IT 0&Ig/#*6_zJ\$ScT8|gWM %CU,8}lxus cV&u*qBکHʪt.ʼn]CҬ;=&Nrv9^uO<[X, XsPsgT?3OŴXGۭlmŞjZrg}%S&0S%ʿ$ry8i~:*28"Gq25ݑ)owJ}.=˒F\\rV; OZm8 h'bzJcd KBm)/4УoRKbWpˤYЈ왉%՜9r<6<*e%0U 2a'GUvn&9Nmp꓄u<*pRmps+W6E8tfQ7!qEʡg0IH%*Ze~^׏סWNޡr A 쫹b54Or+*nW>T*L a==eW҃WP a٠vӆG)#dLS1*2%adY{L|'9H.P\(Exr=hQ 䝓qQ_q2"WByRE16|l8hV(B|A$"3jfpR?\ "F]%lR]._Ta\;IhmŢq|<7U dž@@ ?6+ώ<3_ 9|;*oG4[%'uW{5M*"^pp[uv,K,_ E=[䂳PD3v' K/65IkSIF w,-vuJIM܌D:#^R0t}5_]+ťDt|ggiEڠӅIHw[~,-LLf;`I\2F*|`d;$hx4f#{MY(-$:+v8V9SuSE I6ť *%| Njʠ&sٝH1FP쪹*L\e/CT5˭pVV98 Mo& J dES'0u˶Ҳvxs HFzbΞN@[v}4|r!忐EX 4l6ƶyiLsj۳Sl^k;ݶi|{9vf^ciit]J$c)Pd{f?W;}.%°_J⌴dephF\CL#wUHj!R +z/bI}b*@5[#^BG8*cJ%{4+i^>tdBJJv_IJqU-N2i4Aۯ j}П槿Ӥfjn/_vI<7;5nQZ$dfk k= 8bĘ+ƱL+wr..xjϯIr;ﶞyl%sXlQ(PJkF ͳoEBBnDW֫j4^x{' 3^$Iv_@/⦔8NTdp?z8}sj1 RtGLj¼S Ucq-ٹ(JPc2G,i00+['hbL\ HEJ J/OC QEn*G {Lvicde `K47?nYEk U1Mhc 0(}*jOSQ/ v235Yv k8FP7 K-o-olԱۼwZCb+F,ȼo[K\<ĕGӬ N!ZjeP! 'o WQ]T֪ c [Dċf494訾Rű[k78рIAQ86LUDHX=![1\XοtSs%Pyx\XLW[SuV~b!og莌f_q̆ΣԌ{6]ap;b_==_Y2Ks2EaCk"CZ =RFD. f^QJۗ{ HYs,:LlCbȘ;i tPk45>Tø|uI h'/Lɸ1C *|$B&;쟥^i-Sf43] V4[Z̼Oow@]'Bj EuP@Pͽt8ʮ,9i^QVOܔIśǐNY"/ё󛂒zњ"^0zTE}ǘ՝6Y] qhE@ ~_կ3׶D❌LR,mhm7H˰bzabA!muL`midTz/Q?VƩ2 =ae]dtqƟG:Q1rZٟ/8 w:IZً!vn3emCGnϛMxF] l_ igXR~gCojUVhF@v Z9&6Y#cmqZk~ 7Տ&x}dp~2 Z$t e=0kd%k5z<!x<~[.my`ED뭷a:hOl@eLT+e̔ns pS:@{[)D]c(m:7TǾxoröCՆ4]bm}GHbPc%YP䙂骥N>=a;ܵ`$.M9v[,)iL]-2khK"J{רM0UbR["hIl'⦁)~0T'-sXWY^K:wφ 1Qz /c1]TAdfR! P qITr42Zh)ӴDP}k~ HWIŹ oy>Y9>7>PS LFΩTv*w="yq!zŞ"A[vc=oQŇ _ }xoMs]VۂSJ oz5*e 2((.TI;dždl,ڍ{Q#ƚq H[qphC(/\@ J}&^ sydLL4*u~ B򘊃6n4B(I]Cc&]ZK$SSs+sJ}G<8F9a% g8Pfi}uns?G:|oZ/t`G@_D\a.~d]f?ȝ2UBӜo [h~]E( \nj=>Vczo7%D)mI~IqSfQx :jTR^ ~Ly̞q8B]0ߟr]Ԍ)TJz697'?Hl9m3JٗX܉`T >Q4<饓rcIйM[4ܣ6K}o|F47?hF*g-A|6o.tx&r0E w9SD8~/duh$UT eoEAf>_ Hۙfr9x">+F.<\!TWpn2@W}_u9U\0 G6|pLQ{U(F@فU JRԭKV5=?خޠw]ɱW`6yt#ECm4)eڐ.w-Tp$oY(1L2=oGj7e4Pq}~WAMNb^L3+F3bo8k@egYOqO ͌ Ni"hl$qrKJ ƿ=ƀh_6/6@ -6_[̯'@z2>|iW\x߾ O+$xQ04rB?  
g ޙ9Psb)C!+-r/h!{OIT[zS)+c})\Iᥝ_J} fQ}taT2eTf(dU3-!'[\.GS__wpCxvftZ[HomRQwNkXK$KthHԑ\IQGh?’mu#I90~8S)LIbuI IoEoA1>O.6Q*T1DL$I(ajp'Pw)1 a8c%dILƃcJC@Yר ь(?)ci˭^6%L%S-=}SؔuK$^a7,ͣMqMJ 8IHL!HIȆlzRTy38D =0bwsuҖDN#Rg2uʖ4ue̊/dOs%8/i]"TWw&&cC?߻G[#^o<}&a݆\J& !hPtpJUp^@]p2JU25K'^2}D]y؞ٓwwd'zZI^>smwpbidS2m L0adP v2t4I4n>ɫi=bڬAs$r֟L5"&: #c-8<#s%\n-)(8sYoM1`>d`}9c dam;e`ˣYϩ]8?)-9Zmn/~]b) gIiP668Y3pcjW@<-0ھF gxkt$ir"( 3CYj ~ th҅,7构g Yx\  Ndr #+i(pn7lB+|T@/0"04ƑG+7nOy+eVflb1UuMل" *_RcQّxeFӗ,Nu@BXuA- eDq$Bx ^CsT6%g6VUx#6}9;kj=Fh2_ 6OZZLQ؁jhVoAzIϡ8 bHʆTeԃe78c(YB:l> J_7c jqF\*YɁ$^j{WJi`1Nso=PȬ/GZGE&k-HdWzt?+CiwfqJ0l~0yR}A=?˘EֺܺHĒ-Sq^O O@O/؋YgV//? r[E[=S\8x.=+J4û8imAQc4DΊG@b͈o= ݏo:c>kn]UDY1]kD-b4w>ؚJ.` {ifgv)@ͨ 9x4cq~{GnGF{%\+ĝ/WLA)!(2kdl~< (NIqp~r@!/E9j %$KDuk9:#<4f"ձQ3JԆ/vX4rdVrgaqmv@]U{I6B^cD0נMڗWhKddjzd /ۗe>CIK?[WQ~mz)2i_F8&e /L;Ghncij=;' ŹaY~gv | wGqkp5::[دkv6ޫ ;?߲y@pM!N̶ufZk'h%xpAL]ڶ{kI\r ym;_^`8A/a/߻[?^[쾟+{g燻!+LOa Daap\ oDE*8(a\!hUJ?ak^MeF1h^O(sY8hOf\%Y/W&dy|:f ܅ ܞfg. ϝu\{|n-#P&j!k]zE7qX<;@SnQMqI$ݲ|%7q{>!!>ނkcB|Ȅx or>oJq_ Sc8e6cUP#y`y|_{}=<0>ow{{}~} } @z`= ?k@k`080<@=p?n@nH0@<_<w}<`m’L)#.mLMiSyv=ǪOf{`)14F Q^Gyg^RcOMFaRbG3ĩ 4)ы ,$V`y0ϥ]G#sDžOS|_TdU>.Ql3 0)CH5l-2%mDu% :wzWU,8FӅKJݣNfj<={)S s):Fի@/NPr!0B~ѥ` $caQB׿GBlȇՄj4,(-hLEܔ@$AZj zU&EjHhnQ܉;y$}"^"hvGԐDK2TfUצx.{[MT[Rjl@'qjErtzLLr ~ހ!׀tN0T xM\;"%Ӄl#L 5|=2U Km#`@",~/8vXiĂX/|@̃Hz {6w1&<ٔ =DْFp?d_CmH</nc$s);RR6k3 2E;+5^4;c\LKI{XaYp۶m۶o۶m۶m۶mƜL&dNS*j hΝP+XS $TYf{uɷWi7AM5Bi_\!z6ʬܢsJAxsȸu&Ti+9@*?@ِ[3=o]5ϟ&5AN<u#̫>7zOw`I%Ch"Y\ hiY.\ Eirk汾O?kG~&Ȁ+d]Z@hwzZ", υHתkԥ$ZLXҏk5iiy~QyIIG-ms9/<I~:cglX7-,}|pa;N[͞f.VtXDpZHRIr+Ҭ6<˜QSQ?i{Dсky'I]՛DB3']T]{T-tk* |]VxS|[ʈj+S]޽ Z=q>FJt2}^I)dSg Ujswh`620W_iԠCwЗ2ƽUүENٮ{oj$H뚰"> {v &(/Ą&>}LcXXRMyP!92;Y̆T,$ө=$y;Niju "\ra>Rݢ9ؙ/Puv=9q#M{=#s?t[=| 1Z٫^_ܹ?J`9Z2H21f1o*^O]LRȾ=IFȵcԫz0j_#둢LOD@H._4%H)Yp2dZٴdd3hܶ*zGz?`ɨ ?c]}Ͻ4RgN&.ϴe?>(*X\unex750dBfߙBJ5[H{7q塚# pg`[ѝJZiWލd(]]=ڍeE"jf1"e4B@7OR)Tg< EyP(ssM 5:? 
_PiQk9a;Ax|aӷH#')5ׯ3M}sg Ly⿛T^-%ho5*M4=d9.=ZSG.c%CDæ>:@6z9S8mЭN1JB&،'r wq㞾R4=8(϶ԧCJO+{z*J>8(W6bZW}80Cw5ʊTC"fɢ="K7 *Ξ()&:4ZX '9F%#f~z\x`+dx5(v8۩Z]{|q^4~2ooo=iұ @͍U?BLB][4S>4<+)T&?NNbqe I)'_O1ѷmrLXa?L$ v@q·%'zh҅]$u[̐rCd/$ ۲Y0T^5N ; ((8σgFNў06-d9nF7j #L_E&.uE2U '+;;D.'F(zgz1{ze_hoH}PP E!C@O};_MPaN9Ќ# JxtK}#fvBh i#TYr4RB#|yf\m>f&hñ"8toҬI%:iP)cm(FP[2)%m}4midΓ ^~Sp'2,0ACB|p_C bRH Zx iR 6: SrM5%B `la@(bbG_Mt6X=[d.WD[[Xe`Zr3Wш5NʱM+ima[39C79g. -w< -X2-W3 آ6Jov|+yXۢg!nTx}ڍ72kK&S!hiLQ5`m2|2 iWaPzg!nWc:|Hhc-clǔRBZ5glaLW̙a"`2`tyƠׁ"5Dt7ʎЈo@|#0"& űeUn 4Fa !5|{m]_aQT'֫i2*2>eA 2ZqȗB=ٲ\;Ew\/HV@}.`츢q3nF/2ܞ,h,x,\m]TjIeˈ}nq;0ހר-vGBvӲA䘢(]@7'c{k5&BM Շ4Dr1`|.:K{b tJ3orgE&ȠYjquL -ugo`;bHnL10x"ѺI|zlhZJ*QQVgAlӴC[ZQ]Xl&cǑ{ƋPZVȏ؍6" XM5'rkSjQHvK;K}idu9"<jytWb=e+C Y% E+ 1[ϼotmUVeP5d/DԛyX:0<"rK*q|;C"e{lqRьje p_Ne_ٕӳ u|clˑ\!JX6/dd. 턫=cǽy cPĘn~{zܼu{۴gSߎ$1NLq%Thfo렜pw-)fw"|bo L@8 އއ[g^-#~zW-ʔy @4Ûѡ8<2-2iAc~5 w({ ustF&k07Q͡vֲ8L V7gI_t7g)}Ghp`Љq3hˮ!`\Dl2{*{ARȀ3mggOJvd6hIb'"ujHf)͐T}co)VovDM>Y`+J !h:"S҆΂r"dWi Ij*Kpj$qfG*RZFe ZfEMUOzQ~2woSIڜ|1(npN2ubgE?(Kc5E!IK6)s"5k#FDCF 1U/%B<[(/Blp?5L1˜Ǖի#iH Tx1@)o\g!q;^ڊ QSh4pO7cI&S~傯>PzuߪIp)ueG~8s @q>8?d|_hkUH{PXL4>K{0r&? -;vH@ >&| dHsD={h^]>؝Sb @;aKmҪ.g*Rr9gĤy#dc̗(5ُ>8Y`'xf#޵$ǐ'|Fgi{02&JvB 2=`P,8]Ww;&6Y=w 4[r W# ,KSN Hݍ'CmRO/w  [sW 4K= W>hI+uGȑwP2}畯vM0= }O"Ŗk  [8b @7fJ_[Cܻ^&H |?qjCSmpYLqP7wWپ$Aui8u%VQoi A#ܧ) .Lu10+lV5(d.rum壸E1Y!S4>O<˨DInx.~FrSMF6uĒ|P^8`dqQ$3#('9)T=gD *6ȫN . ;68nz+-% +<T^P3[KpajpëqaQ|z>1Ea< p7ͼ=^V@Q:p'HhRƟ~lS" UZEi)zoW  -M-;qx3wTɉĒ5]'t,Z^ "j]E)e1NI6%ZBJ/(oc37WB;GctA_Q 9JΙJ4 oѼǦy~nkTbá&lP(] Zh&KJ51X웅:9 D4Ѕ;9摳9 d\GFdã21 Hʖ2sj/Up68ȁ> oIGw./< o"4B`iSE'CBGBvWB^&|)rBn)}5^έJO͹̩UūIlI[{ao|91,( Ђۆie#K3VQs!_Y(WՕXa 4p)Xq fg;^KB?ߥugsqwx[nW6fJo %<1øw]Be@~%ٍ0rYVtluRxkHiNXT)R \@2$(({`9RcOr)V-&<[$S4%dQSL]DZ74MXlzc J3y8t }@."%ڡBfB1AK0NJ2g |Mn~CJ\%o؉ғ JcmSH_> `]R;Ϙ\QrHN0[ZhxL mX#$~{jd'`IR Gtr71ڔ|Hpvg.a^#¡0)4erbrq$W:Vf YczyKܓ? 
H,Yh@:;e}L] ;HMI28;Cx)O6eqek;~KrO -hf WL󸵐fP?10St.PzċW"/h^ƔWKqұem.l6qϽ VF+NP$72T_[:4-v&?˻Y {K1>VIJ WW pͨĤ>0UgM)y&7/9QVqiC A %zΌ;2w܀~M{"ie*]~D2,r$NFKJ;EЎY=u۾A9ŕlziA ϔ9aPNNu'" c4SXNYfԅ8㕚q~QG wO;.R>>9N ԓӞ7DƗ0h0?6 fYD 8y1)mPGt_ gxu:35+Jdxێ7n6Kze&{?( rav~tbkZiup o茫D0 K\" ,c4>0vGt'5o }Y졾Ou3 O03&`o !.F*8 K8N+GճNxa&LG!H0YB2_XTV:|qZC;C~4M 0t8dSzЏ2&TT)_#Zq}p}p}p}"??Õ]sZ&{/@R0Zg"/hjI`DT˂HIJvmI%APsx, W]_xiN@>sG S*hgQ`deyTCVBHvMc$_LWY` 3~NMc2Ȣ&-3xx/iQUd"f;v*MJ4|"[{}ςScaiɢR꧌Uk$ [47LF\Xـ;jU岑`=ϔ\ z%GP~^M吳Ə*) 3wW;B7ƾΤ!=mTaS:hg` Zy-ʲWIP *[j]elWSU3FźAiĽؙ)wOH?PYW4.~oɃe{"τjͳP-a+1/&&R`;0Yk?@:>c3zX`9_*Cqn_3L\[jsIfv11T2wj6' F֏a%_ iFrkrJ`NzkW'ESH )Z)^<|D0 'Gշߴfm£u9Z (1:,0q3[3#OAT O$! ; yb@כQwwyƀM_:+trKmF!a [ Y^ 4ͫ0&zOpkBHM'Qf.uʼnBl&qbS%p@c#!4|B?KΡcڨxD4u\:M]!"uŝ @&r 2/poz0fv[/i|~%]ot&J%,daP9+ˮ;tkz^]SNp䐸r(xOݫ?XzLP4P@1%b4cu3C)ͽu4?ōUm TXmFX nHE `)4)#PX4ٰ-/@ǜ8@_0iG'p SIavsud2$ʜ_ }t_/cbj 918D'unPZ|6m)`ҳeXl9|,etwtu>}7HMb ߊgjf:"!IHwbokf#ZϾ=!kJ 6*/zX- vY? Yŏy$PE]YYJq#FV#$4Dz,DaG0 :T2HrNcSnz )Uag?-ֵx&7HRm}a|-/|Sehr8t̀ưò 2n溻z[鷷H kLXqtf3BC$Q#pV:oV2\E!)H #́-ށ_Beypf  ^n](okXO" [^u$j-b.`j~M֍هI8c p_>֣%σ;K+?/e"AH[q/ed]C3C-D#H0q?`\qwRrpΪ>" ~,5Q$\O+?q;˰!A@?V(fԫu|~Fwh/ sK\-Z%vN@KEj>=HGZ`r˰o ;1<ۚE;4@O(4C*QHKꞲ֛CiW#WaĚ$Ix<,?1]RvN [HyitM8̧C9x*!̻wBzq{ ]V‹|̓77W-iO;qoi*i" n6NN~7gh3OwFx=E7@2qzR#" df"9{@8]q{XaTX)cx$_Dðc_J~[Wwɲc,ٝ@" >1t:?^'֦;? 'bDYfFNX6(j\&%[scӉSh }Fo/psTx +raU{ GО$䍚#,͛|} 2GHqɛ~i&֩&lWޙN !dJU)3g3])' M!yvNIwo׫cC(]ïgX̄{н/";;f^ޠ4-5ha,/e1:nt,cqm_1M^ {.0؁`SA9$gdεtPWfä(x˦@'78s6qsGj|ٮvT8OCA⭐Nqy5LPy>BO vw{5Xq\vA"bPSؿ=ZҩdMݢǵS0t>§ V7աS4n-d힋$id, 8zt?0;ɒr5E 5bEH/FM],y'#6pC=*;}HSXU"#Ze[|;aW6%a;D5K6yhSURvS@#EtMrfb#m: zR/܄*2W;Bw7hva^t\;v@I2g@~ bBuI?>#hJh) EM0 }̚nZ#UPNUx ,Tbcf| mx&*lWQ:If2Y" O$1@&y! 
NZ*ŧπ3C ZdS!4"Z':"=eqP[0%j+ Tio^`]T$Y)[ՓXD30 P05/ 5G f;!6hҥ܇9إ*hC.zTtĠT>!yy&Ӿn" Fj6kX+A1V@6yg,+Ö \uO<CK_Ē@Pڂ+qÌomYOp Qh@*Ll>X9^|!E TJȷ=;oȥaeTrpxdTp"-G[l1-)(WUPU/RҬN]X+ / dBVJ iTudHfN9_OuD|P:*Q 9"hհ%q@Nf)d/ B^DqpFi*]Oj* dR'5"̓Oq W q!3lrσH>l\`$"CCFԶbvBAoaA]4~@Ւ0]nRjm@P]:WeHЪ{c['n0,y~kKJ-BGOcgEY}\<hnvJ|?fJvJՂx *gՍx9Ҋu Eu?ߤʴA L"#,V ai.&7 U<5eoIW8in_+p9Zf]&DۤHqy:rGgPj.0.$&zxS~۪Jp/V `d5mհllT ));kv W1ԧp a^MQ rKY+PhP {&ϡX'MIQk_{QisCWEC=~<f~QRX.ر]9Nb^܅ F\ӝ=: 9Kr]N[+#L+V=d1w(klr4tZ)U:/.KX\Ɉko-i˞$|%~p1JhwH])q0"ao}t(xBJSUđ޹1tJ31ZV[9ui,Ced1nf %WJ :tXBҷ{dS8^)Lg(j(vPqMMDTc76AƇh?Pֻ:3ePDA'*'3@Q$W8!Xh>Oǿݐ8d0"d@Nr}ƺ7w&5gOgZ'_ϕV60~F 2&w|&,46$ڔ!<ʌ.I9B~s9qK62`uHx+LF 4Ґc ޤV̅FSCBl١<'ƛx딸TL5{)JH?*EhRM$)m3VڦL5NřAhA^ŻP" ,REڌm_&ʡoL,DU2L/+]U>rMB.2/Q>OuBت2BfKNz(7!A0!å"8c Fg)u BOL=ұ]9\=:aWҳݰپ`8TYvRu0 rq\bu0a+2Q݋3.|)a@Vm!3$3ZbS +KeJzyqsL<u }@h yvKm%R<Z~vbD vܮ/ǐI'r>p^.0vdbT^Ų>ުYo4 zN5Y=8Nfy]h+3q: &GXE[$BTr.`'V:+/G*R@3)XJ/c 9YIdXMgs%nAT!;$r<8dm;]== WMd \2oOz~׋!gZRNPmtVD?0ͤ 4(ǠK̘jL~#KcPG,!'WCUÁBw30;;_V'@x10j`̳lkRKʪ\E \jkܳ `N`ppt8@iU$iuX!=Fpn(Dl0JGqu)ԕ‡;3+V\hS>%fƠf-2a v?!k- )#2kҨWb>]9(+nb"h/ehXg[F1!__~|ݻЂgq,pN%y5$ZS}D`c@H_`n^IFJ<ׂzJ6Jv>$_'28m@;̐Y-dtN.0=8)(۶]&H%9j\)tc9ٯڮfjK(^Mݼҍ@mk 4,gIi||ޤMe^$}.2X$C 2/Ei`!$G-SlIA1O}V F@0p/(暬pR1DAwвE?EKSoBEϹyދlN5RE|F[vpTGă _Ƥfr|xTLU.НkI^_)tⶪ$/YX>uq"?/ 0.d4ܦveLy#3ޒ4w(kSF5!2BW'mIwG|u^R9iq@8QAzgcƗzrg>7@ՎHl 5SVf$v1p _2'=C[C{ wm!"~YـJM.b\rBƞq7D OkWתsP)* '`Q0Zon"bd4m m27hvBat H^ e=yK^[ X:Q <2>xLIL[Czyhsjn  v9Ϳ ^*fl30싹dˆK oP1w7~Y||aOtNp t$I3>"M'$3,yA!B9(&,R8i t WЫ L=IMO7ǒQ52a9z(vmzNH+y!P]Z@=ɦrcd)^;sB͝ڲƜ2OOR+(Z1!J$J_vΕD;-Hk \`&fU8҆ZgN}$;OpuZP`" `BD)ۡQbڠP@ oG…2usb+5Œ'X},ƀN31lJe.TK-4ic;,{])5W:]Qz >v'p_Z),]7mzNv&IƇ4}_дg=og.1ѱ$'W嵿P}[)4Y_U11ۂmVnTG.ް*F Gy4OQ @dWY*H/8pyLNt=0,7 g؆|G+H{2$Ese>34?ie {`&$ ZӚ<Xjahf7b6=Dw8 59WS,n>5 32e GfE8 2+egGyʶT@͡c.V6SZojeTԚAe%6}s/t('\J `:||LRx>>F,{[tņYv1OF]{lǂ-wț sk$w Њ'BnmW.WG2-ZSؒ\ӑg@ /RyQu8t yOU8k=뵅;pǿV4 w% 0ijԹZzlL6#l<>K]F* f||܊DbG:I큜8koyq) ᇎ=& {_[4&BJ(EJw :FV` okPE >&FP4 d'm3@Y'C!+ 4kĻS~EpD\:8jg&8/m^ 4=yAi]rÎt}~"&k>= nLf=wm 
+..ȒӽUT|CLQia3#Ii08$A3+ͬ=&;s [柶NY&jeITƣdjyr~G22OOp/遛&u7] KtYܦݲ:t]/.n#aG*[\H TFK,fSQ~|l*t$QKzV$26"]- dCdܕlX-pYAFj ] -WS_wT|^p9~2ߣay7ڰG?ixժY8_$wYgw!lq;yFqsu*ty5aW*#f.GM.pN\øzbv#z޽C7`"=vQ|}N /63%_Mh;}߿iT2z)H^$f G Jd DI | p * šEe%G~_M?LěVwe*\"j;# @[[B+/H`f+wF =:Cy X'Oc5Íንa<IG EHٴF ) *YOg-٭d%۟%.,!h\s(-e͘o^PcĔ0ߑО}ܕ%>Hu <5ʏaN8=SOCxNGs_9=Ig1=Hzt1f4+Wc3{Ikgm[IgӒֺ`j\TmwՉr+|=@T8|j6]DbeEP {+iA*$FВ( ϟ}f|;n}-nbUaiD"ymەdBnJ b,e3SgI w ?.uDeL(#<9-1u$2$6H492ڥ s٬14 \M"˵XV.L8p !^5&`zw7[sZ7~j?7ucx~8/]Qb--mSDdriqփ86*їl+KV-HȚ5X.5r.F9l5x! ː/-Nb5HRUe1\j\j{LuK9+kJEl9A\5l)x{DԍrkE"`5-P!%=@<NK1/@JpigʀmdBŢ*+4d_R Wj"k #X(v*fE4.Juխqn7~nwor.  ji1[c;#(@)B/lWr; 79.G1=w˥`ʝ@אII1F F:]Ù ~/sD 7bz({l9gբob; -D _+`Ja*TopFt^-Mfb3L1\WpEo=twҤbf2t')/`x)jSOǣL+c) rx(JLR@8{dp]FvӈZko1Xan{7ޫed`Ј ý41Tnő©d0ZvoQj{|ӎTQ]D'=j"D0zۘE{fɏ<<wNxs蟱c []e0femЏ01bg{!߾NN𗚂Aa0|Ly`IBU?OH!Ճhd%íĕBjIow[JSҖ%#zT;rіN/̵֝i5 4t/&OXӦmO8ʆP^wx*5Sxg{|EHCB a!8?=GU Jf%-^ 4M 7Dk^ HˬejvLq|f:kֈgTOA 6uQBcتbYnUV#DZ?<(x "]c3eid_ؓM)'d\K .ƇEQR='ú|ѐ8[Jrggҍw.;jt STY7 m.A6U*6isyDE4U)EiQXh ok=Ϥ.0…X69dT®5į#Yۙ8'6v[lz*<'% / ZFrQ bֵ *m~{ 9(?L;CGliڌycDp$PHNDp>g ŋ)O R6#%LS fVD #YEio񽑙jlݹ[U9uT%%[ NZkǏtxKTlms+r`Yf6'hGC&t~hRDB3*2jtwk7zM?oXcq~ŋ*Myǽ }FJgwoq{2p+:0o_dլ :S^S*xjvvLoFHS,.A[C},Q aqd{G un7*Ro@nئC_/_8*#y!'v}CލFTݯM)zI/6`3{{e) h٭{|n:|^~ k@]?$] Ŷ*O9*AU=I!_&FlnLJq7Hlt@$eDܒ/UWg3 lES$$lF[^6*9ouBl۔bE"PB/Ble'6l$PyTwN+.u`@^jZ6 >u^-%妒[%DR59-i әP4pT9ixJw)s31\[J%ձf( Qо #8XZMRFR]s:k/ VH˱b*W1)Y[FYY/$ɾ q.t^eJ $mhZ !2 - h}m%Q G]y7y_To9&=d1MM1k+.]Ϫک!3jXV,MܑJbM3b5uMmީwupu)Lċh^#0]'.pĦY㝷MW}/2 RrqOFh 7Mu#7xkHXUE=) G&&6rQ:Gf_VCQoAK? N8aK=N(q")hKU#s2^o̬iN6]QsnBVr8ml8ePeHd\,|`u@}^FKnjj)eYe.V^$Yj1ZQ.np~/.fZ0_&fg4Uzdr:>蚦"!_\hf#ʖGS^.s ͶIP\ =IwMp/A`t5r_Y)g m:8Ip3&9VKPp>K܈c]?@89+@l!8Ijf<4u8PbM ՂfI=4lx3 \39d̄uz"x<$ Lug,/UaG4“8 8 dÀ4^F5PB#ZG؉>*9QfNfȤHYJnA/-+vy%wxGѧCld(?Λ4֤uVCl"U`6ov bNLdV/Jn7z֋g#C[9GMAH+-P@skdx˥ F;getDt?Bw7-L5*)kXNeˈjc47^}Ř΢90 lƘpN+ G -ȵR{fCs~S#И.UJN ~dQB׹L48MQ%Y)Ӆ_b`ˢF9-rcOۂj&CVWQedh'cT4<EwL?p{ꮳڊF!Ť$XK>0ufu(=1t}+a0u\Ԫ!6"ZwK^AM?f{tOS?D֘b% y!$_'T7p[PN/Z6F(=O-;S#u"&%m? 
*e`٭c7mP|1Pޙa"{u;+]F${>@tf ~IR ܇n.mzn!|P˹?9y}VGz+z:!fI=(V Aj8͒KyO'2'}ӃA[ӞqnެtdaʳeGlUWϘ ŖܵaF~-9-}\K/u]y.ZA𝗞y6z6ao#A@|D=*TA:Z!:XԽ/6ZC`8$o+_pL$mU6_ꌻz;&/e>O>Pe7u=5~31L~#=ҭʠT/Z `,n% ZFۍĒ t@Iu\>G PX 0ot/&%>M.9شX߻Lm"mDh27:'NKF;@P|>gN콁װ->& #dAtV7?ȣ`®C-o;_NÓF%<6QRBgqHf× !d duVKjg- Uʸ*C,ʞA_D+rIK5eW_k≖&{Jڱ. g-,@bfbXtɼEl]1:,PgH/Q 7ٍ_=I9Ђ9gv=*ڰ:4- 9<K6OO#E UXRT\$r{4c/Wa Q8s@&z.F)*D22*Deփ4zmD=7q|GPO@ӎRiˬ@2@"g}Mj+ήK_Zhe[mᢣ`Gu#ciB6zH؈şK, ]-ۦ姛 ܰw\I[s "7OlvU܍QR$ѢE? lQm>;m۶m۶m۶mC=dKeTk\" nf_ꞋɛvACoA垛;ۦw8@XBš&{ zS9,ű?ȷ%t4韓;zg=cbsd]WKE~78QZDR#G&}q/$AWb9mSoŝg?voY.(ة t AWQ@g]HJV诨٤Y8Riz9/@#tј/32l{PtwbbMFD*|f8V'[$h$@]65iѤ^JV 5SNh۰$KnF@ȑ9Äs8f&4M!OkN^m$W],>*YS0َH,?[Ub񚌅Xhlc?'N[/]ҢJI/*1fF2nB ," ^oaA ⒮zIwy9Mzىm4wMooُ3BU8q{ n4Q@عbG SLR沑M2(uL$8qǰpx >lIQ $EOD@lk 0~UbE;‰gS #7'+3 4iޓT AOH6yi Le5wtfSB["4"&SL[ĻסڻyMYSY3_rg//חG'~2{z9:~~:+rcSUMTBwlL2flɜ04h4`&3¬{Mb({@ꋭg.` iQ/^zQ?&[g#2_62HFb!O!+Zk8bMvw)=݂gqk< x2O_!5Aރ"Qe??>wsJ#DŽ B%{vѹO-mwkoښBN*{ [L 8fN6Gn.i3G@Ey/uH|G@Y˳Y@؃¥C m]#Hǒt{ӳ'9=x||X}>=sa9Y^U}z ^߫}( NM{ ]0Rit)e !>'ƓDl@j㑛P2cJ;dYxgGq sMkAVPⓡ< !1K >X!#raE$f p©7,VٔWqQAXЪr+\ѥH۩/~>ݯG $*@y蚮pJ M>#" x(mυE `j']q]3ڣmbʚ"=9KިR u`iM5\Sxoi.hoijd.,b!ԣSx߽rdW{{ze#Qq$Y.laW 68`;Peʉ;/ByTmaUWrR.q'C8mn?.ܦC,4R jd>qitTSOCɧTy1R'3?NsK}ә#ھglCe$!dGz3=[Р(Utysba#Ve,K W1cɎr+9q^QȔw~`{)m,`BӉfub쿮Ɯt0EeY-TCXIIzʓNGgaTu-J ƯWDtk s]),.EP˄W^_%D]*'{745cm@I{ TAco}PLjiiVԬz(#\9Z5ќIr#8#~wٯb@fr Fľgbdbj2Xtlӣ\iV7 O)/߇M~%q%g3kȷcjSn>DOaNee&Br*"Ny1-JŬڽ&yx'(HyRS r)3d1.Mw~ծOQ3t.LgrLӕ{.b)u+,[UV5 5nw54$DMG #tPQSl eqqTacI$~mQ2˩D7{h$Y. =hl QOnNΌ}`aC$3~=":.Z#P[=8%ollɬCͩ.PPX JFl$ٱ[!gjw?yD<@CPh/bG'̦ |Y2vy_Wff;J ;ϕJ Ћwͬk vk^Jx/C^Kë*Es.ur\:"wG!6.2%m'?\K|)҂KsxI% lj;I,ZPb;g~è%$;MSa>@W+*J G, ڮڲ0eu; $Q#L05 O>1`jUߔHu5s Fj*j]H6#ʼnt#(YOc@A WEY cZs#=ٝ.(Q}: b̗2?`qN7,τp3 a. 
8I+W%L Bi|RZ!}өoB+8T$-,D ^,)I4'BB)ކdhAX߄g.Oݮ@S&ͱ[C^WYŹ& FcdahD 0cJ7cG?0)iE=@͜ʁhBltGRؙ4."nYuA8 & RYNfŠtVַ|a&oE9L@U=XpZ7/G/>/ \ϰF'7jrQe0H%!ι#{`,tBڼ*&N|(.RjI龟VHXPIx@p9__}wJէnv{L*L0 CN?4dEUgQ6 o-m&Oӱ^:zZOg * Zg .`G)Zg64VE'a]xI JGd.c"ND 6՘ug d#'՜I}.x{:yNޒ puم.ȕ >-C" |efr$2I5Pldfdux%tKP@wc)Y8L{VJu{zŽjyj#b kXL*%-A7i*û +ܳ외N Y5k N" I8,QnDG2Lnj&j /`'! 8\@zV| 6`O)aeꠎh'SQ%T4A(WI[~:Sgא ucl{聣Y||TEb&7zɛoTzMcX*5{- 8@sB.x ~q;>RphKwᰜ11{-<(ɸSu#`\Xu&!R4/<s0.ƪiՇ'] }6ǽ_>gV Od6nH:4uB|u]} lل`l5$dnXmƌpIJ(MlpṐT`VンLۺL!m[6 ] > &Y>$uÄ.IH|9bkj .w3B|j>Twh/`.MXN.:koVA #>c^CAxVVWU7 Js]0d 9m)ßۘL@+0.Ƭa/DxPo|D0ת&+({'d藗>΂2]&YGW@PĘ2rstKxwUOge,zOC&DI'#HN<zf{gb_s0=k.Yo.KL `Q 5؁6;EVfڭK癈н< T D:X^0@Q5Q6 Mc9՞:|7FتPԀ?َɫ6_q(؞ 3VᛲsX!V2#g ȴ]-.(2} 5HDI0"xCSZ6\RaPe6>W`in($كdy'F`؉i}S+}[=4tW=\@E"`\T@.J͡ Llמ.Qa9tb#~Qprđ:܃b!`ǖ]BUq5(RhEW~vdiWfBSAu g3"'CoN]Gʪ&Md"`wsUs.2#u yfs GVBO, } @-J~X3;n /z;Y !3v0[u"a|!^S-A}lM%[= + bw7o*=m4jxl_/C<:2=2|^Y=hR`=SA-: -G]Ri10mK/_[㟪4,eg@W{ă-s;o溱EKwI\r! aF1h 3*'ĀH2J0~ioX`p,,Wc趺⿳'ʨ<T#%a}mJc) Tsl-x+%ș+ m NS3t…:q$"1W8בDjL ! T`Z< ( Iҷ,}; @Y5waqS/-dHx/a񷤳h}}oB}1nd !;C dJjG)0P J hKSSrPS$fTn)Pzx}#t!ΆM`w\E[EVʜm$=5*մ7D.5ޗ*1=8$H&uTvFweǑqZ<U0W{g`Bp$^HmyYj+c4M{<]Du.˨A$< 9VhӮ@s@v4w6K*Cj"DLݝ ] 0&Muw*ۍIoj2-1CGZP 1:-x0BIN"G{A+OX % /bNc\r.-eޯ41x^L;i CE㴎NQ"K^ymYմʻ(q>@EE!c aӄ=W- #+~THʗ. R @̾z9Dk4^7F'62TP;7ۡM'v;0a&Q47gg;r{t|>Qܾjlkρ-aӀ+?yi"Cu^9j r=kY5L0i]g \")+#pVGJIfw I+kc75߄6.y6vv|_ؓ:ɫB8ɃEo6"pUBƵz{n\w gW8QS@[;OgYB///т;.v5k|k 8D7aE(-KE2`r |6RŽ@"5(Pj5ߎѪE )-Iy4EPJDY bx,m5 B~% QUDK zP?#Jo;>'٘Ϩ6qg#h?WfB_G`EE):8 9W_j=_/Ma聊?&^E{ǐMݥV'b"GAGml]6JZpxJ-簪rvocݪ E%u#CA ]>J{D<4^Qn~]KYCrmZҟE#P PYaȲ)Ѓyo!Ln5>,PzM\4"ƤpⰩE6Fǁƪ*@>a.74KύJ`VL63{Z'!U0z","IIK=&B5;`M> {~FV7/%F=y6M}DY1 :fRH#yd!>mx8U:|)Ou! T\; eωī>{)" [ oQ 419Ņ*ͳ'Lg2F\ה‚G':_M'9u5 4{[B"8v[My16 rR? I_Q T7r%kYPVws=Бy;7#l&`~N'p 禤ɰXJcB)2ep~yܴu U 3&ʹ.jymEN&=(ܶ.ئ:֔-;VG*C!LhP㓷czWf(.\2OB'pzWm4Eyp9?Πu)\jj~cC$o>%IBG-X#4p%CHfq^@S8hV$Fݢ9DB^#w,-:0k+ լ**262?Q*tOX5 *bQ^R (h n% -{9eGV |oհ[^(nŤfWz-? 
AMVC!P-l"?]v[t;H][pޡk̨aa^ҮaIy}?ஓLڥ;ӷjJxP gYm8]eKѦx>u&P'j:z\?:sm Yꂁ8>5k{coӑ<c%|avh\H)(pf} MZh4H4Iz ax-X7-:}[Ӭ1I1%dӱ|(DxG_ݜ[&s~7'7++'3I֨n9 v\u۹mb'yEv>`Zw7Vڥ~qkйIADpG*F3mF ֨R"U.S>vBQ2͢BP?8[EƕӦ8߬c7B jCCZnά `n^ e['ȬgrÉdbRQ10?wAGhfDf"qTfqLHr_D T:aF\A PP`QqAIVd Cat3&Ks~D&g֬| \Z2_;Ǭae;)'r|9i)u $cF "&xx̬FC*iS(Cb>v>[Ѳ㳃`Xѝ=줃lxp ?vbw7ptZePRKxI&fyS#Ij(vUbfchD<JGLzEN%OaH>J= $ P"$s |9A_ ; qy{(rySDƈgBU8c '{{lѵ= ttF'z"ZG9Z38y")*HGaAf,D򑑬 %.}m".%/}̓pI,C)60xX`n*׿ҥ!q|H:(gD7-ane$_"@!O@FuVUt}icq##;P꬐I#q~dv[׽>*/0Y Y':3!"|gۥJv(Bva.C2Cma@zU_0U8g(g,ޥ|]ǣ)kK 'i%=k>5&J9J2-=>c`v1yБZι^uCO"hvpbĒY)gr 1/ g rvP4ƽpֆ=50"Hbg&[\u29 H &#XNfq\hiSk1s7Z&X_,g%Xrsx*'0SQ{,hީRi!3_yםe 0{Mx^fkǖccp7|O^;^[ 763`9!uuXVSATbqZ('dk_妹K\GPWyn{YBg`B۽ϊ| &jۼ,<\ r/x4Cc oemUb靭${&/1PU+ B Yt$]tقJה{>m'xC4xa@@C4^ mc*'BT e[]Ʋ.2tW0-9G"q01Kc)".2yKv g?9ՄۃZijca.7ͨ+'%‡bpvet*n^gy|| Dd8\Ttjq\1J2mlI ǼagrQATE<<^L~Mb:H\L[` &wzikʠA>iRPtϸ>͸`Xd; oICDmdGuuq%cQkԼGbڸi*Aj-ڬ#S>$rC54N\^9Kj\*D^kGf"1&99RkDޭ%>l^WOqJv]kpU*g%rM-;L~f%-q 1jָDBJ#MK]وחɚ.~L6̼2>!CwG`: ]7]w5tCo~_Vֺ@g{fTDr}vX6_w֏MMIA Eԋú~+s^7:2-O̊uɓMr'7|G®uhU쐵ꁋZT%?{%Xrg#$-yA#>=lJs)+D=.Zf<&)A9L+(HX534ym$:<&8v7+ګl?Y L\/U<0 vr.X&1 /2 2Ս諄_z9j[0v}D7\p/R*z( Tقg&vu.5Àc̜5LKW#$m/fhP{-"eI}_l%2'{,:<9}ߘJ`DD*`׳Z^"\yڱYgq2,Y>--6|ҫ<-Zl̷ =V;fk1NOX "T0.2b q]M 4Ud]]>sBGC/Wd,垔53:##3}0n%';G g(UPi_Nk߭>]IO )U A7_!\djK9%ˤQ(nۡ[; ;]LB#Zgj_wk pbЙڐicgE^͢6i%`r,jZx((VFW7U}؁DvGg*b;kVO,Nj\)pB*ҲE}pW5aq|)J=`qx]S-1#II"1P5qJFKQD.!h!`¢aPYۣa݉(1sHه1o!):&LHK9TэL(##؃CthZ90Vmp+Rf7[ZY2p׎gRW{SZcU`Zo<&36&vLBK§fK1%)E{n?pAy?a8aȝQpV0X> فaF`%iv!PrY_MalU€∝͊ű8tHkgQAШܫQȎƲJ? 
rmk["NbX%}:4|ۺ[UNkx j2ag%s$g-y'e/We .Y qhETKRuNodʖ͏o8s!-\]_u b,[dAeaKE< ,uP骞8 ܂Q-(8l,ZݩC ǖD*݂T._u/K$%ḮGdIg,[e45.FԢ dyh1~Oɐ* ]Pǁ#AIxtX&w=_FIz7-̑ mDM#T-<8]-՛'|Wm y>^SCa~~y`f[c>Ef1iQ'Ou,VYE#hX%y$u3E7b twz{Mv]~āc7.-F̰0V` }KBMjVN޹"זGIes[~ :{Le8&W@8ih"< KIw_Z #ϿzBpn b>Rl; RxҡKc:tGvƆ>ӘQh UeurRYzQ˹܀՘?ا2s-}s0RF(k/<@oAk kCS@{!́-E |wظP&rUϭʲrz~ aЎMѵ|TO 5h)&r=W@ j`pg9 6;@@A1=o\ip;ے]*mGQL T7 @0y1 '#/2!l ڌ{OʎOx43B&6_w~1ѱ%+K*jk;BD867Qf(El-"}B_Gǫ\ufdd]nsˊ 8fH(ea_KVT*{6h»ΦNVGm4%H $g:90p.bF(B0%زϪyLep٦Qlj dOKLWh {k۫#ilc\7ҼZԱ"6/=ooݷqV*ѩV͑2"9sty[׎[-fbkU iaYϦTczINbɔnOn;mz<u (Q&{mbK^PkT*:Gb" }Kq߾[?.jsC~zmebx]xz?/F9kHAА-U9i ;;U;*=X:2;E%YD5 {f#N~G0]ijk +sp+/ccߌ+1{kWj)G,kv.>6 +W t;:*lRb춪7&ïvuYVAH,[mrnNG@E%Z>u 5.6Y-v,8Lmߔ1dܸ=rE[%C;? |R^B#؎ta{1JʷN*qnߖn HVG&ۤ֙V|G}l@ ?avɤ"ER1}*ќl/2B-ZjKEyR#rO =f:# Z_k4WIKVFU|eBX [+$ o) @\e%೦&}..8AoSFx)@NvZs+]%DRpwK1mulհQHe5h8Ȝ_5.2W|Hkf[}TUm:$4Cj_k._bLP)YiE 8̆ɨc8t\ f-!PRs OS6ޛo CHFxːjɰls/iD+QX>{{,l D.dFlcg΀jtzMjˉ~ߑ x0߄1zqəǴN~=9/$Yu,\S]A+Ț4q.?jBF7SB?et7ÿy}GƥB&)4݈xу?lGMJ=ZZCɦ&!^lx2?%; e~Mprҿ8";AC\ͨ?GQV0BMYc ! a}@¸: 6Fx{}:T߽7rPxxZٶZ[\ _]ߟr'o_8(~>b?`óB^4`; Mn"a:a23/;佱g!Gߢ um/Ghj+SAT>@i$<1_'N¢7z&I|Xj6⥿ٵЫ1ܡ+W4 Dw^ԘN!>^td98qF )nFouqΙ3H=l&y]#zd:d^Aq,9'V˜_yדi_MIqyTRrbĖڊ "v'rEcugAV6 ғa/Bq`bYb"xɬHP|ѓ&~.&O3=}GGc'qq~ԇizZ1m#oaY*h6!xDyE)Cv ǓmJSVJdh\ĥd>>FݹO.HTۊhw`j/yUsB&6p_*'7jלPy>us:yH-CT\Qw?ۭ[lQ ] tqvb*ITc?j3eӴzd8"N|mWg'_ROҺWD/y俻3퍲P,SDDFe?U6O|F{IR2nן_ )Ͷ`ʤ D>Nq}M]5VČ 2p3M3500a`JJm8}֥OA-UR1^5f}ħ偱 wvUX QlFf !:#}^lWTT?hU21gGӁ)2"޸-T=:eU-~"ߜIiv;\/ㆎVPm6q:w̗wL& ;zʽ2cuKb@LbF#[Kn6l6xDp)*TdHP$D62G wFp8%6S%3tyѪ}7 ^qhҢ fD@e5Վt^qV bR7F8XS1-:>JӻV=7`~žnVySR8?-05|_24+&O!? 
ebg^;od_8"lh擴긺~FV؃7G<7;'0AGCmPpX?tΫ [8Qh Xإ`Rd4ZU|h/LsE@2߮>W"Yf0jF(r 04!G4дB=zFI4dvWIXL}l66E\AXakcdJ'ihZI$ϙ䇚X(!ezJ bClewv|R6^8{2S<)ga9xh4TGA:2bXؒqkCpZՉ~5+?`?NxGCI#?֥wn:疌EwzϧbxHr'1SȻyMje{AxX{|r29e\jX,\n} ^;aY1deXuF4,Mm5m%Q>$9;݀cjޚ,9<&Yb7['Ejl_Pp/F@ErTAl@;u2ׁձF"r惚7(g* :ճJe/q9J4tnް&cbarDx/{j~7dv5pE'k wȷ jc¤uYuBjz 5 _۬gɊ!l qޝcj fki*k%5GG'wT-y@o01dا O_E$ p_#T#Z/՛r]q2gUYkU1?qy{1+Gÿj-؜=k~: _PxbC+7M@}x;h&uf6JR?Cn.[20z_1\UX&A6 $?䌔qH0̾y˟Yyo*;CꚔKc Th]SVyvQŏ77hIPDD4G.au`CX:q4!rthy@^1`С*0NEu,,R2?D4׺5/xqevά #&!7g&A:)Iyu.c43 auU7N0G\-Nՠ>I̎-^tФN߲vg5@pE=K@.}#:iԆ6ZcNED[ۃT"<+fN% עiYLFi:c*#4@݅oE*ڀm~8M,AHAӹF$,Cd?XҢ0l *v6 B$O!XϳD&_3"vȮ^inX>i :Y\] ӄvDRV|#; @L~&Ъ $T=Rm ǁfh IȬ}-m{ήyh߭/hBJLQZ.hB+A*2N8Jgv"iI98`I JE0V09a9Xxledb눃l\oH+xiMSe9' %^- B争na M1n3lf> N$H(t͕1*"ҀWlt,| tY_`xQ~We,S|]Tb՝(jC:c)6d@X%C]CĘ ~$j t*tX`&ֳkKhx[#-;$?L-QD C 1 PPyMT.". !>C,E0) 4F&] J`kmBǓPꦊPVG^fw9>3`*Nㄦeϱf&)qS,Xje\䅻 J 'Gȟ;j .ހP`0)a?bX6RhIZEV$-fF6!¤JD o j! a$b\N m<6ĄV❩A;(3%i-h |nEe,Y3=Z)HŜ[VcLB{+B!Na& {q]F DdFsX]ci/a)qb8?t$ŢAWq+6ha/*V;ϕƔ̮y`Pʫ8SD<` /e F QHހ׏1ĕ0U,&fg#$sVߪ o/y!ݛe|IM+́9376ɒB-rC3d˴Fנn_E|k*'GuR/Cz3n~D@k`0Ș88a[Xb41⎍v阴-TQ-]ZȒu.01gK[kRK-k sE/GܚX֫,{D"Hesf0i5fJHJ-\l;_XWAwSje{x H`rlnz Lm0W:l]oʮYҮgy.8qR>l~:*`%N3sMUb($"c/^Q FBߴR z?WrP{wT;ﶘt7 YTA#VL@PmB?:4,ӮR KEI2zg> F9f A6 7pod;;'. ֡u#z6/@ ѽl!;)z<FḞK10qV5U[}I4&{B&$=WR&{8YE"XԁO،:Ӊ6rA rg1+t︼ai5V%!Y6Cq1D qmWfxBl-/E^l`TMr}n z1}'vז6R?ɬe'yRhE~D.h}GGͲ! 
(i@F5t_Bx&Aqy]{k+g}=٪ʪzds`)I7m*xpZY(BRP3((c戹/0|'KjKP/E,mxqzF7ѽ;rf;9:uab'D*{% 6Csk۰j3+Wp^B&R]M;-BSU.|8GvѻBLet̝d$QZ,)_F9'~-lG=4cV=5hn ϶oc(:JObo\7Szڊ2 [!'nQؽ /˕-/ƍ]6jHFR~ŭbaLl4w=pc\"{Z{ Zs\}~ܼ 6WfP^z%ֹSt2AjM0<67SWcgAVӽsCrߕQl R:(ʴKRo=kt_Kspc A+צ yr >fm{Zѩu`β52mp%>ȺĂ/ h7օ0dt-i;?$tĦ)䈣u,VcjTC \ &+ZIF1M1-K.N_1eB):ELD"wfw{Qܧ7=D'5,h:#d6d6d赸6:-qC-Ej63Dz6 *zN~jpAyO:uiqr9>a.1]ћv %\l)o$T#b: .Uby,F&:O6Mޜl)s~Mf?* d9px1m(0 Q A?nifo:bꯗLp{&H}m6w.},L]>~_Q8K}mzI]KJ-?yM>җA6| Ú-n$y`: S餶*'} QWqdRaWL¡})n;η˵jWM| 6{EYCD`Pc's!_]Swoq͙Y 2jXظm1iZJCZߜ<mXXp*+r7A pAч:/@b`<[4p8c < UV8N 9}5,\X$b ,!5^ R(`cn0DjaE++_a6 kG3{y; <{Z8cƙABvn $O"`8iLS|qw_˛/P'~J"3A ,f(BHzN!!!&Z@8'!>94 ]bT< Ǿ0 #ՙFQF.m뀶K49dvDJ1$M 1*d-g~ψVP쑲As,~]kvJ/U̕ 5lbXD鞴m1Sƛ9Hø搁|`IXgwt"Pα*[t!/nɖmz}AXtgH"wc)|9G 'Ā뺘ad LB-Aspn_, &[DC{ ʇTU!ٌh6˂jn7>4/|lZvlʆ k 2Ih mjVP+ __Ae_Q︫^0accK4.=p\|f.Y*c;h|:-6Gm$ K,{~Ro ӱl[P`)ɸ jR6iGSE]!ZŁ+JNV[Bd,yH@ 3_9|0Vd &ZC@ALNc —eE/L0S%,34#$Ms›N /^ttR5Ij'fxjtE*&"KU3GODn{HPBtsC uFu`|΁Q{=doA#Cz՝^ɜqCR{JG<%YH ּa_;ctb\CK'9UV$!/|F&H z|yJ` (a]&R;PG S])RE5WAwbqoAӒӊLsH@]D#q~hJ3y|%elo 6u@$ۺP (Q H>/꘠$"|]֞tH'&sGz,p׈c5cT9/ 8)bCD1DFfGR7QlW&*=ɶ$>Qf5Rڞn,'NaC]cFٱW [&.'"rvϤc˒[%e?DcnP6EZ#F2̦5W+;nNh_&UNbXv%ܸ8<ܛ&ܠa"pp1؏VX(hܸHKE\,s0@s??`,CKK1W?2 0gQG۱N%Z+zGJ^Gb!( w+M,U'~5>44Xr+<$ə\aa]cU*"aĝI/]{5Ncu]M2.\cqwŇ1PPVClq5r ZT@4oUl_spzpvI]"+*6| 60@KӲ}ˑVQj[ei8sgm{i?Ĭ77䱄2+iNb^fl* c D#V#*6s FR&#ߓ5 LD#W9gϙ#s%$+V/J!=;6樺0ƂC(5_"6uK#Ȝ5I#I!ۜoOUb b~e(x##PL\hwFӹI52h\3 ^rC̘/ z.w@qU`iyo{ f@|~>FhNLCP_5=ƞߢG]玄oḻПm0hç.଑ϢXU,K]l$蜎(J{j>^9 dkpeo5ѐ6)x?'do4YB S.P};^e=uéUM(jj#KjwxVԭmPnJ] AGI=)̓~yGa8cCMLT PɐCVap^#' D~cI~um;5ŀk,eh9z@)F&don;Pqd u9  C΀?y)ȢܓP6q9cg侬}u]q9<7^>a'r$Œ5V.X4bVi9+n!{nsߓ "xR+M\JSʖ8 M UUՑwŠ-Njx|Tm7-Vk VEƋR׎E%3,ҍPSj*8$-yke/`_T UsXͧ4RQMW~\6+2V 8(vӬEJXl,7 Ďo,bPف<-UI6Q9W}hN(~_'^iZzkYf>]BĦC쳸]fs8ۏL)K:հJ ,arX)pd ؐvDv'jp蠓k׏ r񍅮]Yã烓dEأ'LZI(`ct215= tvng{J3KX7xҀn7 J?voA$RXOFO]:i,jwʆsu.DMYz֭DMpF"n_Vc ħ&Ljo&$˒i̊N)Ѯ?ÆVnKnDr0|wldmb?gi_wÆBaNҲ(?kdwI} ?Sdj4c,wCVMt΋o i!P%Z^#P+cѯ2;fs NqߠY=|&A82D{4i<Q82rpT&U?! gtN+[rɏ-?1N1sljG($:~47C? 
rB+)GLQ >ThD )1>eJv`[z~ЄAtQ;{{fmX2^3c7nwW97F,? j؀%eN4 Y8,R##nZa(Ò9"3Ŏf)?I"H,([ Bh%Fl5 CMs8VSh܋sĊ\HaaU%ͨcC^-$Ln'@]ArJۯ@B1S ЧX/nEgkrs&;*^j*ζ]HAT9s9T!d[@B:\!n n4k!0$ εKVL-}C&0A^Ge)B+ab2XtΈUmC\~hg!RQML ib=S&7H!VOS I{MR=cvTW~0 M v'+RN|Vg"O/@{ IoR(;hA7`6WK7f@=49~f}hU0d#Տ]zÞ8)f30K0?|\)Ϟ&x\/w6027+{c4VQmϙbD7u]%gEyAxzÞXFI5ݞU$nbewZjVV%lCT\p5+6NJT6FaŕGkpY-tŗRN1lZU͚D t)G~;:W<6^ϼ=dC-m~ cHBhW[VR)T85H.ZmKa>zCȲ^"DQ9|tUN [Sn Z]W(\kI@/SCQIY[46:-aoD=%n\s RtDJ! St4|&Z[a$n|@paP8[ګ6[ӽl?^؍/nwtub Z qdiiRi0ie'P)ڵ~X35VxҬN5~DѝNk8tS`TtA2 zQ6̵tٿ۔Ĥ ]IjZ9ዱ㕉-ςFCf!ѤA@f 5Q5%iMJ(*U&Yfik>ad2(QvgŎ!Aqa5ܵݤA]k:zܔqL K/;=)*=;u0/7 t !!8S`Lc?7A>pgWx1}$_pH~Z10-d{pȋ:Շo}PNk޻ƗB/{6Pvtj~}Ok{lⶥ劧nFh|lNqwlq zBuu>fկ1<{)d!/lF H3?q)P]xϋPS lB>+{zY^|FΚI[& H`z@Fu0}< ;y8DŽoQF_%V! GĒ@ޢG1q@eC )o%þO=O%#U *Vd ql8(.% 8DxA*#NJZ4O +npag]]i}+K>IH dZX֝,l}F+%ݟ¤b tä9P>.lvYrkUXxvMEw9cK f]BO@rd!`,;ȡk;:H:M9}.>r3!p?~ڗ>Gy£P硗JGwi -4Yȸ6"vGc >Z( LVYllB1BBM0A~'o[&-RNFm$VI2P9ōRҳ,MU\vXMq-.M|Oz&ec{Fb nkl+|XLx?Jd6&_0v%g6Pe"zm iX "!UTZCҥopB 0s4swE;„˖۶m۶m۶mm۶m۶m[==eT~uEte<mЪmTL!|[_}{-VzC{>OfI->߀% %5f x BdB! F;)t8|M:Vn$š`D@*#9LJ.詈z *#M#Mz-Z4W=פϽsMSDYۑ 11:ʕQ >@ځX3Rc%'Fe$jOfF늳# ,sklcmV^EYY &|`eIQ><9[wDžJ>j l4&1'iD'˗CNl? =&'oucM*f^<15UK#5 ̜Fα̉kU NG AL Zf1' v#MR\]]c JPS96[_4 "|ici<2a^,zS9G]7DQ3wU7e,WzSjҵkaVHnV]KVOr pf`KxjEpR?`|xP6:7/9ڪ=*'_X4f ow4 Bg,Cҧh2?,aQ"\4"2%޳vϭߩ'mJF#j^2i:x.t5]T(3-j&ٌ!%>v2{p!n& >E 8rI;I026W7]_ۻƽy `t/,@{dNcVYES0;:S<Ҡ<{=ܣ ?XuHpiv?'Գ ^`?^đRM4؅ODBzӅf%6 B@b A 0DX0: JfE’^0%If&nnW yӍGu^}  "b9\jmb'MJ%9b^ Ș *MCct ŪZ 5ɠW{b֟Y논)WYq=y v3-]J/O.!VƝw0ߖwmcX%'ZYNP!-S!x)b1T'!tAڋg;OyqmIfi2ir|dٰOJl.%@! m4+W '^`LҎPB{T˛HL#f)v{ϤXQhb숱\ Sϫ"Zi\6lo:p55 _XOFW$5xVҧV`F[A AR3#Wm,ȟ 3h:U}0r#Wux:[9=Zy>EˆtR*R!%,ퟁnKP%/n-Mɋa6 kBGYB%>Mv6Qfx.;U]6-p%DG'][`VdY|} 9KǦ$DhOBRo 1צv=Xe9 TuK\(M~=X@ÆJ)}׃qQnx eq Yܥ$Uvֺ兑Eo ;#yQ]柣mxmqwBLrۅME t疘;6kk1Ӹ"I۟^:iɲ1Q36;g"`G . `. ͙Y)5{[b0~o-rQ\J M򃚅%rx#_WcI4cCY~ D%M:\}0us? 
.btQ#{W08VYĀiuLdJr(CaR7\a\ ܶs&bY,Sl3$Pme;p(mAGL_mMK,V#d~=:Nְa!7xREX (]m3hxQ ꏹ.lg3W2L 9xCOFpѳ۰9:2٣XZlЪJw|> i$s +xss$u[UڽIO+]3шei[BTYt+{i5?5KAW`dYYsʂ "3w,J,hJe taޖF7!1H$eVu{ <-[" p$#+lћI׺?C'c->왑2aM\c;Htf-Ս"/ T!Ym!  .쥃V[7}:!n W(O{9q?Y冑}?@oXxD6?Vmqb]M(G@ UK@ ?@: >Yhsg3 l`v-[W#7zjӬbWfl gF"_?/;e_8;e_:ٽ`^{+/]vCp":q\N8,t(9> d@i^v\ؐ.7N˓ ?LSI6Oߩ_1!jy&H!8(e#{A%ifqٱ^R"Yw#u{ 1qWKL9̧^8oÎzl~-m2 XDӣ<"U)M4n8|1wN {V4&T0TE^Jr ̋D`FG  "qF*TFZ͆NN=><{嗟 pp0Z _Ke{ܦ Iey쪂i*1/;{NX<$A=(IAܟPƖ+yj#TM;Ɋt'D&W]̯:*e!>p5Lݠ-o<4hGw?V@EZ;wH(I޳e3QtЉ 0+9蕝!640}EM'֛MІPvI:.qKWmv잉-[5g#m*d@pgdM|]T7?.>Nlnk$H~BS6eۚfn,uzD!GW<ޒ yf6.V{/C))DcVպS?Fk7 73D'o]vqX3M!%m)k""}d0Ŷ hǕ&j?C74{FM;:IǴˏdP|ǂ#tˆp\&0?:Ar"\~]V,'%ate&<<|𸖢Oh2XH)9dԏ~ n,нcv)O=٬P!`p&/GB7Ѿ[p` I)z{#(_L_oQ^:Rybn}p5nކXY8)pE,H``[Qrg !Cxc>_wowqMv93 20IN(t,ܵo"EʶڲEʼUF>B|o3w5ivb;$nȕkPͧ?h(NE/ݚIwaMAD'r?W8kQmGg2%oտDl;?#ZڎL" ]7$%T5SYkLe=yđr;HsMAgMkHvDLql`T;9*Ur|>tA/nzzm ID<,799>"쇩`P(~qc7la●+k'((_:Pl\^(Jo*jUAa|$-gRo?Wڥv]OG+K//PpG ط*XQ度/G8iMCGAE gA<`~[o^4pm`9_?-!(p&æIX@ Iw\>9\/V'!ݗngV@7PH=Z3:dd 8G7wx&&_3`T}n%PTh?lXxy ߛ0XkX5{zИw~]QՔk I썾o:cIJfS`q4C<5sv(q|eY:p52:/o5[JOXـk9~\)Mz) ];?~ùI+)NP"Ǘ W{ v]j\Zg8SٱQ['韑qBnP5,&z[QaoSd?|+Uj7彉w]R';<^ +Vy8>|!m)6alK=v(< W;"v:eޮ}͵_ٸ`vr}# C79 4ݘ8C̖ ?޿5w)y"}դ^ ,AGnSH;(.q)71/=,KzV` ].Cc#p-Ze;ePWɱ`r+R2jMnF*6c㙁k M0 -Lh=Zɍ#P97 qCV@A܎Qxˑߠd~TG oMo%_݀8j(Q<P%ּ-m1p˶rҕ?B؀1X1'Ű!6-]ysFi7wM9Ⱦ{I)5 ˒ 4jHH@ȍ<y GFX97n U'grY7u{&>sҚӳ2~s61"~5S҅ e:VR'O+Z腕PC,L,\!j 1MˡCGa`}^hM4M+F"9u_Y5z_-lؾ Ci'b|P2 7*euˏ:ǭlУؤS^)Q+OҚp&Hwc>4!MFeW'#L'5q ?S^c> Cj~d+,)C-hR?_S1i,iw~hTU 2 5d'~uQ>{^Btm0 ?*[2 . e > Z.5hA)wЮܸ@BBf_aաC٪RDVaPlDl!D bK1 t*SR"рszIU`F(]ylKo%Oݧ8! 
L-{v3d={ zgdBZD y'O ;$l?$"ԩF!+ {GznO'N{k^7I2 *  7|j-d*&4:GΚv̨#LeX#_~W*'@KM~jM`%AG5 U{RP/woSeK\lJV !ܰIHʢڑeu:VΪҬ ]STKPh:ڣpUl M2I4 '!cK*}HNb5j(2aAuW]$<,]Ԕ0&Ix:T#LQrb)]7:ŕ7r>]dL"R-FsaSC#B3/6ˆ} Yq*h@1Vf`0zƌ,q2RPbcC?!c@Ibx!d!r)BMuE~|^}6:P+qѸ#rG3vrg~f|Y\}v_YBDùû(~C&Y@6HB} }/]N^esy8τ@睧Rk:DrupoP zxbA<'͸+jE;I`~ :qӞɽy8 _v+579++@ uIc\Gj AK+^zE=ෘ.g[LǤ3l,q2+2rڭz0e2VTyҁyYVpov45O,W鈴uW`;Bt[o).O"z"ķ'GԼjQ&Ԫ:g=R$$MP1ut>aKRVX<$Kzb)OlāJ~hM!)HWz} WUNgm䚍6GBX1Ҏ)]^EJ=꤄K4\cyP-!?$<$4t&Qvٽ~PCKV$=0QZ%uVr9R`:pK2`DAq]/(3AXdKcSt]pX^q@43|Cpe`d 9_Ƃ {cfUN;Ԙc|G+a_E[Oi"^e#=QrTl+L/N)6UeÈ?niK-I˲&l3Aē}F{PVdD"_UَǺ}µ^/&2\qc,Tb{6 ,76 mhOl41%ݎFubS Q(,ɧrkS$Z"Eg,lY13\;}-8f+oSB9nBτ]FXuU o!ڮ}g_u!(B33M >9"[4 _t!eUȊM]=FPWD*Y`#ޖ2ɓ`P4sb\ NP3q,ZN†V% )ϧͯJL>٦Uǿ,*ŽxbvN^NX-nȜ0^%H*ñnLmPOn$`}b]F Eg{n V@!i 7[ݥ~ % ?Rw(NB䗯)h!1jn=mhԈ~ Wz3}R!S $6idxτUb Yq<{5g\Z V&UJzI,rX"p< \"@uTi=m×IA |e**(5gXÂ8^6`R $ ~D"#IQ}#) sP(_LjOn7R~_K`: ol)o兀?Gټt Mos8 ȈȊqDeIAhL)Pv Ͱ$~evK-$(E5;5zkL *Mi^p>tZN]EnXdK_+ ]+#SiRFA6֓޳Wuo$, \!SZZLuo3זa\GreM CYg =m׭CL@b3=9fE4CPj-t/S{k #7ɅjpJfRUS8&j+p]UrjcJciJ`ScB* U޾A N<2֫t)>fijŀmN/dq M=~ Ր6A ͟ev@J6Al)0M܀KmV/gU ̿ )BZUd 6=5jROܰ'*AՏxB"%\' ?$_d4ǨADJ;m(v{+Yk; T'y`-\[rz=ӉesaG~MeR |E""['N)j8@}J_:X@jL VZўyHM#䬂?vhh$ 6.cAJ=Wؐ$КV׎hv U6PEE(>rF dûd]fL_ؒ®"ih#W%x Nv҃T$C (+-JSШDs̏T5P`i'LM'_zPӚ lGc&=)BL[TPRf 6`a [KGeMKCZTP`ܼRg.;,]}P|ퟵ#)BoA[J3f:=QSh/m{:^ &mBM Ku&ʝϔETM:#io/dk%*V׫%!OOY=iOzc\>{#&B5"ץ||*HRxpY@f2LjڜJu7y@x]5r7I$mDz7*͞U2X`ʃLrՂZM)ҫ _~SWx4SNX*bՖF2b)YBiZ T#KU ޞj9YN^f3ӵr#dK@{NPK;|4G>-28=JFL}5a};e-^H5Xt&vZ޾F$'"94 OLYg$-E 6RüH ͝po]F85~5p#u_Y[>%ZGl FBETbgu\(c1׌~f*a]d:ӔN/i)Gkt,4@\(&zQFU%D9k91ovɉ+9 d2-+pvo'sLDZ:A[*Tc}0#ϑ(8P58wHYʣ`d`6kϻxN T~MML+3:^ O=.Hw*MrDlf>9mO)Iس9 r`1Bןbb0f%H0(kRqjhV.$N] Gc2X"'XBD=r%XVW( >`J.m5h'R! ]cs];'>8[Yx'AzARlޔ)|U$g-"3B0kʙ4u\$An:pӮ! 
E&]=-gr>xAS]mN\wY1ѵT) %gՍvHL?&n"JSqDn mJOc7iGP ;>{)l\ի!RG 0qO48(V}j0V@,{:S>,zr+彇o2#WD>{7ؘM6_-cǢ 'iaᬬƢ 8[ [F(=&0ts2:`:UFY*g Id4LV u7PUXju[ʈqpwdMh[5q8c8a kq&a̞SƖ7}tW_>:_%pЪ0`%:dO)9F@{(RĆ5 hAeqF4=|@'|i,D:0,hĊ0]nʝL2XȈؤCO%Z:.-@z")W$781I"2ALf2/3o묦NBsKCF_Zr{YBVk@2ślΕ> }>/`?XW@BG+p9P$7 H25e e'^'鴡hE-@7Af^\Chûbo<7Qy^tt5 3Rr9L^>LQ;W_Ͼ'Y}&?xw6$"k:f$WSi\&1*a;)y1V" ޚ9F“I``aCDy[Lt<.(yr/Q_sIUoAdx*i2AVBc{-O?nI' AF .4S?Z(EI%]&x)bn Oٔ)u5dY'>4"\ymfE-9@2-oUzoYDxޛ?#~k3~krciȱhYjusvDm_a]Ƌg >ѓ)$`> A)~y볨<<kld`0:T T,u s}LS7krtt7*KJ5ExRH5CBe%>ÈFҮ7Їql쫉XREWwY8neom҂v^mо <6G^RC5vb EDkS*qos7{uxwδKp 1"3hZ%FDΌr ;aYrFQM=ug(~dBkGzix7Pt DxZ38?:p9Jx}9…>?L?:I`55? :F޶?+[ z@!r1 ǥc({3sgMʺ%#cnMPZLVQ1ۏԺJ:4!h0ჭULZZ\9F洊)x h5 1Hi{aeu%W`~ZbVok>QC((Q P'cHa2{hGiU׻L` @t/%/C[;-I^1b%0Q;sy|5gOz$`b}OWa3F,ZVF}pO'#d؃`y|wPw #vMP΄d(Mh]Gzg~8#Aa oE -4[:W+Gb:AQUaKZ HU"gc G|E{a=Lyfx $K+t&7jUv[^"9X|r?gWyпË4v=Goc;v͙Z|v~zieG`O9=~e<++ q]2XfXit&v85t$wa4>~"¾E;b- Z)\z2xYf٣⒔L݇>6KהESĊՃ䇽c/w/;a}ar6I4Jji(azQ3o#yC1(RZѣ엱\} q(B2кNwJaź4Z7gve*=xD~\XCv֮z :0KZ?卜l_gs"*JZYeHVx9}aW Akvu(EGZpǵq|F;rJi bLt;VkB/M(3e G$q!RuBf.J sSaU)hd54)p/UZ">0hn6^4W[/:VA<i3+N2V%-he>r0v,&O&{o!> 622Aq[;9A6WY"L!=3u4u;{fsf7t7oU=ʏ{RrWWε •z{~~us]zzUvw#УWܰ9s]|/R퇆c2>\y]y1NTXtѝm[~6CJj^f[XL y‘3>8u4rH Vt xWhop)Toa'SwlћhB46so C#80-ks_"1?^6/j"L"ӁzNW5Ta2]62C2kMS~805RkfW@ʇJw]3Mg4jzl׉v/gX˕7b&4sU;Dw, UwN"ӣ)6]@' AW8/yV;,lulѵ>apW{~&I/oIv t@9B_~/g1tцȡz{d#:0uk##O<cTNI"O@"Wsʠ.ϵ=n>|h(D DkM 7zკ{H0w` v` W *kCO'ZֹOjgAa!k ;xk'- xrڜ.AC0-iQ,B:|I& , ݢOGπ)0vFsۨr ?@{qz/>eNO/&KnCzv2<Ϗ`+jhYB#Ժ`^-N99В5//l9K :z$#Vuc! 
dWK~K (`W .Hv#a/C Zasá `kc u3ͥBIlݪӖ+Voo岮BSXSAb}v5p'Sџ(ON➷)1X[[;1/=W`Pr<-X쫞~a4ʢpZhKk}:|Xn x٧k-nBe2߇`Hoe5,?Y} Q3^vs܍" <:0u ; E:֋Ɯv۵HIq HqRj3IV[n5V1N5rΘpڝ>086=ؚvr˸syd6;dEnWe~L3Qela{ 2 4:LsK1Έ:"O2PK5Ya2&w 2dDfc_a:"[7?0_DK; KD[PaBcP6u*U2` ^ȺthO鮧$pJ?X/hN,p5&>NZVEiaNZ;pr:ܙ>5Snu9wONP'n%;M:n.rm{1ぉy?fѢ(CZ#/hBch\ Nmi~FXΓNj沢 ,{qc~(3V'q~v߫_}1;{qO&J"3y&'5m@:[)sn^G[e]FnGGGصˡ1jrsǢ,|] ;Qi {]"ܱ C֌<:g޳k@^|\Oņmd~㱓Mn2rmf%8JA nTsqkW"5NJt9)E%j0raR5N"[х5^8{d_3>!sbZ 1 nP M{CLd>]`a S V0% 0DǴ:`N6˼\ftπ7s.CIg2_EZS9?E GqC{~N|P!}zOx`~Ay \Bx\ TKvi4>@@gWxL@yz>pM9D(xJÀ1 mA"}C/TAcbjBlCdMa@KР&ً3ؠ'r)rtBW`hqq"$C'ª&'@}=khA}fQ5 k$2*.@MiPF: t{2$ +A'dmq&l+,KB !4Yh2٠d٠ bC9c{iBhݱ]J~Aĩ%ի6M !ZFyp7*n|J1~Oywww#ڏc߿y9 %歭BS>T>7:ZK&¼֒Fg`?j8%ܽenҜCuf򇖨ONx%{7kB* edfM2g<+VWi`טVPHi#<+Ngt䠼u0>X+7E6-Ei,∠牰??V\d>qtjD2 "/#[pTla?9 &"97{+UzOv6dxC CtG?A# 6~OyМo|LM4j> "E  0 v2S *,Q_ɪi֪W|ՓɧPK2Wiަ 9m#DwB#f\6j҆ʵhi"B|Z i-0r&H{{tA|:61 `ŰB$ASoN!C0&?>nxs__!x퐦.Z_,h"IcʽX*вkS3#:Qa4_jrQ!7gyX A|RtC(P2ApDXwpjpfuv:)JK)̰OȠּg@%<56TpZqޣ1pk>X`KPk+p{Or[ivCouŁ7]C#ހz{ K"iRhn1۹=kCq0yݹ䜻Ρ\,v=2!(J,ODo@~?voc3u 3 7֗lUgX3f|bl0D:fEK#W4.XX!8Q#$P)x^nJ +j "8$#"m+ KԂ$â+,xR@5P*B0uRxKm(VjYč"Ƙ kƪ䧃b3MsA;`_ i M;-zc՟2,9LلvViu#ȑBhܞR~b$"B| :>W;v?#By(ugy8%[ w:U 05Nxy<`+X8Gs DcXֻ%#kz@iRe-JQ B2c^Xb9k}-8^D:iDHGsW6U:`j1qaQ #-I絨^ `bH}6&.V^yrbY}VϏw&1ԫ BltNF/}!"z5~xxx]IVSc2BvH/W^E1R[$rtl(۫Q7_$LGhNHD]Jri }n ć޼D帿Fї1P3FC{/T &>-:ƧE'čsTVF;E<.G V]]OB T&e] (97eUm<׉*_,)3Ư/ :7t>J䚵m>.0BktNwl>nE *ؤI`떜>pרn@LY8  DI.mIvǝ`陕S-@hДX7fb?Qc+KIx\͔KLTz3 Ě椝5:y4Jz1@F*ti[m'_l^5x}CT,z"euiݕ ˴-n8fmJYLV7Ǜ)X(H؃b/J55Qd|eqeJZ!MM#1"9(\b5Qex2g+T6P`h:gw-9q1ai3!AQŁukK^LOS5wdR]86CkM#4 RTN9MagM+&\?[s;6K g@>V A Hw1*O@ S7&RtvʵuEdB_~z+jé$_uV_EWh{Ug:J1xYEi줶m*.?MDDL*0p82tOmiZάuN\t72\NMŇ, rF ݍM;GN'+M":K(-P<5 +#=bʊ5̬|HIG$&i{}*UjI9`IHtBY,.CzA0@<+fV񰦸 `@#]J>:WUJ9n֠.{:j$ TP%hU͍΁nSsuA6ϸBJq8j]JEA:7)KP-',Z f.[z1 gZz5kcE+"^ft/ 2j-W@c_G*X:=ߒ0ݚ %5Iy(ҋo s" 9^c0ж!LsPҸGY/}YЛּ߹ͿL3x;}1@-z*}ͫ@1L<XJ| V[-Xf խYaP9Jl4A܀N*1gTx`(d>SS oUpA\R[!=[wUxx2Ti267G`VU#6'Aᛊ5A:*+7>~wRد?duw(oC/s(a坡|`<-o'œaPy lO`Hw:@_6ptYlMxg q,? 
{{@t)1O2.\:Y+BAc$2 h7-CMJWhzl|r<С)2tFCZjZTҘ @y( /J "䊞U_p3lF@Z͘.Ӛw?"̔~o$ B2} n̔H?-.xDW}HjO`7 4/ޯjiEc'f#I/؀lpQ{P`\:ڎ [_QpN-#ܔzep* 'k=-sM߯P hY`#/~qr^J2

;HcGz^y k=8@MTmA2l_yuTS5(ԭA!27'yFnnvD_ZU?"d\SZjA kR\6 2* JJE"6,"RZ7V$ՈwdPD Fq+^Rj,*UWR?$q;>!Zr9AӤnhf/JSX0#}hq e0;An 2MJo&N%~ɞe֖D_}1E7J!دTrN%pw@0(f]ۻſd\adđ`"ʚK 4MT 3^|%ܽps/EnA'!3@Jz3u@o]6(ۆ(L!`J.zȫHu^:aOnDM|w:f棢Q[!ᕵ@q-C3ꍍɤ˜0DAAi':|jTa95+Bn4& N:VG:ppr Y!}:9 Wsӷ/A\wn.A}ylkxKWZ=OxJn1nog eNWvϛrUX/'Ixg)"~ϸiiZ Jf5tAa3QiNZox+3-dsҋDEVSq} #YGzfyǘM[(H,^<%VSbIyt;h])g[]+tl CP&ɶ(-+?nq6\+.V桖S|Ou*g ϑi;T(:;+=֚$VI#wנm++*iZM bkD)$,{AnlDWmBnQƄ V[ ծG~sPd]KzCDCpsEm9*->䊌LIgkde tb۔i@\l XDc[7zs# 3FiQZW~AMK.M(D:Ñ6@3y'[4jemP"ERyx%23AW=Q(2LH/jP֕!\=Ԩ$>Dm&n )@ !&@NM)Q E~J=z -JL#\W]4Ǘ2fPȵxKet"ѭdx-ym(ϨY똉*v&QٔuJ3^(@&!0a$Iiْ<#&ٵ,C' <0Q}ي4T9%1Tnƴ=r"IY&.@S{[g;E'W;ߋ_ -?KT취RxLr.Ljm"/-|/NߩdJ=CM<6օpƕBQ&DiC%ՇJ${<"e唔E盡@ `']3L;l.¼wܫwv Y?3 Y{-65d/N:d8`ĚȾDO(bZOח}3bn@U;P= 2NA՜|B'FQ]++/pntP"$҄h~ŗĩ3q+e3i#L*Ǟ_1f3gd7 e0L,C! 'sl > O%4ʆ>$bݯZ,F zT2Hmκptl2*#=!K>KdW: X 9#} *c(]% (qұW UDgMѤδ,~+p9yڗ1Ge2RTYL9LdI<굥߾hز,z[*l- šNl>~Zn{=>cV*?^/Nۖ=G n4h2N"A-Hb|2<]`qdGr7jE3}8)x@Gjc5>~a#a" '=MU:uVA ;5aD&/wS*1#?li4+7#TW,un) Uf9O<ܙ; ;k_)ʎ~ =MJj*$яx_h}E">[a%b1MxTgpĎAU&sۆ[g9KwfO4@3FqtTNU:}Sag㔷gP> buǜ_0D]aaӻ]ik"4OU=@~eI'6zD ^;#Z5i )%9N&~-M>xrA &crMҟnKkє5DkJ{̐$Rkf{+JE=0T;g!cTuhedvDwJ٣ ;;y="jlUnS;fѨZ4TP)]@u-dڝ,~9}_j:"&& .w0@z!_xaR˻r&q<@`  [Bm!tnCDfí@\M UW}p:Sc2|X_u2)*`LQEɮX3<1}ys|^yAwk&rhAUG E;qJǩ*פvW4n*|v}黎Jbא ֫و6° pϛ.}ٰ&u`.["\PE\LĢ2Irz\Jl<8:۲{4 _HF&U.!::+q Yav_@#^d9H YS|*>u=2rb LMnJZhɌg&_dFq2o%u5YOP׭l3 Td}`M ԣYt2>*]N`Y8ׂ+Ŧ/ىV1Y遣g*}VLL>@+v9xU(r( Nb7tonL@UUHI^9HDjIcVOr襉8ϡ&' ]W'FdHtƃAWRX*inDiLڱn'G\n؛-z>RXlL};wxR,}1Vĥoen^(,Q+N'sdgĥ?1:/SX\+[d ׊Q ޴r-\IEGwCTsH2s+IaWyOqݑ_'Jgx-|]o~42`w2b.4w[Ev_S gT(5t"{U"L>':j5Bᕪkc~t)ycW>({MBX05>A7x(ʨ \[1$~eS '=E9v ר\І$I/4E̻*?g%ё]H#YRc[DT]̣)~ `[GIL3 ,G5~xbХO_Sc? 
ׁ2% 2zN 5qFcwڥRkVJ6z ATp0m:ӣ~?JOصv|xI,i_^v2#t͑eR` `ʃ'~ʚKTɣsN?ڴxJ'%XL߿ ]nN&A6"jxH"1M?6 lHз`'̹M>.w)EJv5 T}ރ,6s)U%=0ZL-N-m'`r -O'Jvժ۞J|9s&Hzϗ],1M=m;P $7"EUki23̫ mjXIXJKEa=<&\J"Hvc_E%#1 sB[˶I q16;\Ktz\&[gMrE{)~Sc$?Y Hg+l恚`"诲oRC-{ov3 i"؀谮3=ڵRL,<(0E)d ++ ͅuJCT%PiCbEv J^Ԛq_O.!v&#^6q!T1|"?Рos:ڲl.Nէ%BUb{:/v$;Ptav 1qꑔUz0:+x CaQ;z(0$K+BaWpF!qq6S!^ѽeo8/f|K[YvFF݉\\3:VM?Uҿ*70خ-4J S647v{7!yo Ɵe=YIy~䞥$0M+:\kӣzMk쯎B`Wxl1U!xfs:3+Mis7؍'4M3"49>ђF L8Ȓ!u.!Aif(Jn/heGx}XByV)jz-q̊T &K9Dxcn:_WQ#Jܦehu;[|-ZnYH8F0noqB,)" ߿> 9.C&K߭X]qQ#n!y6l.@v#ie0ԥX/xq2ik=^p$80X:ѱPD(@Vm!PU% F1(=VvRIv]ٙ=Vf"يmJ@T(C1QO2Z3D1̹y69kW)ha{n<.>{vț6Yevi?;.2c@[K`0DS1?[J"K;G}SvK- v)lbt*6!l#i\!ɭ~ti׆ߟ09pk91;֞g+=gcUk9+?ɆJU/[ry]gFK3ؙka/ķ^fU]plS/&A^ˍlBEڬ / jV@^a^A1䍲S%A;QDuݜUMj:8ڒXbT~BVPd W }]cAϹF!^oIp% SW1m*"^y D4ETۦY.Ys|*Zx`֘Y_[z21ǘYvM\g@ncq|; 6uhKp`H S}5/nq-]_9/91t^54aAZ^iwg FN`B DsT;;|3Ms7blwS Jv7 c nF|;˛#rzzQeb:X`ȗx`v1 v:uSmȩHc}F}g,$0g;^ŧo/mq=DamƢǥ %LLen7nwo^LN񺭏H|͹zZH?{>shIuHՊp0u_=:m'LָZu2'Rc"mCazLV,~Vʮoea?*ajގR<<ؼk{qv&YSut1zJqCݿ.7 O!8zM/UXT2İ{7bE KB-"pC8% ]{L7B%I%GgZaI]J{i-7glofr'XCrQKtbTHL%o20G #`s]jǗ+b /G6M#f;WU\l"ѠFgףq rN]#z S$JCmognp e)Ade ,E3187B8T}2|ίxvS"?&|xP uVo:3KJIB bV2GBg^ݪ4?C0$xm۶m۶m۶m|׶m۶mk^GLMբ"*Of0avnNcF6쮂rAN`剫e}<,&xIآ,g>Sdu^%͜>VktW֗84g0$eֻc[mk y+cfT^=dƃ~K2 oQZM °nYAo{6r'$_!#sA[ 0>[RY*’=v{)~qM q 8!!ϜfO^͑I,UƵ}<߬x@BdqVo %nώv[alK^*qOdg2]Pg43(BrR?K#Ϲ|6ZWӊg{ G`K(MƍagɟVs)m&ܽ<{m؎|J$~S{%Y򅿡{߽<yDo%-75V-75u]BW2 D6gΆ U·mG.s0ABn^P4t#!+OdR{y Rt(-~*+T1ɮ篚\qbI0],Z@uKܖ-zX]LX+%IԲiFRX2>ƒIl@+R`G*n:ضOc%uZ lL}ۨvxq:,##/mއRB#d=8 .!KeVj62j3Xxv6]ѧZ;lhnd-)2ǃSRI07#ݯ'>ЖsOިx\[jވB[c9YxFLYngfgMU fGnb:Sw09RD-E !NmG:`?}W+śF!ѓ0i rݎ%x,i[}S-z3ݰΘ,Nqy/DrBvp+kEvK-?̂q3[<[JAtw B}] k Й=.E֟E]N$EtL"0Ԇ֠UͶ9;rVutB0nYUJ+fǴù3bg=0P"?}(6:DHnTt?I_w mG^d;#Β˥A=l8Su|^PބU 73*ތmKz݄+ДNXK9ʅ6qRY{b6[qwXUФ摊ƢI ]Rszw@%[H nJHxǴңpUtcW:hifz]PTl2'1mJ }0FGCV׶MKQ,8WKS$ Q}Y_1%/f Zdu0n^6!] D͘Nս3H41SxdmL 32_MV۟j(UhO(9]d%E|j1}߇xnmVՐ⪇K xym+(ʕ!X=n!m^6?6ձ/flQʛ$ oHe o W[Z둆. ʼn 7{IKw[6d'nj8@M3TjY7qX%CKJ%ypՠ-=Fs.9;'H8YOz,:>g;.C/p-NOC 綷^dFf *v3 :>b'r~hVN_H z&x\VB^u. 
Mla Nt(pb_d7`ؚJK+FZxGoܸC} 5[J8/7&mM?enƭu; ۑƱ {{XO Lk56O0aW|YESf| sf9Ť(`*{2=yҵ: ѷ 3%RurDqZLgňwa40(ؤJ`Cu>Ehc/XR[$zЩϦtd?ETzS\p8:'O&ϮٜHPA?8{/͟T_ B=/ PTۣ.ID jy";vEgL$AuwkWf*BVR_N=m|o\+}'Ѣ;{n)2x}om&#ɯO𬻗C|hRb'͡Ƥ~Ul395GIpq䥣OWLOB,&=$XV~ (d"͸nXZuY%3 J??5l%\g" +ކE<^ JHRZy2TLf&|٧3| ٌb ΫalD #3DhSWCPga;x8bܖj[Q3*~(H6Qy;*Wu+S)|KebNMcwL O8(q2,aai >g)s!6:Β0Zr1fqCSE\zmSle_Ā,])n d~e]ZNYkuf(ltN5&}-26h#)/2˶1;⯿b[:''R̫ƒ 1Y Xz]8iL-?8zҮ {ZSܣl{Gep-D0S֨ovwnSm<."Aa c_ʉ).Lb*kFnӐ2PMږ3դHkVqMZǑnQ:N;Ic*W̢ 䟔X|5 <qp:Z$dS1uj(bm4G~/MG;f90;Oogڒkl?x{ WWrAS}1@nGjdwo*n_[VW,!?<̊L*uvU8ZEGx!ky1ǀdo8!!{fo8+oW??p{Zs9=hS(~;;E9Y جE:_g %>`cEﶖKzɿ>>vz>z}j{o{?^n{.0.qӾ?KRpnyi4\jNhan0nPxBdqsHl~_n ?}#v1,Y\YS/#l `^~CzJ\HX0%-}N=E%9Pںzv.Q3c0BMZ(}Z f:5DI;WcfĒDY6j95`vØHqc9tr{+ˢxi p@4u L?g2G*"%JwD2 Z[S=Ka+gDN0`Gw4X6&Rf2jgIZɰ$H(g(kv 4|[ $Iy㚤vDa,7}nezMyrb?z=9n 0?l&/h-3L8H RG2!)FS!mTg.S.t6̤Diwhe2&Iοp!lXsա 7 s|x{g':&Gz-ٿRޯFM;6L'VܞdP# r `(hOü@=q4<ØFn>HÐp@W )x=&vuŢox]QL`#sõ`CJMLs?V)`fE.||u'llj2'g0GGU6"Ii?7Or<³Ȕ+>yr{bVۋpxDk9,&%[jsgkBuOVP=%L]6?E ߴ0~v-N;Ј׿qs)Ua<A53I."QXW^OAsth낫>D.m2vmS7ڬxAoi ߎOnd^-D][˽fZ:J|hy2Wsxf!jrp=9flemq#whgP# FMl*t hU)BʠE3NBt@FJm@յ ujk,,ҳ@DPO%VT)O|:ySzYma#ڙ|]87[Ԥnu!CBXG[+Tl!Tjb೥6Topr? +r|iR>(+ten7tI"3J2R]nY ?+^3+93B2裏1,D1BkH d.!7D ޏ2BQf刀t c4ʡDXڗvxt0d7Dާ 1GFxgE@Hkoml4*j-Sݠ? heX5ܗtXe,jVWm,l*Z~M@ںqH[@/<6ш$XwG:4O\4ރTԹ^?}5}7.sV6({XRyɸ\:t$Gdi{D՜)` 5:WS5?4U17OfǍ=ceg*H ˱!fm j|R͝JeG/!6C;Y7pE$b3,BTgdb|"<t'QeiWmer H|0ع%\0/BD!8G3fr`.;۠9&Iԟ (L9+pbbfz3j~gO'\VS}32?ڙ>`iQ&A '` IBkVN5r~߮DUE걵C5E53\-mQO!^(D0ۊR?ö ^}#lj*äb4ZVU3Ua˾d90km{: 4+(x%\ZIz WFCċ,hXU י)͎,Y&<3G1#1%1|MtS}1^z;SVbM !RX_֔ sE?H_q`gno}[wxe"__"!yTlLT{/5jN}w@0PW?2~koz'HF1|HHjG [C؉dA:ڡ^{X0@dϷ H~ز;NWdCCI-%_3Z~W"#fz߼AhAu$qt6|)nN $<9P3u Zi} fPC(͉Jh RD[v0@/\=* 2%҃S9DN58ZlMl҄ ֍;0W:.Nnn E;V(I_o=:R  -Go2Dr L+7CJADVCK3*Q:ͅQ9q]P}tzzStwWTӔ?Yб8!0ۄ5P?z(ej-sC='X%' R@<7ECJ! 
Kc%#@E0ؘ9 R8Q4q?C l_E,/ajn^pΓp2hЈ;e(ǭ*Sn㺥@CU"l7qTΝpjO)dX2PQl/}#ZVl'Hvɫ~x7v4LzS!w2&# 1Y$uayIrlU;TyFF/&T*ۤQMTAh P*aR=QDѝN m2nеltF6ԱQԙQ#v:0Fg+ {k~EH4ڳLq bUj_0D@O4IW]D eUEwQ:QR Ё5?i!BД9i(d1Ջ@=X˪%q&Ԁ`!+ҿ8 hKpe`zZFuXzEL+.<Wk$1ՠjmֺ]( b',SZ߄K5^oTO+NIT7K][@bp:0 057]0G b`am20o i[#jy*2"0Ejm[7pW^8б?2rhR 6<6Յ(,5 ]U,B Ja%Q`΁;)hgd`+B?ݍ>m=| ZLr4Oߪh6/mҔ3k5e8~SH&SkZ }sZu,cr="WNőbHyi_cr3T`-]vL-WKU`u APE0Ϫn,%>qZ움\ehYRqWׇ7"%8N/4J6 SBe~e?܁iyz+%L^f!ƩOq :-O;Hu9 `? `<3z]ߞ"F&>Q2^(fxo<=[]jF*7]6l_rB \P</9e yH,TVF_6wr,'vw\&㏿M"B0H 'umJӘԺ:jjUc |Vfc*ֵA,- T^zz&bp]e&rq]R!퐚4,|ЊI~$5cL ]5G0!ýChDE/?>E:Уа-EҘ\%=*iނNE}i0P8y0h>ZzPp>CF`@p@t@qM= ]uJ?j{.5{H'e7SlIzǏoE ik~Q3t_Rs))jJ0Mnaҹt괒9yg+Wٕ-hp:lك@μ*w*չ@y5wfF~xp]ylޞӯ jMbzv"^Bku~{uz mHX5' DH+M`0AdqK8sbщȷ)!ܴ]󣝿| dÎ0 pD.ꠀ!Gr(o9š`m є@3 S4٥&zHړs4Y!f' ?Q Hy&jEӋe1>5]y1r9KBTPf T݃H6crWHw#VCA2Y[,A5xݡKX=%E*\$btMZ6Qv!~|,rMb0;C rȿԊs访C`n%{gk' .'\Y<h>I9S/C@ Kx'o'e*X<Y Q>a(k&cjrJ阤EIM2=rgpA= 3 LlIK5èN `}ԍq֙0 Uxf3iK.S>-"렞NYi E#CX=#:b!oL% ێ&stc.} G( I ,;KYc<0"|d |*xl /:`}܄ǟ)1F{~| M qadqq Ϋkݢ,-M!Y92}B7% G$8jbe\a-ZEA~'6L>>8'Q1R2JCg0 IK4@ƔvKkwUS#kf }m|O>[װJ}RbV꺸^Gp"(n'ԟ-=Ñ)3-gXupr$wIf#~HK7A~XKU0saKfeഗ>M/02}OUgoZ)V:3,b9W˃xyZ+g,`v-%lvMTZN@]H'wY}s1]ڌ" fOw e+~"ه-6r=1#SĉBhoE-M2ڛ~rL2Ò[sXC#Bȑ@p˳OiA,[Csd]]I>@(NuӯA}PNx%p*cC1n)nn ۓ,(pk9A>%tö,iE@Z:l _Ӕ~fO*J$t<{♔&G4B6+'uR#s:IԙE>N8Z4ҬC-yK3vhIJ=>=9 ETa1-6`i'E g:&lsf޳`/OGkȓS0eXKT[SRֽgE_i:˘^F؜2P<.yk/ņF-AN'gqNнgn}2lV-KpȒNy+ͦޒʇA-߲>Hn>fͦqOa nV exb[KSE󔎵vWj{f2m7;ލ"}D\Q.:#vc2q'hSSОXEXL볱{qhp#0^a$^9;(2 Ĺ?řGTs0'v☸4K8~`"CV^TݲyY^UU'MFߤP~Nm_D+wRpTg5 nA6}6"kSax:=cC-חL,bZfL!%jNu C{dV&Te'% =+opoyK[:NosND O%dIM{ fΦ.p6}O]?a"六lJQ3=v7̴(Kߺ j.)砇ٮ XaQ>8 *P̨eRQKs$n3n9K,i2Ts3bXyKNN(JXW>INFqs@$k$oxY-nޓ r`R!,8jQ$#u43&ݗdgq;g\"od0~'6#jGg/s::\6"c|:f [W"oC)׵;zyZ6X%oa/a .XɆq.5sոgE/u50"QU|is4^zb2 S^\N@H0Hl\54>xI4#hԷ<3K;g Kc/LJ/)Ԋ=Uin`~,ퟏM"JgH,,mLOJq̪{U p3+*C?:ף4kas(_%*X Dnll.OgzNtWY%NI1Cö+|:9}<뱏JF&!/vH3g1MUFfspY*vwuy,U ݳBAp8@@R³H)M :w7P xC- 8Uko0,W$:K'WxF1oEiag#}Bo-KDs{=ehF ⇞mݨsID\hKv0jJCvdFа.čB=1m.*MR-[B( &Tjv:m5U%i3WQ_"Uݯg߈;͞?&׻čVeKm7*F5_rMg[TvE??,^1' 6jz˃B>G=ޢ\E6̬s 
nGpĈi[UӉ_iVjjJ62VNSO^ A_,*ayxip  ek2-泆g_N@ټ |(%|Nk*B{Tf C\7e(Mx חC_WgMlCIf(,]/x#A|KmZ0|$#ܠC#gp)hk|]o憸EVSoZ`Ri]E= ڡvkMoix8B[ly+ԥ\i Ca[r#ƻ%]F7nvƤNԷdon.qu?rI,(>ss /=~2ڳs}LL]1iԓE9qtS\ 8XaFSlv {3:Bpwƨkq3TI=W |R@E~D(w SәR=L!+ͽNgO#)4ukO RӿRo,0l*,߮\^#{\ﮬ}%:K DV{ohÒ$FxZ/R׃Qe6ņĘܒxӁ0']$λ7p,b]O#|#Ӗ ^d.W a|pM kkmAkWG9MŠ{⁷\ĪfbM%.VF:pA:TKl5>d-_={1^±'KIJ{xhn?E)R\D/=0 I*Bۆs%:oA%3OU2lUە}8g|BM4ce~4o a5nIo`ڛHEoڅO"sB[TsB,*&%0& BiW``е%חZ[k`HI>_l䏵zVzٯcZn}y\(ְ۰Cɯj{|sy"4{B*6 A5^1N p1";D^q\s0Іd`[kف-";6O*M;鎜vj[B}Ǐ+QnF1V%,Tm; }7ڞd={J5RCƪ=)^Iې͌zNc H|B4M}7Bp[ٓyһ=#3K \6<0Cʹ)<"蕤(MHXkԴ=*znb<j 6C܌p{UdMf2pӁrvGl)v2q6D^":-oTFfWuLfLxkdit\|ȡtUن'DҎC011J'gPa20Q2Ǽ_,n%8XYYZ=!6\SnX_}H mG"KpTZ5>>8HwJ d0YWQ` A0ʢ߉߂6zB[SĤW_u?%}}FR$9]w] 'pPa,\2 ! $g *N\d(jͤLSqD8k9AR۔$,{xZg (|a;-*%b/uִ՞9Re^6߫n_2i<*%چ1FK']l 2QҩB 7(}{'=HD1R͊kЇ("_@. SƝP,*՗!_e(8Zn_6l RZ5 h"Yn X^lt޳d._F{}pc*4Dg%lAgv/Y|~xj޼p+amz[]߆K+K|, M>L xj8d6pS,0|k m&D*}؉:UOLzɃSON$Voec_lidX N4v].OMz|Ƶ4֙1+3KV SI+ndSYr0&7(h~~1VmޅBDYK==z0t]֡ ?JY ?i#IqȁX7B?JF~ VљX̔"j>`>mYdJDf/w׆bh =z6u{bޢNTFPA j ݌ol)BЃZ:@=aH p@ܐ&k \-Y1-A;aqP h%`2MV54*p JW:̭j|+TTGFJeliÉK`p$ <z ɒa#GW, r/ߘ>{iݾ85L@)K 4<.m~₞w/zɢcF 4)Ygj'B,?)%E Qq6MEr܌(Hj֓`% 8YE&HD҄J싇B^tfgStan~aN|( yڷUM'(s'@}s hv9MFGGEĩ}<־>j`sAY (M]#}kV#0 3YkF Cp'"#%N7µkĮMyP[oHxj N2qN1#dty$(ӻ_) c57 ఻ٲ G`ݍ` U ԼһA,DqIM\9z@Q(t&׈"A׎S Ty o)d E}rfYǿ1Dg-44]/ N {ա|ͧMPg@rޯx,!`=n ]z)`H#2^>vy w|'%)7Rzo_ވ=ۆLLYŲs*q?ht{JiuȖm{!$s![Oce-\&V&QEbވUv+ [DN+4lDD2 n b(=I^!TrCV.ht9a. 
vUR't9F0#[z1χ7 64;Cǽ]tjwkkgPT(sc߫r qC䄲YAޓGKhw1JGp*p&-"x'я;bGRa=89h9"{ )%vм\@$Alt,!pQ0(m5Gw$3vObo^E{V>fEIx6~5 &Tmjj;~}bOk ?@_#(J&uR+ wkɰ@($WYBК.cL[$I\nn${ ݱH-b1W3o^Yw>5GuHwS#a.\lM %+贍Q7z:fMSAWdcy/8'X( E}jyy⮥+mTӭ3CSכQV5rFp&9"7Äf—2Rtrfd}Lm1yPլ-k3+*YWPvs~^ rp=}Ů`O*'&)!0.``QCk*Ĩ-QqZNo|%Z5 .C5#%yхp_!a\א\sxavH6uR9}Ogmd*"Q03E6~*|q+y(YFT#*OWUH _|z 왓feJ"F qA,2B9v]T+k LwAuo{:c'YgiχD m6z؇<5.w^T&^+3OшȼH,>4f o Ґ7B/'([bi=9*o4w[]9+w$ZbXzߌY#55V`Y-T65\^ٌ1s@GiULD6'1@(3h}qWIj9A?SDj ֺ#D*,;̯`( KM$\o1^CtƃIzx^A7D#sω%_[䝰8$ Q^H^s FUph}cAbo>>4 A[V28TiQ[͸ ,+6`dT0S _ȕSG#Iģ (Pm=<+&l5`攅[c*Ga**Y7fnv|DtM7ДP<ͥD}Ѳ:كrSݞvn7wOݲ`iۤb& ML?eh<|~ytunH@_ O //s9Px;ECHj#K@QBdžS%'0vEkC9{paal=>Lܡ1%lopz`!$/2Bzxtu5)F),MH9?1Z8a:(B6 NpMND/hyL"@y נsu̞< MUZٓ/.ؓ^W%sp A~8-T[k] `0T'v|.mԪavJC2 t ǿ5v(}~ ); p/cgI`lG7]?X /y-  /RD"]fİ#D>+$bb{j\zNIePT @] b:g/b3K-e(ZֿƸR3Kp1;dj[ű+K3$CNY}[?IG$Cy.23AHh<4CIpŋC0 A= 8OV8쑉t]:"(ːyǻۤ9 ܵ`36vB[eT!kDlye?CM7V,c8fCMjbկ/'Mm0Ni[=,k>Rj߮JmWu"Gq)Te6q֑tro]@eTacI5Y ;Np>̻SvxqukВ Xb5_A4eN+z+J8}VOvjY+x,8etYHOZvUٝ9\'Bws>+,l.?囒ttRA^ <:^1.:BD?[ly`/n!>'6Pq쮇NKvl-G V5ȈMܚ?G 2âWySۤ+ʜ*n!]|kmO0ieyˬ$HN 0 M&A"ZS7RCBUŜ I^./w|WeZewEО~Sd13w~Xq.:WB]ź%uą9CGA{J4ux,4YѪ?AyBT<ر @[mA9op.yjq[3 G,=@GV{ XՀ?-)ȰPV^MmQT>Jb Etm7#nQK*bKEF vjn: 4xsa< g~3cik9Y759MԿ&/g}D_hh>w/kt>UI.] iMP.ԚgDj'y@0VuCS@bl.'n:v^{w`h`y߻øPZ`+Iin4fmyK5HFw&͏_&4 (QhaU4 f+* 2o\#/)btrŠTVjMra;nW`uCYr+Yt=coG;e?x'Z9=sm'AZ"Co2)@2-M  W>PEfӴc 1 BD N[oO HEN&PpvXO]'4 wĔ$9 t@Wr:2I(iz|l!Gd}̍m/˵MRʞQ ];q 'b VȜ{g%G-^7nCBiFxU-<ݵ~{՛@˞`&>ܞ"/ڕ1/Uk?mf+\Qsb#+&Ib(zhkgS<$Pe(A^Dڕ!V$zG~nj* :$""[6iI#̤`RJj4ʓ&sL.*[['ۓ87&4[V$(,{@84]Ktlqv:Ӧm`Pnjh2@U@ҫlz-*`Q55jeCqtV`V,Q{Q@WĬueioةcR m`Mֹ֔Ften~wݵ*6 3-}nWď#am[dғX~!~:Mr(xRJpDP ;33)=GuBר֞RB_LR(+be`ՋWl;O7ٷ:yջ=8R6[ 2dA.(DiZ7t~}жǂI Z oOãzf_Vחʴ$gmM~c^G58S ϙsH̘ؔNnc$ YjI+Β2 A 3W{ㄘVA ;PmSQpN1.Jܬ|€3! 
d)&r, `CF"!sQ w7&+O6Z[̽v!rOpꛥYZbam,Z|d0eG"^JfGTzf4ՅeܕV|"҇@*jo`(L.||IzE_`Rº31Gr" u7(^4b267Kx^ `ō0T0.5.R(;E?s ![& mu1 g[H*t?4Вwa |+Lnĝ0u<1w\yia9DCUӂ#S{7~n℀@ubSa^8x NpV#<;1VzN n|AzHj_X1CiexE prRhD.X8-!^uo/"AkkbS cRf$ޜRT0`rB2,"Y݌Tڨ9f́&EaPVkC0#kʐQ %?WF=h>́2w}ùrCݲ{BG?졺)]c!!YM񭌍no6 dٶObkCbz$<.hf*52@3#2Bk`XL&sOzr.yO\Vl/2a$x^@; ] S$ cߏ$yLLipyVH=l+i^WoSǼ}eY͙kʋ:/)MjEKlI~dbne$R&prVV+?(6pEL>` gNg6e|dcКGSC~sdq^A{UyBiNMЁNN~gEi;9TvϓؐG96!#7w+;'s\/IѸ ao L7vb{)+axYT_Wsm9NN LۣPoˤ4R:6ZםߚGy$%%WFZmZevN.(c{mhGuzmD{79iCcPCWg=m/?J ,t9[*g(t~ ıA]Ek2}4$(I,<$:'B+VeɄU'k!KuE 3bt4C37?/^-2OVpR6?(Q" EHŐQ1 L#H q:;a u!Cܘ xb_L_?>~NQ2~V^L(U,n֟'7X5?F1F 7 0?'oU^d.3B'K rгkSvuD1.ͪtgZG?} Zb\jV o2/7')2آh PIbo{p<>oMzz٘ђ]Չ86C?2n~n3Wh tjnR7#4Nj p}XQGoOgR,&PPTCгvX^+=Wިq=3hv١~MF9؏kcW3YV( j'#B$ KَF\ZyxZI'GFx߿>9G-AׯQ8h?98A>4Wg`Z i(Đ![CҐ5הY\x~F@'&D@円̈́P~mf.W oh|JZȀcݏ<!Nq+@iX2~dJ!JEau>:)Md[o} %ph.QRꂖ6<^)/UF6!06GQ:*?`9HO{Å ,aVId ꃈɕ2J[ !tŘkZ̧$.ʖgLRQڄ1 FGDuia-[K3Q:a\Ke{A oV NS;.itP]؛}MNzpַ[˧7k,vt>ńB VniS4TC)(44S9ږnRH74:0jvs45#Q)*yT{Aޥ_`3Q`m1ºzc )RlSHlfO^랤dM]Y탱Ձ!H>`쾌PƇHi֋ݳ>~7Zbh^M:w{-X>aInkn rT;dI 2pƞ&ɧRyM%W /upwh HрBZPuM4i d RE[Zbd3 kݴ#Qka;w?d7B1j)r~{Ln~KfL󲗋-d*<ĝyef~J/b\n_FIZsx=Yۃˋxm2S/Fv WVh(gYm|i[k6ճ\]9UnA+g\ŃO~6:{ ah9 ̔-/¨`eC KCJ#ţ|.b\[<Paloݕ ,q4U±s]:Hͬy+ߖFjwJ޵ e-^zb5LmmW8RР)y7D1%j"˃ecâ:a%q0|i]HVD~4HPmQS#c"tD{=*"efc ,\hܫi'E>jm}(]~l;dTe}rM޴;zƟxS,5q-[:?b\Β-gU7n ^bH >Ok-Hv÷"}[-Lk׵@'3d:6B=p!V"Pp hyEPQ$MKQqˑ6pQeŰ3+jj FaN :2K^㬟i%Ke>x59|s: uA>wTI>]'E"Nj/ IlaO_ Ê4׎TI J 22(6CSI { W"XFMۨI~P[pPqG,ӅxTG{qE@1dNu}t#^'V \߭vRhS"d"I[JMU TEwObt wЦ p&qc9k$-5~_ }A}t'I9ksȃ|QQS!o|Q#eG7Y+:9s䴀4\].A$ 4ù UnB+TgS ^)Ik xXFHRh#IG4[A+ >T ćܠxbn|IO9'%M0zUWJ~*&NgERE)͎{|ne>ߧawZ܀U2ť_:LOP=J0X1Lf" ] ʩpF*DCOq}ʴĶ  8s8 s0RnK!4q8*s ~睂4ABBѐB󝍔ȟ ƿ8 G X|>8[|=u;;9C&"Cً3w?.k|8zG̘vģLGW$ʥ4U0Mhp3A{~k)o)'nׯeMڭϬK]|>ِMAsGJ1,_G7>db⻼~dlV;7I?r>߆Ԩa, jKK[3(Sj&ƙI 6)6I D⣊G H_)EhuSjFWW?ct[d9.jоrYCB /&AU![^o༑KR͎^^cJ,vĚn8a_^$Iy$@ RTQ6a @vr?cx⣗^,֡󧞳LДT6M8I/NI֋ڑ֋俎5 A6wG[ZoP[2N8m]l۶m۶m۶mg۶m#9Y\ٙ^֞zkq'O)~S+N^,>'K{n/A#YHYA^\oy.[\&LD/J8GYݙ>cO^MHr4 v4\ CM.c.9P,P Pr[. 
;;%$O+9 ۿ0y ;0(Snu$yt d`P Na^ Ǝ%R7ۏ_)w,H)PIC:Bs8J=8ѩ37.Ky7y [SPKW傾/sw c\CxbVQ /<h%7x>!֒WJ -ށ !-)؛x4Si&c LXF3dNsF6i6!aI;@Ts\H+[nZSvtD٭Gͫ@yd ۗj1vt Lj- ͛+o;p`f5qCCw VxLO:L3ɡMȏU,b0?h9!0.`J)<4fJt3i) d6Š(DZ"_ ّCiM J-,oɿ\v E;AnV眱mAB#I#25dxfBL-.w8X.,+V.!pUddOYLqzJO/'u<>(&$(LUE L=r$PRKe/'|ס%^O!IZPEm뮾z@$#JyD hX}9VM4]hxl>A˄VEE2*v4u6u(1T<0:2)$ ^-o_`N9)m*r "2LE \ L86TR{"IRNh#fff䱭t@Ƒ@ݡ)+nk+-+Ĕ{JʞS֛ts"$?qbǗ5;@Q'qNYϋ{Jޭ(LVJf㝐5 sϣPJ}lG6qg(u1%3[þw4 }a!+,OcZSx#v2JP]HUr]aNTg8{%SwlW7jԐb̎j]>Fy,KϜ/1}} JcrTu1(G{tIpT}Q*VyG;fr)n[T^9pylߊV *aEcJ+#[`W`ۑ(bbf<J5F, dsg:V5 U/wֿzv!@_nȪ9"%h)q[Sic*l4Ѷ^؀wN Km@y#.Nc˲-0ӯJFJ\aԩC,+$)mpTOP明5=Jwd9Ael7ЪU5ԁ@ g8?Bٮ5s"f]OJ];D$JgjmZR΍-燖I,y$>)~1# !oYWrpaROU7;BҐ~Hs9tG?r7F/Ã-1T6rHzᛟJcLwEQ7H}9qr+c} ϟ 0 WFV_B!͆`=E.8`5o:DZs6bkA'$I|γ\6u߂UdAriy@3D1\)MccN9(XR+a | ,i!t8q7˞ȟIEFe?W] ؁š/=-ʲgvBw?dv\{}(>)L*;ԖwʿQR #AQ+!HUȤ!h/^TᤑWN/ʁ=>_C }Ο`/0U_PRYo8uYxh )Z UW% N*e!Mo+6XR1W 1Lsn!x *[0mM|VAT=Oap`ؑE2,Kt$_WM4G /^2`v *B//$֔#|\݌m?j=8KDTА4DZR(guMw%XӬT%[:N&PIM xoru EUSnIOʳlE_$_7s?yrC9&(7҈Jbw4zjģirn)^ۭ \{}V3AM|Ƭ[pnYdl2.kbzs +Ȑq 9k-=MMP,;qd*gj1S{x?i-^`?(AzKI]oCcm;̰=0]-?o{!]ۖ W9Ae0gII=;Ө= @UkkG(T*jF]H2'>y$?c^Τuxݤ`kzbнxW#[WR\ڿv1W;LݨhKᶋMyO5-OWpחzb;`9 r%Kt66 ulr>b鍢IHcq:PO3)T W)\l1dc`m8 #-g*)#K*xgRU?Xh[ j)t^ bֻ}Wv41q+E_h{C^Aj-n4֢dr9E)t5;dM-eC#c䧕T i5Ecq4iǻIؔA%G:%|֚7Q0op@no:L^\[Ώc%$pVBc-X7N,(=m#S5xG!@F5;S{RX1DNlڲ\^yDV14bAθX8?심ޣsFA'vѬ?j|z@Nu*U|/fK_JVQRQ˯LN2M r[ 697h /` rl*`3ջ"Z$CRՃz{$_ jE)_{|ÉוQg[f%h %XemD:wH-6\k[?T[9 $Fv r::;q_ M缓WwMϨ5E>8ݤaDT6ׄ"Q~r ⻨ ~7eBѡec>[b3!2AM.A\BA \y>%j whusGXIvXH*=(@a~)#7u-ZFa HhN& !&-xI Gowt헉x7IN}IةIэᵮd'k R\jOޥX?c#x3+򇛮Z+L}'rd w_K@ ZdkrHO230qX%H/}pJhD&2wI2'+Ze9l(>g"hu12HJ& eR >UU*!)6v^'j#)>BsR!}ʑKxHFÀ _ $ Ӝ+P%|8%Y@)Xg52 ,ɛu!{jdJFw,VGB0;wE)C{%EB3͔- h}="Qw7aˊ~b,7aw{m\GwscDhvrtL*]e%tVd$x)[HTUb*-+\"7ZKݥ{nIdMFvB' 1nZ0ĪƬbd5q4>@qkC+Lo3qi:McC2rEI-!`Id{wP>}8x+p1 E_DЄ.2h$@iٵI{)x(zFe ֍Sd1S} 1V4<9-.3#`nvKZX]>6G,>tHfGc~ʀlWyVjDm|*ٴws j}2|{ߣ xaH?W,iKgpa}:+'?PURL祝l.52K6sͳ17~Җc?xPM;q3[z^QGb\ʑvK%ɫr.dR2<` q$Vdzs2Z]]Qk8K;m_7|r-5t=Oo3u3WFIN^`$NvAæ`wdI"XȈ(ILfH_4Kn߹޺8n3NJ+M$]9;92FGZ 
RWbSZ P-Sn&rJ  _|Tk7DI%H #R@o km|6 f9ru0,a}c-Q8I n??1߀OSsd357,&N^WCrufe}I7v%>L q)O%H4 5?mAH$`EA[;xdq*]2+ƆzuZ "xmC,UH2?C+AI[폇2W#RT9wc G&ʣ F9(PgGLÓ#&r;a%-Zrڮz)G,]<eW:p]E R-B:yw|EȻ+⥫[ I|R^ucuLƼve.;sĥs`e;H¦x\e&0k4&54+ ܤpUgnY|q(g.(2`ocChK\C;C (u=MZ%Qo1)D䖞OV Y,SŒI˶Q9M\1L?`<&*εQCS/*i.RmoR[uL$;Y׃1GհgAeŽ|6D.=Vj"0^ 'i'p!h.lƑϔt8KӲ@(ARCP] }a-.-A<4D/^x={ QЕӱ_d% og ]zb&ZMMR'dfP.#Q\S'ICgvl;s}VPƧK#w3fo;b UU5}g|1/F.نK0yk%FIPyS3]JjJt dK2QB3&!_ufpE:xL0Jd`݁Dƈ"v}lsbW=PE'4:=Cڠ,Zޱ?Hrzؿ6FeϮgS*fhXpdG<67B!2hw 5Ѫ܌25ݚo ){O{y;y-_b0gQOm%=^ m" >9i,7\)5HqK姓,jw{ rAQɗj,TsZvSQ~5o@$r}P0<lbѧ`*,I(W {GζFΕ h 78:WGP w#XEuEէr`С\ٵ늣ߞ$έazg*W0uf-*<1=x:-u|TQm<Qg=b*L5׷ R+! d `b,f(]L՘k͚֛Lz_"E38a.Пv埭8z FF+X"~ţ&n%& 4f]!uJw]3$6X ]a!zbW%V,:2ct3,: .l Y W%Q |l1Ya\@jMEO(R  ER镒zUKem-QL/aRb\aa4$rTJkR-ROzYiOzDʃ~~UL(WLyd:E12Po/?`zxQ~o aOJǟU Y:U_J}E153YH 񌆭F27l.Y4]8ƹqXfaq..gq,M#;7vS7RuS[]F:4_委$Ӳ:q89VVtWڴMՕeePt'QfteȩRYr,STi(m&3-ktImSeYb,#[w4sr]b^n-[VUMzYP?BID"EAOH"V*hWgzkϨZ՞x-#֛ەEgߞ=bh oGMlhOu8a> QaC "C'Y^" E},u g-~leDB;XrDESyM9*]Aq;82vUb# )2s9׃΁m|ZU V\ Iu՘[PcSm=+pHCm /+*~QC +%:2Z*5Nv,bE /V›MD|D;|v|-"SթT(]J _#|z/5$˾gCƴ%%Bs9BswԸ{MZGďL"J}@o^$\RW\3-Tk fh2Zf"@r #U izFY:/16Ai\2=%z\-)Oȶ/erAR>a?91F8yT! lHk`/D?Z$c3K8cq Wq\M<wDZZ#a tp(@'N\-Qﱐ<yUup=4 {> p# ݰzcH,1mfx1{k0j#^1ɑ\sd87B[2x"p_vېgQHp#0j f**UFt Y:XAX鄻!ЇLwqY tWT 0!r[E[3pR3rropO⵾8w#W:':EJ[4~%`+uJnJ|Ugjw G;`h ewŐuǺ<[84zQ~5?JaWUYWi"^-Z<)6ϢmܽxJy&;^`U=pEg /{EavI =0) #v'*9c ,R mGnJP ;4duy v 5oqp#E`ȂEa| L0YJD' mpYt~Njbt랒TqMltlL2OA=*wzꬮ2ksO|Mg(Ak\\\\+DFt6hO5F"QG ];$J|ťHIo=HܱB2$i0DABG! 
9f5$`Iг˻WL %6BXIuA!ACffE"NS|JYY?bzp3+2 |+rKsდޯYݨ҉kr\d!LO P+s3Z̡&[}4]PU΍aF=ybL̍1r&,<1bž_a%T'RF :J60@ r Ǎt(UPL*jmXǥtXg |v=䡙M[ S $K <_kIU *QʕGXP#7טe^cx-3y8kč `uJ`11ߗZv9/0>mw#c&dJ,¯*Ags}AQM[o;uP}ƈX4v;c<ԹS&[q%/I^s3Va$f`E|Rȫ1${bcJY>Qk]-Qn%f2{c{3A'GvOWז"o6븡r?fVR=O8Ɍ`.DB?F<V}ZtN&M_[̸}XO BfCIgjJWsUl5Ϳ\ǮHY<9G֌`%Hs2` S}ueIs.٤(~HVnUZc&<٩|ˠ>wfɈ`w?S?xFsi[&ۆ-lce0 fʌLNEJuKL %uU"x9xwrUm}` 箹d=DtAȥGתa0dRge\FInFo۫$HA Q[WjdN0U"X2pj0&yqVM&y`(h+<2F h12 Cu=k7U+#l5*娂/Dvi$#6dqͫ&wW`F-` ^i>j{'6'6I䱂;)IGVr`/!giw3Us^frU'MN(3SMJ%3LE/i9<هI+jšH%s.HҴ:^|3^KDpn`ߕB_=;.T`Q5C$y\ށnvT[xךR6/N"wQ> L[̄ pzg]Taa-k:G|Nt;}yDN+Yx}+ڿtա-ѥl*;f-6:> Q<$mZ%'Y`~(oD"{/bj9|]p_]N5ivn.!RNs O =tB b;;@`/;9)ExWp3I ^ WR?*A%;r9E!qh&. !l)XT:T ]WJ-)CRއrXb>~hrDwG3v >}æzpĦ'c}{_"ҷ*`dboloCoig颯tUb@BZ$>V@tV BMK& NSBHJea$c`=e֙MPn*ψ]q-%&QKmM.~ 2t \p"D|TP GV9|hA7l2%!.$Gdފ_{k]Y:y^vfXw;&|W1-bSQ``\C},o*r#G*+5LAI@#~AdD wan"EI33_a%;ҕ(  g,Vrf:MSQp٩*d뛷Fӕ)CV VinZ9=J|bG =n1G2R sqOX2X=_*cfd% sa}<\1'z_Kݣgz6kHLRt?2l?3wkwzׅ  d`$?S#qS#3C#U0oxH yHzI ٹHzډiШ./ Q!D"?,*f Vrf"F^:zN~vv~yxq` A,  iy 2mCC4{xpa8 χLL҇/TR!'e)gsqk_j?)9nt)Ê FsD EM+9|ŕ F vxv; ̃fHHr H3%VCFΠ#wqq11ǒ%E@("ulJc{pzp ˈ*ʥ_V gQ#@P-Ąs"-B|Zc PS{dJzˈU F֦˄5BSZ\Z޿XB}; :@B\;8|}x@iby@VI?k9RYmB9)Q, L7gCUuX΋dfԑ]nbfλ,Xȋ. 0`,Z ?T$B3%$1"3]|멳=sv9)|$C^ISqcwO\0(f :H? X"5`VS1q#I'$ z?nj*@{YR0YիT:mQ&>r E}ں8U4@լXE xW| , ɂ: ?YBۢ+Э cE/q@t2- gɋ&h(G.޳^CrR 26ov}mV(bA fj ,NmzgVlR%v^u @ $7NAdu2XON[#j%ʌ[VuZǛo )S4;VЋ"YL?;`ȥ$/>?8hZY9s(n3#[^3NY^JZ)}40sN/rcw =~_>>ez-(p$2#VmG2w8哌 D"Vr#'jN,80_ɵE&r#;6opUTy@[U]ӍGgJdw0]Q(/ENOzDjԇ|©E 5ThAlS0SK^H-/б3[Gy58hUs2&k\y1…ZD*&Ecq˷T0 4Kc +/8&-^&Ҧ]AB˶Ό\FSH'ԔghP4gr5\/jBvWQh>Y\6SYYٚk%DMCda`9dD^6ěv@[+lJ7>>}x2SK`[!T/%q̓OJr껲Qþ[[)aO{P6>SZrϐy~rM7ղ:W{"y2 |n%3:AcK)j2>hVBYyՑMSCo-6&V6& FMҼ5ٍG &]Ӯ@ ̳T˲2DbPS)w5G'dۋNyټS gQCT[ y՘薇E2RoDز->?˦Eو=;@ s%tYOQ\< [*Kk5dX!M <"֙)R $d|/VjyvM )f6%Od:#J|YE{(M 5 :v˖Z*3q+^QnP20ߤcez.wUOh ATpBhAZθ HL&k&|R4ƢlSoLH,47Q5by=B<||cۿj7r{c݁0͌ i2zMV? 
4%Ő8i=- ܽT\r7P$)Κ:5.QlaQU*i>[5Bvv3}Č@w;vM]_kڔݯ#~gzhHi}JM.{<ͰYH~r/υ٫{s:MP_^oͅlN_]?g"]υEI#m(w9L> zԴ15׼,UɍH|.5Q);R>DQCrĨdU`RRKv.;% *ϟ<}!v^dCUւZ)LΣXzB䕪Lxw38l"fDXFmj)9;_t(hg^ՄB,&Oi<+U@m%e}B-#r&JE=@v_R^~UGsB@ W2k:1I w8Rm9wkZtrt_zkF:JSt.]TPX=HBh/\׆1F[{-p:bb4TῨoj).סmknd ݁Tf:,i%uІ*"\HFCm, 0;V2Xba8cF͢2 bXuVB*Ԋި$ޫr!2@}A4$2B +Cb=A4 qW 2hɭ&F 7D2U[ʚ]o'>g tLi5&mno]&.ySKQpj4kcLWK reU(ݏő{` &C>k.=Z1I'$ڑ 9yWQFJO/qNT;G\׋\bVzKzMe| 2(fv>OM9G>5:s#*VjV []b^)7xWkw!?4/5a/BahG IDƛɠϧnS&I.L fa0wߛc{?L 'в%22 *GV "b_pIMbq?`,ޠ:JTrV^# Y9_gML0ȇ!VÖB yv,jk}*OZljƶ16YUdȸ70b(h&2fԧ3~2R0w~NoȨ CgUon*=99a RELW_ i3",6nM@a-rS]fC/Zv}Gt _b0?s'd 8F_N[­:,rIQAE. TWo߫P6bCaႥ1 cx^wmf"; rpQe3 !1?ǐ⤈IEHS<7.L e՜ԏ>$o)=VVň88Edo\>MarPuu'Űܛ͙!dEO𮃁wY2XkTx'GZ*& j A;KzXy~"&EnfP&yyŵw¿e;N45ϜHQNҙCrzv;pz>OZ^T;dT]8K!E%/0@9NW̩I9q {J5r,@qji.ΤO\S63q&-%SdiWrrlo)悬XN<#<H/w*ӛ( Qwvqq5h:!eW&؝HHĽescw'=udf&C ٟ^>ue=8q?%I%/3TC~a& ױ>"|'ne3 !c^/5>g}BO8%$VBbRw'ꊭe8įR9p؈E$EKFޛ3ZA=nϸr Lk:XW(GגȃOBAaϙ*J g\[|iu~'̋wwY 6odpD I iT1L*ɏ;(}&2aٕzlˆCv`%'W/|xu)/2# #33۲N+JlfI% zDczI8~D b=p z[F:'f tW8#QXzg,~;to<u5}DͿC]3<A,S.KFÌU7f_}[1͆_5wc»IM9ܣuV<&ϛZM'ck_$xm wiOT`-!“?qJNPG< ,qދIeZ?$2 0hZՍ*/s91iM.sa0NȋW:y%̃_Ǘ-N4a }0:[e\eW(_ʮIusU+B2/=Ay]d~ {z/7 xxZxClˉUD"LmI l^OcЃc,pEIB;]Ȼn`6|42͙ʸ1UdLMeK_uy;ԓ uMs|%H oEMvti՘u$|ᔹސAߖ/ܛ3s۹zi4ⰕGbk&y9(_#_JvÍw5j0h 1ki JTᾌo ua Snߙ.Yk:I`` 660>xasFI pmt9;CLD2g:DKW ˁ:֭@U6ȆF,`^V(OS]jQC H!q"15[(Ɲzl^\&gum;uH#o sCbP#ࢺ`J*JJ5JdA1n;0v:ONt2G'p),(I!U7Ld>0PLD4ɣ5 |{IP%)\ .ey;49/`Y}U!$j=<2Nڛ rSm|UA$ATӶDX7zrmfSN*^ʛz>23@B|*];O?o!X"< SNa0dkJ<ՔbxRr!h!NMX_P041-)^22EwcPQU\J5m*ٛԤ_ #mѫQ?5_ "20Lk0 yU~ 3I6ٍ*Qh9^ vnJA5N6 "윫hil]CW9o'hF_"*>'g4w XJgƺ2Q]ߐ450ף&hjRm&0oH [{CPҗQ7D`P4J7Iq:Or옵B"$cݾ qf?JGI @G\|:T}Ll( ǚT*k&>ȉTq+dpFME:y'j\Cel>ųanO7v[gHU,ɭaxҍg2)f "iUGCqAeex<:rcWaǛ_2B"7ƚ"z_b^UґBjs iW1-?PN]0bB5 A%HQd׾#h(%7͗TLI(XY"Ev|S:NI1v--\-ڞ@e`6b@6rźEb"_]jYa*'KQ۪!kz22N:"}֥H .S$0D\HXf:j; AqNE|4UV#BVY3rl4χhgrs,lW Izm*G;֫wja=]# l6Z;IZҰps)ݯHx\ܽcF ժRfu g' m iܢ뼥PId:و 4ig'.b,;5)7A $@[tajgފs#'B̵۬ 'xC 7{{Ǫ>q{1&wr~rr*]dCcq*Ks&@5TGE\W$D:è<T.⬳Psx(Ra|X8b ʡ j@P[.duaNB䗣/kZ3WYVȁ>";{0 
g1~xy0ןD:Y"Bp>f(oơycEd6aL=bscQ6e~%(@HN`Eme62i]kVj܏QకAd|4R%/&a3wfє2{)&*ߔ}_!^T8O$ zBlF7pQ5G#nȘlT@LIڶuROv&ȓ"([(PdIÝ|2:YTi*pXϹhSUSZRiuq擋&'_7.};-X}]?4o[\ JVMu-]0fOx,#_{Cb$FQ~c0Oakg>CVPSlD@ME\G/j(:SMeO01,lNez&Wş6ƭInS&.smqRL=Uǀ44S LJ8GGhK!{#|+$ތ`E*_3 1xVm.ftj֘c DP w,X̱hkJۺ7@"VI͔i:/:x="*.͹2ٌSa)UG IF, `mCٛiKV _K `cï qE(fRnR$if[FHD /%ZE /z; ]{3Y VMkxq-١Wӏ}GzM|oW_;\v)Nζ֋w`˲^:k_[a7cJ7!- ڍ*q&ev׺_nd>>& JI1fX k+#sXYy9fx4jr+Dpc"91-?}k/ ²)pPJRZrQWSqD%tU+g̙׼E!RY mEF1XHITw+DBjȲaDMd-K &m][)_duI)?* ԣztl ,4<#M6";£ C4m2!؜=vuv@}JƆѳG\qj}n vvqΏ;J2bqa;DHtբgdA7Mę(8kLpi.,MZ,>WٺtFubg;P}"!4{FOJ:+3g1D+?h7τ0qLvGqhhJP!˕Eq<7[1&E0~;h67 l-6Tq:Y0G@SJ'AlhJV۫UVhm[O÷#> A|16N:iZ^za-ڭ yqB98=zd ,=|Y[P_Wک<,HfYҞG^"[H˪m-g\((;l egX}ӇK0orŁyb`3D&eө|Y%r%*Va 0}\EthUeN ΟjfPN]R ?ps@}hI `@ SkwIyjRmXcϯ}@T'vK)=:"yLM;t67~njs՛c42cg@9&1Aҝ߫_u8}6Gd" ub؎P}t]G `W32l.l!L V`Ɔ놽2ߛth Gb]= 4k`)®'Jx;K.[=z3g{Qci f,+?(HODJq|>,vuxb4^x!cLQǐʦ ײ]eewV[КY>ABIR΁+"O_Bцp.gV-Qa rVanqUŹ #4<6vfXX?u]ѷdQvX ޹.ZN yw bV`yӨ'XUo6޷%Lu'>5Fb<9^ԕ!.K+ztyd?/A̕']A0M`Dd4 x/VY#IuChԃS-i)xofDo5i/? o˂{([i|rZA^*yc-4aS2n"1jgl\«g9>#S#i}*ID$: ڟn7c_^/[x%8eYHA^Sɾ&\pcj8#dyh_2**) F&IԸ>71F€&$XLdeLaN.zˇtϒn%Zeϼ!&yіg\-OjIiHU .M*r_yF%XK8Z-s;HNGZeb,"9kD2dOJI,sx4 Y&90S1 @]3 SjRƙĜJC֢ Y곎vꛉwYPUˆHd%޷b_tcA;U98.gÐj ];'b v5 +Xh7y{>i[Œ2?mĚ脆^ ,x$\3!/fØf;Orb}> 9za4ܷ?tBՃ T̚)F>G}(qK~&c.Q Eklr>"Tx>&2靛4նј2L.OT˾ aO 8.sڞu~Lx0o5q9+_ˠ4B6=[NͣӌG4~N{x?GrH1+cF'b#!|]M;+6 ʐHO>4Ó ‹sE9t7 ~G/03օlrݭC-|ѫ\=IZ>}ĜyZ_P>Q\ 'k02(y!)E:pPEkuܤv1n&Cڣ1 ΤxઈOxNَj5*ۄ:5qǭ kMFjܫW6pn^/@?KcGpb]/uh$~?쎤#gT_JˮFӈ1Y]짤l}u[aIf&5`'I 3~sH75=wL\1R&"%R>%ⓧȞ3@MHzy) 1Ԏ7zORDUG %9xHaFaTݱO j8uK,Ke tt SQ57S#?_P:Qz2g?,fcu% l| 9yTХ :fכc# 2}V{WEv_]9GZsⶫokKNZ#nܐz6ZVKKN}gx>mA߷+]_>W L2Bo >f{3WG1R8+p&Ć+c(YfDY{,U]{"4*LIuYښ8QB @0ÎH9FG:`WQÑJwk53G;۾ZT0j/.դ q?ؒj:ϡ}ix`~o1dBd6 #<6HlБ Lp! 
XbafBVnt X?hzϩXiw2$0 ‡؅$/+-ń̔m$7ȐyG{;kԼ>|axVHy_y1" AfBf7 ?jÇ"חJx3SČț>\ZIKX9 `0&V0djі>`&~}_kFe {:^t*aOÎ5m?Zy{02/.~P_+֢O"` Z2r<5zy׿d7;8v0Y!|n)␁\։r% 3eqHɟl5  ̶1M؞2" p*Hzsя| #ctTf}Bc"葾yR]@(27onIݒYE V'π_ym q8`u l.R2 S:I`T Z^R8Llȇ_SN5{a A2.`@:MnC\ .J|(tuSc; `ؒBYoP(%3cĠ@R6.1OѩfNCO@$o|,|0?gD"'&<8V>XpwY{ғ7>9̏MZi]-'<+Vn&q>kY[3wE{ 擶͏Ld͑QK$~󅅂̏6MD8=WG8F׭=9iF>J@(Y1uRFIvՔ) `(z:l&[3UG򃺐.jv7Y q;ל*mBַBm^)0-&Y8Ԅ[1AD ElFˢQIuz-S>$rQx!cA v0PP޹*z@ɰC/!V)"W,FBkBv*Pe^BT}+0Ha* 2v]r.Դ`b;Wݬ`?r#*~ )K!/|'?<ΓVsm$Ql\2QW<7,3bqkc:uDD(+l:TPdl 8G)Y^ ݀Ua}S4g4ګdI)PK$"FݨAڧ{oR n*/MpH,u*(LiПfO2NL~lO{W,VvU ;Z٨ wD+js A+:6]jh/wc)OWIRf816ss {p71yaqm׳5^l}{2ӿI]#F> ߎ'h6T!ZZYxsxIm4p*]X`D豫9ˋ.c+ ]XiYBws9>.17N,mU<o9cd)8qUQ`٪Imx-»ktN CX-f˓7iø,MqdP8蚸//}HMɆO?g7LE[N-ƛ*7 %{4ynFDZ;'2%ksp3wS A o-Ukt^4XIaA&9R7)"Xhš^eGG QAàk _> a>x3R6Fb" =eyuR{h@+>+Qm5*.K䂽6뭶=qӭ"58Q<3)Y'ܽ;elaȚ=ovaJ6IT(5L,M-UuWfRws4S 5-6 b-ĢY1ğ) ǵ}=ƵW|cA.xjἵޥeHH,9x> `īHzP=[,˞{Nj8<8ɐɏA?\&WՎp MIņ W@?#pNCܥtXk-aE\oZ:rK^%NU{@WỲTuРr=sƜi,%ڡ`b<!7"=jj o{ R]-ma`]-Ed6$<oj %GI]K#d.ZEu"^RV%Y{nTd*;UZq6%Y;*Dqُ뱕;Dp׬DsjX 6 uK+ /^,[M(S֌2]b;W_5)\RQ",f6r@B RHNcNp,j $\پIb":s0@j_?^&h~r9wPF_3fKj*CQ,Uz1WE k2kU0Qf pKѠ U*n>S8A*LHzFUzfd=Sj=S2,s(U}S/W3RH8&.L]Ԋ76WC_CHA P޹uVJV%A 'LVByüvXմ|6ލ}}S33E_w!d衐5S10 p~f-5/EPsm1\N7/GaL7 ;zYv2 zᆕV.S%<_AǛ[G6C63lЋv3-Mh p8[d6W^kA7 > T>dN#J ):*|4jlS7zlB?ӳf=jr<[o;CO=w/xz>}oa]H༫3X9H-pnbSc/c֞kcV9I0iP} yEn×bj^J\ > ɸ%hLjC wd_o2rۊ^(62jΩG<՞-o4aF i{~f\m pKG&SKqS-iwE<6݌{+tǻк&ӉrVw o=3m[m)*;0WsRq|E3 e;n0ʼbNP@1,aHԎe@FGH(Ju&2##nK"!7l.rKBk +V>IB(Q̃kP_}0a81 鎯Lת1(4˦$ObK)|!M1 ;sl=] 2 fD‹o0%[a 4a.Q#"pdlA'7'~#H;?Q@7 txKEɓׂ!*'R]Tp嵨Ca: ]Tz) Y!m2ND+S%diuȨ{}KsJRRAsHJb)7VLe=|딌׫W Ġ;1P=2{' L\|FW.`.3MRf{M_3j 9[GG+ e?{>GjL$38"պPlRs~I1P9BY&\,p Y*2e@3UO@JZKJՋ61.VဪDփHkvG޲+u8A)6na筚 J)9EӨ֑-q=̰}'@/j"a]U{9}ܒֺ:ɪ9 zLh%>`|fWJ1HƲ\W9[Mol°Q҆qӜI({ hVLjI8 FD:D1OFw#|F=.SCxĦ9B H .TQI ﰶPOE(=8"iOCQxp(~hVsa0*GtiD)N ӼpAV˷jZs[JQ0ݘ,\ݱwN-%-Gݬ$eΜIܥ jY'&͈B25+"9'ד7fX+\6QR\hzX|v9./wsS58!)PMc2"[%-Tx B+^izP*ڞ+=9މ+Қtys@t$o{9Tvɏ0UԤV9щDߍWWع"™C&"Z4 oT̂Ou6u1@se+ 
DmǶm۶m۶m۶m۶mzIfzzJZTESP4P?ӣJJͩ>ev(/sz(gF]}ap;'IRy)"arKcR42 Gq‹ '_t"c QJ$E2iQ#D?`KHÓŅA,`T}*9'3w.4>,"_PgΪQt! F<,5=x4JϥqY# .4P5l& ̑]T{D s .;Q2wZgBdNp|cn@C侅+3 w5 DV c CuunG"8F `O?!5 b6 -1ņÌF.) hh-$.8(w qm*&__aPpԻXO,$wEozChQvYI>R [,& 3_pTPk5=o4,`Iɨ|yO[V `품+нTq##b^:3R|H L ́9YfʆOV\R3Mf[Rufk9Me!@7B yL{&&@lid!qc 7P˨p EТY Êc'1!< `<(t#iܲ )Y;d&2>uʀI4J|~2*Έ>U."[ )ѦI% ߑ f ~w#Ýj}y { R`35rf mNtIJ`;HK4;Kf5EY.uV{fo7~{~]~QC̄Yv`;ڋ.Ғ:!Edy|>SNd˘ۻz~֎r:_᷾ҒG9}XhҳJ=Z9ҨXa &e "@./}캳"C=G\|exdzoITF;z:A"} 5|)u3Qq}eѿЕx9 x $|-hs8s2xۗQӱR\`}@y.CA[IL{!x||u;#:Q:s C`7ŽpSSYcМ0 C > yq@jkəóid޷;3}[8z4<4k/ ;q/N_g tM犏v!N$I& u CƏmq,w±Pbyu Xl0)ݚPKӴZ_y9`2T'T5Io}E% f.*+ QAu~'V;CU?-Gh™֞7,|sM=0@R(LCA1Ys_jy"aǡf$([U蠔rt^ebO"D𨻒9/b@eu âo9It2/f2Z %:]pMe ݷn\Ԧ_ }\9 h%=;ҕ dH{5?02nπRA 80+Wݿ0ad XhpJ{;Ҩ3'sl[;@oXF&趗*XWb}^ 3%HUT›k.# RX<|xozaS uѬN|zty m1&b]d7sLer7[ׅ7: . 'LjiMbJ"d tRa7#Uw>oN/>ȔK +n ).PgfQLd+BgQq_ JnG!bjiiW6%]*PY糣TN7H\/"?̼L>Zx8m1<4ӯMI0X%GciZcXʢVʯF7QNަIXM f^MGɳyhne%*UG>Za{' I"a"b-c'S mxJ,3_\/6)0dNQg%0ſfKZJh| ֙|9mTר]md\=wNbEw@~UHLL4A.\΢ݰ<լu6mZ%iuFeH94Aa"fU/k D1s|Fe+"ޕjKa5&v)( cՠ[ ݌ݡDܴu9 Ү|>)MZV#`eBt tȞv3+f,^ 4s0gD&yM$V iQoC] IKi zETs Z &, hQf7"sCH' (ϭӑ m[|\r RųE_S_E/kV#֊XQQr+3ԝp4#+O}WMU;ݛ5_qf7zّc5Ex{Fz}w)5JCBdWztEQZJi&H$c{<e/xbKLդ V٫88bW<$YTI:ڥeU~?s]8)ÊȑʿL5T!;״o ŷ2tImI+jsoJ p0 {Q΢* K8w;S5D# [9W8X-?H,{ۓkǭ P|ƞCQMo[Ct% e.҈IkuG3_Zߙ5D,a;5 1rVqZE2j^_%M0BrU(j4:\8KXeZ^AXqL Z:z˙ȃFoSL*z?XRY ]wA*-\[2$hHBhHXNDiP l gtg1eq^-A\12ii.l_G/ˍ^OULkE_g^g'tl2<*RA* F _g<8EʛߑAm2M-c'ۆ8IrzbF%,"TU3ƊD3f؉dKeu;II;[;@opHِz~{fw;O|N%115-ZNi))+8$>Հ\ Ǚlyn&C V 7U7n_q3>:C٣,s~|yz_Nh@fI9&\_]9)9x7OՌ -slMp1<%농♙ L3V,Wnt)ǭrM˫ɫNnksۻDju&2{& {-"9 geE:Ě&0 R4j[qIu %ɳ>!0C.\I#3 xU,_)W =<HD_\24q}85y",y?˥m ^N]J!S`&]ӓ5 7QLV7*țOx a NQ~kVs (E)8oXnfxoQ:DejrT.8HO'+Ol(7rL2_,%uTb !B17Kd\s}Q -xQ*w Wh9!)p<2 ,Ɏ1| @MXY0l\}xԾAf4u*bb=Pv:c}E볮ǍgD2YQY5d5vVSݬ|;>tscύ4ݒxa=E#n[HպVދ61Dhqv%yPP5+6[6`&5jIXБ1_$a(Āb.鐄J;͡R/$MuaneχA_B`TޝLnyPUfIOnH\A%^?(W`aÔz5tc)q=UVMjoϛԢҧFcBNpfdYlWBb;:زWD2[<2J@^Thj)a33\ `^_פt$,b3>,XSR9ޚg .}Hq1~2 .6fŵYy%$8ԫQ: -チLk w{ͳ涑>Qj .M 
BVZfS*E(^>vtjMw5+%[SZ5s٘0N19`a*ʲpRr2wC#t}&Ts (vHǏgK"LvYR,F (j&,j?huCr^pEÚQp5&&?9m)Kt*IئSE{//n $EQ9:W_Oԡ6i v4I{?-*~*_%'Ё%I4FH5DK_eE*)rD EeE/栵0MpT)qVg!&$d&$ۖ=8Po%H7FѯDBTS!K&X?o,BU>+8d!)4 n iTf&]$&mLwhH7`HYF&fov /~X%i뻲 O i؊^>)y).1ԵQTޱT"i{p9EDlYBV(j>I s8sOO&Fl7ZRF@+ +ڀf8Pk:Pu+)SL[akQ-"7nH9Sn|,C>eN:譒X\oSx4[!FS|D[Xg˄1s%b&T|O6f9:MxRMoߡh1iZ(wb3jJl[g?X,no9ʀ^}@jy :>{ -bΈ< tsBW`MLd; ڼ{].Ta3ƥFuH!-Cò?mgo8y!:>;6 )S˩9$FK>pqĤ5T8;qz"$J: jz@F|X* ZGQ)?Hk*N,b͋yFRٷ8r7kĔ}![܎a<8C걪'j?hck4=PWQP w鿸44(7*|J̕jM5YW!I9JQo`.< ;oZS"qC⺯Q9߬k["zPŞL oS%@0I.0!L߱ ~+Uɵ栵D"$m * u1%z^{kn-]z57氋p.0rOpL+?%8]H*ZM,Ð/oiC7[؁Orxj"AxL 1fW(#7]WJ@=n2L[AJAhe [@'Y(kYTAxiތ=Ξ#ya!s gLLơ ҋ^Ix(zcʢCϯ*Ê|Ae^Mc74[o?+Y+#DKl]yQr'$y0?/kORޘHo#-x=SZi-#68ќAPE>~]Eüu(؏^H(^u,saynODl[˶I?Mx-D8BMzx9/MMG l;gP>uc QƋHY ? \!L̺ b8tOٝmt%F2D]6̡5wXKHE!Ù!ŷKy_։FvɠLPy،lcxwו7՗L(V.5eZ12ǀF. gˉDJls\e \Ͻo`R̸fF ų{l4.^ewB~g\ <.h.u7A1&yT8lqiTY՞WY<`y+B8mM:͓iYko0bX@U@.80I%L-Zh0xqY1A>xj"QJ Ȼxnsk܍l85/XFҴ#>XzE,ҥ ~K~X 8ݼԹ7ĝ$mb cxW&OA=F2q9iPV֌e_s0x=ѩ/zޘ&FflI>#ӘS ,h2FhmJ+b?hTCR"3̳3nht !>z>Y45OlʿTnLAW#fj]$ OgZT)qoʘH%nI›"&Wz*(3('d88z™Iޡ0%4jjh>h^^>A G|A06މ,Z[?|\.{b+370JD oBs9맲Mb|62B~Ƅ΂d0z ۪>liJP1G$KNbK:%?B Po7 &V}~R&WwB8Dr.gJdzGHT(#C?bhf50(b=~>? 
$aojjbJkQn) {/9MWIRL&bBc #V6[W7CssZ) *w9#D^a6՞2UǼlW7 88̔ <*#ơ>EF 1,%N1'j,3m +N3uwɵ쬳VMqLܶkSF*>񏑤-)+Zd!0?I#s>M3 Am~UkbXJĨdFŰC\#u0Pr#Q4d/QhF#w[_t".3,7\{Ua(.60,4DfaXz"R3|MjD`s~ ,9OٹUF]KmqirD|[f1A%jU Pme:,@u,Rlj&>.}٦*I߯+h%*i}͊ĤrlY|aqq ƾPԊ@Ih0 Ve!.L>b|YČ*٪tkJkv0Qс} s'^ܼE 8X@%JLK_+6M~+u DwvҸEH5BxΡ[FjԢ ~[9 Y͢2 QawL1ۤU$3U:LC%Lgdh;MXXÑ8S:iM9:#nk W0YG7m~Ú:MY'ˆ]`,8}\@`,z&_@Аj` Rd&IR,d,l1Q sH2ʁ`Кh<XepUX`u~3xjD o}ܥ"9J֖bDs qh ^H,1'j=y va0;%{c3-ؓ&Qɱ\ * cIB: qlqDWX5dts+o/&ue*:tEc<]a_ тQ_B~R$b\w'SV'QۦDYP+ygr~acRRVg9=>O߭J㭇ʢG+BHPfd;?ֺKfly>f#\Wp`# =.\*4le͞DA8 ck .$xo @*QeN| 2'фGX\0=-OS")Ig~-S nVK>7p1`h@K#7ƞ[Eǔ Q#AkҐLސk^- /M鐛pܿ%:TہtCJ< N!ge9nFhP/]27b8)BI,c#I\ uDM$:_pS+(ɋ/(+X!P%.ܚY7}֕wVbճބzrm%_˘@r r0ʶW爣=Sϫ} %FC+aCTv.5+}gKXƖd?,&˙%"eB!a$XXGlS7 $#,%:UPF̏#iWyrMw+nxZ[^ް$S%Ma I+޿>˲f}عy.\'oCHIk5ӻ+%S8 &٭wޮCt#\y]^1?WNnW?jH)Kp}SKaR8Bɾ%ۭ%(>,OO\<βp~spq,(')pYC7-Ȇ^Nozy91]K^ւ;?je2[^70mME\9Mx 7Ȕ- 1U!iwʍAaAd  J(nf' ƙkk\o=?fYtjʬ;X1':l!D ҶY* <pMo1Dػbc%lP3֟9{2!Z9r;k`fI` iL7`\jkI"F!ƂE]El YSМ F>F xq%3x9{X1 NɾnmRBD?gύSQ@xc=q?{jPu6G[8 4 gގ24/枬i29ϔ貾$?R7A~0Rʄ& #PUFq#mC%"IvbCc̘ǣA,^˥OXDrڱe(1z|gr b)DPW1zqp탷6$'-;aE=$+ >ը6 ;;PһDѵ2fqM^9ѽ-ytz6l,-׋|·s^{^+CNb]!7AU4!i=o  c{3>٧to8#A#t.| v_9oz܋k.7R lɖw)IU(K'RP= VI~d\^cL*m|M&m`joZ>%;la쩬 :E_bgw"Mu)S+eSG=7棾D~2bqʽXL+ {Q1&% ː2222L8Mʍ.}wr,#^kr@5C?T)9͏n>xoN=;4m'U|TPX!K5K,|QR7 7k܊9"["0sJa̞KAЖJIKoˊC4xvwJ,N  qӹ!uW{ǹ3L:_`N@9VȂrom!oq"ǀT<ڰEIX_LDn<Ľ/0ldZ9@oq^Pvj6(גQ2F?:_0UEbxJXXGXހnNx;wX^ qcP 0ߒyGwkb?B)}r7 5D2ҏF{90KYG6`(uGd_P,<~ =꿛jj;  2 ONZ<-w?0Z 7 f4s׫Mf姊]Θ?̀"l#}Z2Rdr`">~-lV->i·A-d B:V(t̡;c=6`:mݫ`* mh!A),%}4˺[2my-rNM ~˴<̎YyXN] };ե qy\ ==2Q\RYK}ܵe2v2 7 }YK !m\7 rR^r\e {4h8K !۩Ȃ\LYV"V :D>VszqŴ)x@ %u(~ЧuOS| )8 ӭ:ki'G1ADU䣇'%#=S~bxQIu(Ë.s :h0#-,GO4sne:tIhͪy+$1 vTyу-}l CXIA1谾HS#ltD7g2t&{'( `{փM>넭PY}lhv1F WCl*vU[~j7SMr"e&2-Fc%IBݽ`9ܯS+RHDVHeD9PDe6V.=˭d"DAtȓ|î'\F -xr嚇ũ eQ5c]K֑96/ Q/rV&>bvutľ1Z:q:b劵5F|Vm,1=c>7fi,(.ĝyyxOڨ 1չx+B#Y`[AZy=>5מxk]hvLElQX[[h;$fgו(d7?˜+>I_Sa.;dlG|si(3dxW|#*ܥZqF:gK%gU" ;ISK7hQ:yW50Z^Zzw۸}} rHFD$ޞz,G+g^\qi`Y H~_Ks/CqkTmt&?[n6mNVFosP.k.Ƶ5-ȩ%\INt',9+3EZKnzku>oa2N.(q~G ݧK̺ 4 
hF@TQΎ,+wό,-g JmJFf{>jZ'G%Q yPO5Nb}-Wr-~0[ Dkΰ}'#䀏mbxBr#ֺ@J*FY#zkZ;.I=#sB[z:`{wɢUBSr>&ei \hyҡ6?Ϊ`#*Lr#4_JT%{AA"[]7w1J2'~0ogu\/&ՄW%Zy?X 4{A[CY{B{?񻫟b".my7~$7L6:a^~ll[tOn9e+8mjLQһ T%ZnՂEcJu󳈓ԪiLT UD+XJAOCh"l_gr@Xo#k0?/o>IbU3Ѣ xҔ}H8KȊB&5,t S|5F'ń^Q] dq>!:qD0S!W{Du o³iV<)|Qe+Eʮ6]`ghͫΥ|#"r2fe01Slhkm ))EDU]g M\a$D.R/I*mVMMMS\ÿȣ6aHv4[ czNTEj/l,l.ΉÎ"NCJcS2.ğA{+DA{57x:niW>WUBOz+lH8%)<{!-Q.};QH@b`ػjt6 %Ζ mHh.M;27ˆ3?Wy ij'rM%'ob!g $zjCgmRUEmģv!g4 P).,0- Gw96;t[`L2wlܾt}À9śCfI RYuGpu t w~XǼb(ZBws肶nl`k0\ fNF'hfSZ$ [n92%xvp;Z[z*e>(z_ G|wt}=C$<'!/SaKV C/IG8lngnJu*=E "3*FZ,U17^w͔p3dO,e<xm2^w\f] CGfmc wD9[ B"9 miFi&ıvAx#\qZt,%LVN!y~Khޘ*0d|Gz2a=IJ.#G_fWu5hYNi JyLRs;ߕ9!GQqZ/Y{wF=/ |MyXDtŹ=R={f1d×ION1 BDpWɞ "5[hs%S}Q444 RqD4@.McTwR?7W>ne8[X{`Zm?t*Rؓ:@վ͞H] l$b\H&{c~ |tDJo;b|D\vwq .VoE׵wSFvǤR'XNDBFC+vn7Ju {`iv'@ɻz5D^#&_6*'HJJL ?0 E1#箒bdۤHz{C>q\m-scL !2QDm p#dIu};{+a 'D&VHjLRgȣɄ',LAi{erOO 8sω adHO{dƑw==-wC)|'!T&ț1RrZcX:⥋c'26Dc*yQ#u[XJ-v f=scE rkl>quT.wN-Ħ {E,5*#~{1fo@ˀ5z}pJڏqBIe0j~] N75qCΖѣ#! ѰM֥&|כN3pTMpO'oMry!(@A}8 xvx 2b=&u` 1e?̠72y!Fۆ۳lu_Mm@B'L-Ĭuɞ&UM_܂ }<u. ]z r8-T 7YNV1mKpqKaVPaynugƊ=Yl\ M7 ۑ)CJW,AL)O OyŖ~Ubb0#ߋ)cWL뚌m]s4ζaO̙BgYNCGX>f[?HRŰ4Jog*}㟀8j12` HAzcgE") OC;Y%N |j`dcu)#tH\|GZIH5`D:oYso$ZA`x7,avaih>@$W I߼z|~[[5oߨNj3%1>S浒X.FP Ik5eɟzץ(NA+K ۘeb?3.ǩ 7L*h3ٙ&5 vYrXSJ|sT3a! 
7TPxWSV &j$Mߕje (ɚhhse""wsV\:Kg09AnB8?&vN")SH|OÛBwsdD@BD24:=95-!|?gT(pPe4%VTg"t4/NaI.7L'1fK ]܂7.fr&7+GRzZc{4/ǐOTKjFCUK9Ӯ7g&N̚t@{T$1BG;sv Yn+Y;׎2rO:9@b驑Q+[i1=NO,luJTXTT=vD[[Dm {C ;20{bBF_dܕm6(;fSu3(%q:VxYge`zifalWQ YF[@a%.I̶QRrVhxF Pq&;^bd`=ɺF~,XH>nl|(&;z74)N퇵ljKj@D-,, cLL쪬\(& cBOY7$QK<_)ɩΑa c9KIu.R0~'f٪8SHYaƽX,bݩB渡Ek~-*D-ex20*7KH-]ggl.%_nUL:; Dnbc 2b^ߩ7O#49)DK y|wF,{lEX\:J8fO6E(#duV?d*G?n&l ǩ)i˵] 6%19'Ӌ-$3+$SdvR3MR&D$0=ۼVj2O")<a1ncvFvPlm5eo'?~xSVGDg`[sM Aj.'цmѰN?&jjpUh8mCeP("Kl0~xXKߴLUԊ{j3b؄;TөVo}g4p]2Dᬜ Ha~yxc ]Ta;_jxl7/[}j!ft80SVM׺V#CŋiMeUގ|y@Ҷ:\u~oZ a{^ ܺjnn5ݓƅVN}>Fx3 Cj_FCU]$K#L;2y$U꡷^ktSMf`p<-E:;+85O@@v UbƱ&[M~,˫m ,<[ [c}4A-<}#f^>Vwna:oUF!.q֓ZM2BKR@L\o1g1GE;  ĕ;Xɴx@]DUDYSCK~NH_픀r9[5M") H8r-++ w3&fsczEU aE4QrYNKn#B1yhC|ȩPl>-TY[c eUK=[C3)PbvFrw wM|38Ȥ1?ĶJ֤ jZFi w>_I3:H biJ<"QQ/[.w̎ ێbS]Ma1JDTZ=I.Hk_}}5BUi;[j4itZ QAf8^@;q0q(Z<5V8ZVJ0q娘F%饴`JfH$b'yIL~kvi JP6k̑/(-*&OLDu͡а & /ѳjvy EㆩFR'B8XkqP; "hE^kCM'C,||*$@Rg*^+o̞U!QQgPm,PL?Y['LDFvtU2ܶa%#Q uUϞHgA~j$@[>+rWl*Pؖ4+VN_)?MQ:CjѝW12Z rSבmieV*UtL8ȅ%%'X tYN i_Jy@F("YEUUp8E_bg'8)jxj>Z^)X4Cx>n§6zpv7EGr;k:D3E(mY<:Glg>I9pD9Hjg*=GYWϞopC91hG+xuCaf£MZ]3Da ō HqFl\?>?<0[>)D]AʉtHT>бhp'{9B-*6g0rcm2GXUIX9.6*x:4J ddma=k7@KbUw8B2@(ƖNJuCڋC!!Yk̫w0G{v"#jƑiWؖd/ /R "2؝DYnES2ǫHvydhL;~#'6OU lji#@#~X+1 гxP\)5QɅY(OA7y=<}--r(w2%xʲ-@L@WY',/lpZdK'QZt,4_ǩP,AU  `n&0+-s?ZoSƜی;ֺ;m7X8F2 R6=Aqإ&/9i1R(rdsVʨv ?Tgک44,?ꦉBφ7w(F cY="E1 ۥsh[T8]h"\q>1C1r:ԅFI@U 0N=a ܘ Lۦh=CsuXS:ԧ_\\oooA8'f\wqdhC6K͕?_Tkq `,2Eщm"ϻiSd?yf+ӵiPQLtגN8z΅yttGiGSmexEzssprqah Ξ~=qprY>㎒~ATX.?ơ7c,,+=8'm$0e" 8i4Br4=gL7 XosZU6S9W7}QN1ɢf_(>ʗ2~jq]bH91|pMRҢ!c*_JM3I։-A SZr4!AʽwLv\{ro3rёbnP3H=dIpLv Z=q {X~'<ӥ(KuO{_>^??w> ѡ!^>n_w̯CF^ oWOߍX+9d=f_2 {!26*L|#m`A4;5`EwO| D2wRy4W}"7 }g| No&h9=8HUhvJ`xyݼ1_EAD(v]yn.ٸQtptaL؍)I:91mCCnivGO<]2IMy9y5@QЗ %G';?uUsNI#²ɻ7\AÕ>`F/`wBj,kwr2G{j[?1`<3 $v#)nuڈd}$a@n@4)ǽ Ҧ!j'3w_w=#$¹/r97'.{Ȝ/Ł;,? 
¡QB d@${@%ڗ+s*n_' sLTqmhO:@~v| 4fGbd¡sS[ ۨghaZ(=k x?D T$ixrZ'́k5yUqveJ0-+r.e(qC~7t DꏭcxG萲XxoN +O]a X/-.9|H ލߨ_uR CI}{*;r~t{<(ǩflńíyhO###WbBoraNc'cwu!)G#O9Leni1e` J&;*Ge;8lN Y9|!X8) :2aT_Q8eV)CD;V8?ǬA  +axzC>(:mi߳1'y*Kwhΐ[S h)6[zDor>Y[/-rWHTl,3v$z)K&eV^}jAˤ˩~+<)CՄgHDsnc %13YgiƙQq& taPLh^@Zʃpϯ^{tMMav%Bv>5$ƊHddZ8ԙs64P)Y:9 CSƊT]7ƙuk9t0LX%;dE | s@v߼DcUk$u1Y##>Prm|nSp'bmIpG`>ȦL%4/ ~?Ql*Qùfڻ=4ѣ nTL8$5qjtKT39@8mcaEQlh@9+s`/H}܉7S0Sx>&{+y PSiA/[fЩ}Xbi>3 $#kK%݆J&3qAw3(-AWlk8 qv.9*;sC{1Jq)YthkҪM W&8XGRǵןҭ$=pLϩ9i|r=-Ml?-H%AahS#3}?ahmTsbştcGx[?ߎa}+$Uo w'ec%~Ș77$7#fVcCMKcD;\36gײNyέl"RWPKx4a![q | 1hIV>8Eƹ;~7ES!~USE4m`ϩ4]APua,k+D{yub{՞ÖJ薇)@#?J( E`cK`򤜺I*"i^*5gܐK wJ"p磵?ZJߴA}類Zvw15"$@(yڡז{|`Mb|9A|~ʜ,oIlkl4NӨ@.JhVU6_p:'3Y* #i@E,Χ(k$L X0@A=ҁBY$r kts-Dڌ3DMҩ*`|&m@4Gn;i! $pƚ [t~)ʹi;Ø`P?'JlOE j(KkRUrfP۶T*%#jJHE&|+ }W@DrcOѡ`aO[o*ޗ!?9h#.?y 3>1㽽?J~HSuaJc1Lq>ע2 nl`s=s_n >LJ* n2k>9qkr,X`\,D۟J!yMI @9Qtʚzda_ITن"?^6izj--1i6L 4ONuu݊|! ύFkNi|B8Zbq <룽tG,F5MBM2ۿG?n#"޻Ww<սeLX#*w8s̀.>5ϜkZ4tD K LCۈT,6Onq*@bt6AAK\XUt`/:fCX1 (1^Vu.64'c.(==@ r.u֮ Cߚ:F3g6V>ŰEx0~JBR([x]T,MiTnFzđ&Iȿ(!7_>+"?=f^ܒtr wy*89LJ)V&m۶m۶{k۶m۶mv7٤KSR]Zpȸs8sru03_F$zX [w4T']rdo=%<\qRJsWmTJNh6%ŤoB2Bc p.mtz Mof?:sYD B,6#ubw}׸<DAh 6(Ko^R^5 }4:YÂe1w"go.%''7*(Du, ? 5ێ[&> 5ؿ 8rJ(ٌ#br^69Irv 9H$=fd`ҕ4DjIut,ϼv ao5Ya*Ԙg(,%#cU.!cuC֒PT^ )ꀪ:o!Xz,a3(l'Fmpk8/nd Ao`g (_\: DF>M6AژSxkz|\^8lɔEOx*[hCkn(.yF$"`X҇jǐ_AƄ92U"Qzky ,q~B4I=y[Rɂd$>#G պY:X"b @DFHV[SmX(mמ=Ԁo?IRLeA~oDiSΞbo:>Gcձַ RhM5*#շ} ~!]=|EDN|~"o%rvQoof.Jf6FڊK,u{z;Q(vna_l6`CV-zPy["q0_u+\XA5Vs>F!YrB)qet ^̗e [G?E~,X {e`byO]L1]s\7WvEYRւ ڵy/ ϳjQLq&:)u?GdՈՃ|]'?뤧 g))ג;}g1 _ {hhrb%\ULictҞ-8.. ZStH_ +qkŭ}؉,~K]d)|ƸdI)6Aw,}'=/6kAc^@$yv8>!f:@H! 
wH*\3_V-#9)pıZĥGJ,Yؚj `JPSU/>8;89Vefhp[hn>#jp~"cFB F.6g+x;J >E.sQb Mɑ$kIBLEAN$tHVg Q27 k0Ver ţ <[at;Ӝ c@QI#ϻɐ5 \B52̋NKmga01 a^FU\8p)ԅ/VRbxos0P|69KJ0GqB` sQ ]'Oi<[D.VJe d:#~;Uf|\TJNv$am<_%#F Y֬o\+ Eڌ%cb.Em(Ucͤ6nf},e@۽M~ZwMYi[gܨfBój92]g!{I5AIsse5KaRs|!4Btw]- `9d13kSiĘ0 S1;* !JKy Ĵ< +.xhdnɒ@qXrܫ'Ń0Ս/̼AQ[R'axsWV%/`Idw *}lC޹* ʞO6wlJplFwӋ O,S_'Ʉ9pyѧ8s'e*7 &@%޺f{~򵟊tQ'e]Z&SB y)Ͽv2aJ*Pe3LPnF![UÝkwwEu4_}5XnczjFcC5Wl*{+Oi)ӻi@x#/*\:ĽPtG\NՁЊCC΂fDj1MbZ%P^b$sң`p+ٱM<|H@ 0_P'Gӯ9&ؘϓ4}KEtM{Ρ0RJ.ښhbU"6Dm.hXnj:e#c!maxZDvS9f껽̞#$|J "FE(]J8:$3!S@Dc>%+! ,59Ӹj|wSO_PCl].STsTOC| Cm<̓: pTiUM߫Nh ͙~eAq֘ƩTȦ?D4 yP: t>% Acd NiHVUSc$g=:O[qaJRl}ՙYyvл:g>_y+7U˦銏`sň w v`*k׸;kpZǒc:)` 9Smc,D\w)v92Z\%vmǃU4!^D]uXnP4:RM9ugf pi*<лv{s"gV_jՀZ̭uNj(mZFVaR եz2R,rs=8Z!P>{gJb-lvHbo0i&nwY0Z}KuVaYD,,-'Ƽ[Q6l}Zڞ\Q`f1S:^4qGϷ)]Jj"~7`Wl)D^*8{d@j9T6fB R?-k&=hmQŽ3p:Hژ]MIua"q{_ 81NF}zr gaH_'Ae]e?ma)7Շb7^;FkF Rq_խE@8s|xN\vrT,Oyd*4bWm=U]?DOn0~m |i୏e?D@>.b hh.ǒą`nVL 6'eEٖvբ]H(ff4(57Vj BJ]/Xf e/5[PtʽZ3\gr4X&aqT%43)K\ڟZYs6|5nOB-忛BGʩȈ"\nG8,f4 q5ώWnN>WN?UXjqY/_Wݣ,KuzIrT\h3hO('-_^>;0 jwšMGW٘7,hd2gHZS@ܹ᳷.Q\]yNƌm}RtWT.Gd{1z wZϲKJOCx'A۱n!g` &w,cqKR9r/74'R:Y؁Vch?OOI IUZF Dz:Q"E 8w[rP!.YvB0穫gm?qpz;'ɘjJq@1D3Z0]Kn ~F - 5v/BBۚ/+2:ןQ|>%<(:ٜ| @ng(7M Sar ^4&H6ay'ԩRZ:H RO9 iJfQgj iyȲΎ!_Q;߄ԬScMI=T vibK=Mj~P˜PMO:7xU.荖>Lšk|Y;OcߞyĴ`hL={'T of--ɳ{ Q؟~rvbmQ[[AЭpvT)6R_g8һĄB\pxqjlRz.2Xm3 w8J;W%seuф"5)2ziؔ{$B 6tI67~©#,c`me`W` zcS:*5`SU()וaª@-I1)ykV,">XLLOR=-hqվmMDnc/5?,hgn)Ā}zSYU`Y]>9̨=-wv|di*|Ek>$%yۨ?~77^z<܆WvLn!O%]&i<'9+Zp͈;-\8}ݠԎiqb]5 훪M]uokjU9,W:IidmnUR/K^ _aSh-0_?X= bR2&b30BB.HLVd .oxo .?|CvSi AGak9ZHJXrT}&ba/T8Lܠ`PЖrn)4V1Ф]ӑFs< y)%M!*g4& ^VtJ%nBjL_)D$Z& TnI'<&HSb:A*٢C= \-|OqaDNZ;H;U(7ӄPT=>7J(&r\)=eh/߭5F|^UwrMZIH]$D_&"'Iy\W?)i9r vsCMrh ׸z YHjREKڍBH*&=,KU_:P'O5ɵ&bL-]#?U]}dl!KK+As̚ ІU*+WYLZ?8g,>x/<+O*%%^\.<99zvL6uBRg񰴿 d4tAv85"!Ca ( 1 5<\_ȝ2@>́6p2=@"]A Oe\s>ˬYRr!OֶDث}m iZKWZvkX7nP:%DS.Ys :>572m[g\=EY w#͚h8.T7ڒ0e׭xej_w8yTxV3)JO\GXn1ofϥ4Κ\)&8Y)Jp$V:t*l6=uN:%bEpȃQI,z 5(,uju[;;ۗo% !ڟ˙ ' KesW56'nbtN,YRvOٌ#8ȓ|%q0T(gM\\CtSģ>Laa@Zg4S.PL3>aK2|^_h@pz 
A)q0ā?F9ʈeY2XMMUJ T )H✿H^׮oGlazG ZƵ6cu5kʡe@r/S=ט"1wLS'D(ҖE3Q}=ojћ燚  J1ĵyySl6Ug엔YBFqAWIf֖lvZbdAAmwBi@@NߔW!B;*p/'W1Rz rDQ@gD䧎Ȋ]O 1~ly,Y ⥋ . !zΛ?+$]HmvNKf9}\1aΈj-+QeY UkP,,eHg|uy.+ h/^<-}_X%5|)TqIɯ'V~CXa8>zjd.Dz9kEf 83{;f:ieSC8eNPnyEejiU{o~V6{n뾱 UdVʸK9Wz Aq=䥠ǬT{|q,=*Nha85C-& >^ꞼmL?OZZ.bL &=c|v33h'`Q}p2@0n6D;tXf츼BI2/̀/;i~=US.P dfy`*7d#g#2`dQezn+V,;*.ҒFB.٫[ ɖ2TC_' He˖IvQ2I-ҪKC LqtL3б$rUw>vƤY&YI@ULH ~2l(x‹=XeP {}l~4r!݀|;jcjUu] . {'Z @-=8*]5ݗ htق边Yyrst[cB臛Te3kP)dU*"-#@3@zGNB䛎o2s$O1 A0ѥ/4K;3nrT AdX60cI XE3gYxV~*Qe<v@2C%M+NqzyaւxY$܊I)\;/!!I&ӆS95J`Pf04'6E'(C;նkzCQ3= +L{iճa|mȶ t;;EpR~8iV؟pup@' wl]f?~Q3M`C;xn;\(X Z'Ђ][4WSVjutf<?i ƈ'0Pgd(k/`3~UH/:vU~) |T&(xD; _)|.@tEGp`kO"yT[mn}/]WF.*ϙiW&b[tS 'BA0|OS5CL(pӞFPXyw6\F.Ej^?_NUTXy1#8qv qލU!9$GU 75 +lV Z8Di|B=<o۔o'Ww}wq- *H0aPr@$L ~Um 5{eD0j(*^\0tT%%%IۺִT$,g׵KDecXPԇ0,l3)_{D\VrWd='%$$.GaDwh4-06nsh@ D_rGg59bW D}=Ž}AS @VzKx9 0 ]ڭ*O051lH7Ɯm@mrכ)t]x=O&aj X●1aŬ3Jxڪ'oĽ~ Oƃ!;ű,޵ >CJ<`]d t0n\lx]υ8@g/컒asM37n@\9^(d7APPZ539wiW4P{. ە^sYbT=$})A:E4s_ y1P|\WQ9[YN*AtpnẖLAjNл!Ar3,d2J$oDc)0g(9ytyx!'KkCa !7 vŽm*@g67@,2cbЧj0{7M -UPH4!h0h&[vbv-@*)47x~#2̌NȖ)G霪ƫ.wp/3UVOnbi,?ܝ>C<$i;kXm:V*uBAj9OQ8C!GxAR>.)xhDVK>WL9ͰVp'ohf]2v[f Xw0kzy/Yh7%.E*;{.+B2LOc^̴z6A ^np΢.֬!o[kX8O&L]qg"Κ F5E1PM튡5SV=Ić' Pq] ͗wnZއh.ۍ>Z1ncSzRWܽwAIQgK]TIR5a٢BQv[b=)|+ 8܋jyCϚgS;z$eso=2ǹȔ"GB&|U,  krJ>ˬ9ثl9.SStXT-.S>< ֫p%9ؒXɿd!|d ?p` 2(˴{z>i0NLJH+p:tۉsyltc|GQwKq;S{]z1O*a/?ξ ZOeg<;ቧ?00q3A ƌtO$X3Ξ=zܣKQdUHzݚ{7_zVҤ&;=N(w(luswO_u x"CWWwGow?olꉾOoY3ߟWUj4j 5L5匓5̓5 Uתt.*P Cfb?wle0^ Cy5Slͮ%,:DSukkemeM"SukyneMRo|Tpert԰C] ܷWO9nh}A\&g AK>bCuy_+Ӛ籽sfUPc+`AR;*Pό꟝Pxc#Pm>aNǍPP88kuݼ%G<jww5Y<KR8-bA-;+jAn]+m~-+FU*xXazիֆ̧'1) ,ح?ィ#VNdB(u{9t|'M>y;.YZNR _5dfRxynx[0Z0DpV0YxTӬ[8P[_ǂ> 5kR05:\U\]:gc6k̄:Ԝ[Ơ*>a:ͭU]2B cprk^ҺܫzޅoVC׏7f8ܚ-W͆(Ncr*S8>Lrr3 Gɺc6rz6kwFB7syb 0Ѯj"F?<U AD;Y2صthg=Ӭ^cKUCq4}> c}^5 Ac߽Px}c_3 ū ^*E:ȘRoeEafMȅ 9[CS-ܨ1&amɷ`xװJLIm*TЎ LƊAycPs[pZͩW# )}5N4ozܼDn.rW+sǣ#84Ѥ(eɋ|uZT2'>P}=^8 A})EafD }WtWoNYMm.ΎႩ9RpEKA|7|"&ލ%6t>䚐ߣ:a`@#/P-/>ԎSϢo ZRA7x rU){5^>?"Ľ̂u;]~w E@vI!pX% PiG^ʻG`0 
9&;f,Z4|G%kFR?slj]yJ6^ᠲ\&jǑ,_Tj:ZV(K6C=.n- oxFrgO\2gҊw%.bh#^SPpUZLU{z.@׍(yY?`zԬRj΢]qA3_Og@&!b=+QZ. D7E yms#AFgfs" ™qgAh(Ih5MU_nIPf6ߩx8v_J `TU-0^:ǑOHAC"79@vn`/J4M\5V 6҃dp}mMF_ݹ~7'DfO)2u/ ܁5B$NCY-w(p,߲8Yޱg>*XTD+4׳|ƲG*m#r|SD&qKz*Kbja&˛ǾHP]6{u^fR@{l]_*~2^.g 9N'>%`qC}1Jbl5l6_݁YTheV!{Q]Gwd8׾׿SS&m:rzQCdE;Z]+{:D,$ ]hqPТ-־V-sP%COxxǔS4Һ#@@: AW(Rv\-8+z|wϳ7sx~igCb'-x.oKv=-4Nz@HԤX2x^h:Y~WsO8`Zryke.tԖӤ>0-$Z>О(X $jPmh @rx7`$Zhh/J);̈́5-hZZS^|Nbw.J4T堈WirPQ>L<լv4 Y1Lp ecFJ8l7M֜6 0q39&eNV'몔tiM%4;|t-ӎ,_5zLôY[om ,7Eq2>%㩭GDRW@eYQpY S'u;UC>dhiE@ 6j.?ckuA.]RlÓclk>E&^m-D=O?EHFT.T۫8w2xF{ākBl{1԰SMX٭"edrJTa-[)tFJ2hRi(Ђg 45/L HMU(o*]DuX݂YH>lb@Bk[ah>wE6c 3C^ARlr`V0[XǓ'E9N},iekAYzJÑ2}kZDm/o̕np;u#nȀu/ d'Z ~ [\j܈0/20^V"@j R4>͛$LTlRBsM ~Ƙ}:mV(ྸPcP>=O=TkTDVkA-A LrtV5-Sca yWfʥX 9d(HrLGVX[D $uؘ?P[r}zSZQEҿTO,14*\}p5zaR:a mUK̔ vi|o<$#kk؂;tO٨nC'tc4.UIl#Ь|p4$Nw$`YbϚz[` r E}xz[e;^_3  f֜it {X 9`ߞ{QSMm2g3o螙㻞5s wOZ@:oP&c&n`"?$x3o%IWU0;<QS; ͊Bx!Ηw%>mI_8Jфݗm6EV\uOJf26 )KAh&wϘSqm{71M_qu)u_Mk^O[^)g{vˡbTU.gN#V\ C~q^sVe"* x;Q:cT͠ Gw 6L*V$=5Z^<<0_ƳSu1y~\|Ō8:}g(["R*f}Ƿ8iu"}9n^fε RMzkt 򺷘猭 kG\g=HAVS窱|OI dͭe^dq-7]9UfR;kfgj mFԐ1؍GˡbNdFD,E$ȡXZaL+Bƿ \Q9o\g]Ŝ [_o3"Arbk( ם< }%sŚ>ňѶDW,>X/Z伎p'V=H'L3j͛O݈lb-q/&Le08 *lbs|q 9pc51*٬~090s?B4<-x̝h`4(Lmέt#~\ 4T)XbUH=;z.5M-RVnUN,a 㸼<9VFmV;͝9x_:%̦[` My姑gxӺqn-lSj''*! " ɪ臁a! g{C!TyW~TUдjbdga"'0}y^N*V~6-GēUTU}{k$j\`h}EχW RFUذ]cd=XmQBxAX'8l#u|wH6u'*_i,C_Q7?tIͤs% X1v ` Lo7>F3JT]-Q%Et5 V3W&8##fVV;?#Qv'Dq!I~Idڨ=Q`#ɓ!G[ݚwNux,spެ1_dj0_7 +(% EDkG[~qͱOyWņSh}~x #͢}I8dbftPֻ0к^tKwvF&)Rێ JL4K D Jw`*i,$S#j]CAX +z=ditcdRY$)aåܣ^9}_N-Kur%~:Vj =Y#Ibתʨ[KF ݫ?էO^x KAY~<|mK;jj,ܨ.o|`Ƚʈwz6h/5\* O=F{ԯg`sVwWF $C&1qac7|XٛQEpm.l.7mnx})4շ2 yhRׇwn:3:*N;0'͑Hq{2"fho vC2c`r#̞SfA<p5qy|1R(bV2);0èa ;q+P,qh&q6O8FC8'e*q6G"ۀsxX?8L8, L8z=O1zYoR^ȑ@s `/pK%@(גymL`%)0beJ0 t7Ô-& ٢ @x2?D Hl@O"Atu["Jۊ:vH,tLyX. 
~O,ctd\3vV'Nsh7~ 큥`÷{C9HQ^..jy4j:]Yi,m3q7_XTL^IqJ=&)*co8^=ˈ,bO|}D7p|1˵>DKKXwl.{rEԝ+DHPG]Q>'ۑ;Loi/M/GX@EyT`,=w<T~9(1_5+<$ށ)xd!F2GY3uZ^6Z:8m>+v*.3p֪> ,-CU Tpg/`` ltwҤ"w剫'3A~Q:?@ 9^D]S=(u_6~ $UaU=B9^="D(3Ù6sL1)ꦮݽC0ǽo޽7RjJx@jyE)Nl4SRMZS7x1IQE`U7$LjCXs.;6nr=-YA Y) &8/*s:fYetW(FP,[y$^N%XS-d%x<'Nʜ*-pV8x7#ࣚ*p}8LBhF? \ޜPz2 ;+IL6?-s|P|!3tdgsZ! wdI Gg,X{T/n@wHv!= 4KȞ$#jy;%a'ahX&@JV1 A CY.) ؾ))4e9BvxV { ʗ@g"\[bA7ggq䟛A*gyk3"l7@C_ͷڇmDpeyʉ9Mٔbc6[t#A%Hj&J|H%y~kwbːƸIf[$cԅ&5$"#!><'N! lhGoaI<9.cکh]-k„H`SqKuExO[d f"ZǶ 1"uL>y; m/l*@Y㉈X!5HۨI0ѯ+ S=c8|:kBToRFOi/VHN^@z1ljvT@P  ENB:JirHYN%rj1qbL*(KGGdlξ2G[Mv9}5c653s|i}xNB4[I>PX#Քq}^sAij)*Iհ]mp[Tfg5&M.M <;\iָjhyi|TΖnHO}V"qcpGu`ZٞeĊ!yqQC`eJ"EVNs,~Bttt6#_rIgɠݤ'k ym݁xg8*G6Mvޠzej4?5X!D5:Oootw~=Am ZQB_b?b#T0ݟ'& ǵ QZQZ0|jv%aS>MfɒB%rP+[!==?n ~1|X-Z!;2 7$S5 N*Ùzb] i[مZˆrd/<.ϙn2)mVFN2`Ea 8(H/_{|-ؗdn;_X wt3SHP|5:פ/,/h a W;aҠ+ƫupůjG/*Zh~á{Ayس`"7q:eaL@'w=׃÷޷!`s۞%%WDqzc*1*[M^A^KwqEGju,ڝ˼gLӞ%r*q2-Tv@o?ǁǏ6( nooYiTQmlc_vy&7uvy'EѶ _~AŰ_ã}8OA&<ꖭ9i #g1:&K`;y&nV^ wwnEFm*\5\6R;&|i4U#r:=[Fc]5W=,NjMpM٬[1W=(CMfB$l. ŲDLVZju{0R1f!*U F}k$v>?T<t?f(rdc97ytɞ%fBJ$pͰa_xHJaq?3gi*1Y $qQzv;N(Lw-j5:}ܟջrXC pBWtSp3㧕 <{<$ɠ;0o̙c1[y7Uٟ|u'M#7p,^Ę;pDʫ݉MR؛rт,$,*Ɠz$dnf.)'2ݕiAcxW% Cv{9Cl|Aq-75;:qpz{vs3$-Pj |oxIV: @yHv3R-$`} ckyoM]ɻj U2[?NQN)VLTXh =+TJAJ§Nae*RoXI |iT6/wHzI׌. 
u?j"=3HcZ @;mDy.K앛Dhuoh 0QζP zpsp&^gyHr@⎸V^պ'a( o%7cI/Ү ۰v'H# ),?&Pa7׈v>J_Df͑WOIA*0[}@j`a 6=& Zx5|w2avoK/ܺk=eZjbc{^C\~ؔkEHmsrU@ԫnH4(u*t,Ԫ-fJؚ\4f5A?&ibvMTghfu^g:N4Z2Y;x琴youo/ y7PL3ԑxѥ1ݫ=`ގ=9y`6"ΑOe߷VJ*XpZݢM 0Ej\+uOm: Db" 4b6PńkX1.(e5^ ZwvEH0ŀk+߹y\3Fm{7to_zIjd]Z `!Ao$,3 ]ˬPI.Xue"JܸCRwk_2H'SʸcZ׌.l?ܲ6p $+cJ:uQ2Xc-8!G&['Eo\Ѥs3s̕;)|עN ۉfI-x+NRڹ-eh$idzud>8BA;>@aBl3TL-yt1[5c'2i8 Ѧ]1?krW7SZ;6A!y9Z UBʣ1 ,ނ7kzS69ksضu3I9yG/HՒz$}xtIG7nێtbWoyCW%A)\K`s;Qi]53^+ =F+t.7ERZ xFl?SSMxH)YmwDCVD7_lh^k$yƪұ -]؅4PCԃ'R4SϜOg3WwӶ0b2s9eG*z-Fbg^]㵁5wHGA!F ɈHK`CW[뱷,+49X0G8g=F*VVuMaMaC1w偽M#/M/W5}N_'}U[%۶} s_c][IR2dCRx*&y4hQ8/&TKK`4x9&f?1d%̔]ʏg3:Racn8F˱DCNZ"!`|G:&\c$F<"S8|WYiksx?oxVv?]س|ͱ'h={%ZI0~2Iо+}-wwT/!Cڱ󋏇k*W@MTHxn_++Xy[݃DfU.31a!/āC/*KY&Be(Ϻɞz_T5yǫ eFMVYT[Z=ȘE M͔tQFQ];;p{_ ?6te.h=$UNNb(aevu'Ϡ⭮ș)5S:/z?{/Ϥ d(TW_fmI4[XI(_uEm?,\vnEJ?n8#}So.s%Cy;][)<<ͥnDkiigo=jVEI_\&!CFl/;ɞxGwLHt3g])ɰ};" ̇tWG2qwnǚQe=, _׊@1Oß)zőJ\Nm˖NJ!:gKsq` @d(Nͨ1X~,.ѫUO4(StrT?w?t'c,1N\t_|Kb,Kk"ubՃ弚"isFЀdȒB$6ƵK31Mβ2'Kv)w$EUH>J.h`vCy\1{edE>?$ɘbgS~FTګޯ[\\v3 l\ =j(L6>=CuhO(.E.E 9%=Ze~xF@5Z|kKR"+gYpPaxG˯E_iu_ϧ48 ~(ZG|%nvL'}yDv,uGl8Dʁ7yj]W^Ռ^0,<5bgU=z19^B?6܊C^)v0\C}]5^!uz26E^CE|6ƣ$%SZ/4b{'yfސ;v=(E`uMjA}hLqq6πtYgNIX\i_Hl)<;vc/D5JqE$VLQ?,YPo/][7j FՆ55dWe ~4h1`s_؆yTr;+}ekEazUV bsK?q9aKBd{weNsAsmnܿV(CF^ߴں1 [Lj__k|}!5[~HچDn= x^VrpY؃ -Vye_%9n;f`i#0 D ѱc(CT iw^|_Q%wdA4CB%&6lCLG%9־4),->?eYxA# !k7Rsinډ@ډD>mz20+˟Aj7{^V0})@7 zZ8Jf,N#j`ƣIMj+4$ |ov(6;XiM Nq F˃`q6td@m_x X:%Pژ*2 ]ϋ**Nb&lFDz_ddDNGr*Ÿ4~DѨZ$|rD)Wȗ+v$[#:'~ ~ `Ef: xRQь^^%㡏}ohjC 0R{1lAvjsEI*\3x]+UckNeG?13orHbv#/8*\[.m> S=pf Vvge w; JER|:mjy j1yD0FCqҞ]ӂPɥ\Yw^p̞M"OJm+ :\..ȋN֢6a.P2e#="ԩČWYKy+T ã&P6hϷd$ʞU' _pMX2Hxy$HNƀb^* )5%(U+ c]=6):_N#tAAWR^*HYexPku ~1Ʉ6i#?DBv6\)E|JAo4P'!6QSAac'JH]#|%?BXyK%?܆0'= [GY68rQ ]h[LM6?qskAX'v W6jr.k< +isQCabqVujE8NjHV%6 iHeA+oS<‹4׮[eb;!W$f8?̃Ɛdc5 ^LnS[}QH[my_k*W|:ta[L_gʑq,5ho?;#F-h/k? Ue.pH^lzOex3 {(U:T`0;fNٗvח36K#]nܔEZDS.4ޠD nIW`)g׼ks"a! 
2# Tޛ%s)O5s_y Y), #dúQl}2qj_2`:6 86c-hcO!q(1D`@&='5Sλj->Ep-\d `IgB-=}ûTm UsY\ k+G;In[+pV+ПK%ҒCػYVcN]m*UE1: rhvUܲEs{GAi cOku-ߑVq>J_缵n]+=[?ofw4'JxweT~ u C 0ڒ8*$N':G4{DlOepJ%)[&*P&Sicqx~g Fԕ^!R8OzFJ)j5bi55c8YӤN/7SX[㢉%3U s+}?by>G79(G`fsrn}C[v.tZpAU| ]J:i7 )9&1D_-joDdR:5UpVR%?B: sQ|J3.]l h7G`0Ova^0M}ƌuD#w326b>\46]M vnllaLIe%d D9J'"?.FSNx|M0B yYT|`3m.q|KiVL}6~/՛[G3.Uܤ:+sgc!|+AjP4z`IY;(Tsd6>(97(X`3uZWt)'rY+9ၙ*y|p8!Hњq}0ڶ( ,qL vNia{ꤷ4ZkDE΅yK)Y>K'M;L(4r{m)Sk^qBkZϛz6\y }}\)3D`бs$:,lGw{ɕ[6x F?>dvi_ |W #J`֐UiS`\geېbE*0*4[#zr?,bYtmp$ A rBl6x"w ~MN8+79hg 4S:]tKB#VoJR_.\-'|TP%p Z$مb1n~uO5mG:XJc)!zBX{Vd "u $sۤV%TG8kJ8NI|8. J |.ν _&`i 0afE>6sݤHL=9#3Yn[Z=ZR19O5~r+HcZ, 'ChrmNx͝һ$OeU,a R>++{pW@ o@ڴ4{rN]:F<#ӂCJûĿ#f,0-tV9>8t]QNlZ [JoX9Js5󸝜Ss9lM Ar6 '%X:F+ (HaX5ܺzoa)ޠj`x `r20 &0Zpwg|VӦ"Z~L5 awVEDn6Ty i]_&sp{hJa~gV$]#y/e*6r\>:lܼiYeGyxlҐȋvaF*j3 s&&Ȯo&xcc9ߩ^9%pC 7{},411 p\h0e!{*0 {H [<:OÏ>AoTsݐVIZ*%\,@kVЮ9;(֟ % :;9[%:殽N)>&ÍnlXBOÜ#Yt#r$ w=#ilE7`>V((T0y -{ )wj–5QXCo {73lэ7-RJ 3O:aC1XU ^C,%sc sXdUpnn54}P1hC~s?1eʒl%V׍rD*ɘS[xRk! E[5eD{SR̺M8u* 1F~ˆ$J?`Q'n Ƅ~0$n쀙v^>>lr 6P)L6#f(oAep64D B>I@HNX ynJ#y >=<"<]e? P0#w|P]E4_0A H&xI֞leRUt{p)BYbnFWk1f c-d-l+ν1{bnsiypF{CyC{.sY]5wkt=snpv<[x%"hn@x.6v.tY (!{OԱiaZy*+tFޭ'{wBJ+ >9AYW/+~F+К@ n*Dʤ ` [+Ox439kX{?ԘD)R15Ñ0 'Xʚ׵-H71buwCMvbcPRV*yD0))36ªm@Ƀg}+#y >XXܟi!~vxB3&I d/<2hl:I1ZBP@ xerQ?Gq0ZS_󜆐VfTb" o!~G vԽ#)(0W}dtdf#p"&qB#ށ`j]mk pLKz]E0pK3ݰR*T+Q?dP,17\c3xEͳoA{Tˌ6h0CqC`R9z8i1"1>H C40Ps }|Av}jڰ61a2k-^`i9}Ǩƫm<Y DPxX3u#((|ѿ\.ct9EwwD;,1Z.љ!.?0C_ ("d`N!(L>W:O٬+z`)/^+'Y#NM9tYlw"\Ko&|pއ?Q Q" |Pޯ<ҠC{kϕ74󑍋㋋&~{RPEFX0\^GkK9gO;|xgE OCJݻ |3'C1ؓ%-h4\iOQ&Cu2;)=~=<|c^ -ˇHqI2giˮ./Dс!>ZIUPoɼ딕3d ,͚3pfᙹDӈnq34V8%s P-N&UOt-~x}Fq?5f5 p&+RZf6Мp-$mw) &8q#_ 4s5xoSAx7sZ]^,3$ HU]^_Mrm/t(K/o {M{뒲l>6ZڦdJ5 )" C1zJM sC3n&'W*"w,3QS׊\b əGn-&*6>A'B^UH^?J](;״hP^:O*ꖰEEߺ0mWHV ΰ*0x-m$F4t:͔A~s13%[R؝4VMze;*ߪE=S,ʜkp#&:}Ws{KӬf c;ӭptD.bkx64 >ub`R4 A74 }iҚZW+h_^*X'3>hc!S&Vk'&; GP-6k1ZS87ߊ>wҌ(DBC_,?8y`a]oٶm۶m۶m۶m߲mV9Te&2W}1W35tW[=[ǣP"5nnmd~̌f|^?~Jec"+kb7؝A^ݒ_JZyVM7O*z'M(Dq, ߃Db/[ဓ߂. 
N^.,x@lD.,\-Dzxn|B(t[7F8<4:$'!c4c'iX}a}1V,O$z` nh_Z|` :?ɺaˤjq"cOdrY'T_cq>iN8x='J_+S2VSV+S4VEn$_ L1$ L,N>7Mr&q9޸J}O+ Hdyu0[Lw׮=(E7-.8Ŵ"gKt0cN`vN50mXEL,Y4yH?頋o`tެ'ȀOLóF/&'=2B(z|RVy]U)wXدr7tDf#:&K釅ua{:\Pz-Z; v۩emz.3;y& KHID\ +\Bj\ftE{*j*C|Uf,SQD}*(4^ k\MinfAs|΀bbI/'wVUua 3־V|a݌ oW5G.N&DNRbnJvpG>GnAABtk)pzTi8QH)#oQΜ;T&1@ >01Y(Fg[_k|H$+X*;Fe哬57[4D/ BG( gŰPQDxhKT;Hȋߝ_g~m 4^<vm}KH#)+H >{Q ޹`^wOGm-(gI%uDAB_ E\2Ɔ-Ce8dˣG2Bje*S$t2ʹGkZ̹/,Q1<3~=- :ˌ|h\{3j7],nh%[X ~Nxij80O&TzL<:c: =+Gh6բgAy4I!L?B*(eBf| ׍ tyg:U;E@hF1䠞n`gZi^DK)%ۘ*HeS`o}$QM#Eq=ԺF0?1-8X5 MX6Л B$rD莭 O fZ.UM(֕Ez\eY4>ĥoudJNGձy`%b-xYGFC'x 3Ch|=v 6l@0txKJNXϼX< |Uw*؉MT/V <̘5x^JCx30?<k 'yH/ϩ4)窣MU$m?6ҴC={YbiY;p~rfΈ!7n|q!&C*V%YҦn3<-AWiD-x_6hs"4Q4oҪ-T!"tN>9bIDd[)0@͕bݯ`llDy{{;9>W{D%ZB_M(Sqܝ:/Bt袾$EzF x/ t^h"DGE|vy,s!2N3)y/͞'.zrs %s.ۗԘ>@ JZϔQ H!6HB_! VoT1QS41 CBڭY?g50J8̳5.܊E*S-Á*˛/"z_ fv܆5>r]ߩ'lH[7,In@h>g }䍯?[3ԧ{H6,w#PE3AtJQx("q2d@QUH/k~jY؊eG;LWqNvA荻$%&uQMgl{2js۝{gI,1#*t5v?(QA"12t0q)_ 3VT->[-ձM јIʎڊ˶jb 3ͨ?+@X&;!ڭYA&s?YXl2 5\̲ŠgO2UNe'%5E6J54Aa14gJ$kJ3H{kEg_3Lک!vyvIVMvHUd@dHbK;ojIvըÌjU܁2E‹=N>d,{}7GW/oD7K!uX AnDžܸﳌ>M?;4_&2S-=*y_CwUsOvJWw4P;^g:$\4]/tEP=cC>a{wAA8[3TBD—⚀xa϶bnG!Ɂ- KN}xK/[ffDs#:ڶ.z8v^]['O/55҇)~=ýyU׶ɷ<-\ CX|I)"&n;q?WC+5LHO#"o ?0p1Fc9z6uӧ{۝Ѽ+\f҅p59bDl}а~κf8'4 Ìo4{|4n =<ʙ|b@eB 9ݬjO6q9Pm70_afl,'a,5Qh!϶)Nm,z qT*[,.apƖqJ+/&*?]/!4Ր\'!8[GV\Jc.> f$Λ)e:L󢙴p8^ ADބ2"[@-pI mF0 + w}:}@0TM5m3Qw75x^J)! "l(lۚgq$2&y.y#z(@w;!|-"HII6Siދ_1IpdSA'.L8AѭfD͡2?A,#~ä075+ >ZH-PCė\DCʴGj D6Pd*ztJt >r&1ۣL43$#,F 3;vHl#|j;W*ov B 3`9"ZDF~Hz F({=u.z7QM3ao~.N6JhjNh;#uA ZOVۗwrG_xuuG'* $'7nkr%*#u<)(:+|z œFNkWWSdጚ f;MpH!5MẀ&N׽q;X'2@H!+^)EyI^2nL Qm}k 3j84ل4#g[upU.6]a;pwŻ~Z@b$4 ڐzӧsvj}S}.I#=#Mj(rUDi[=!k,6iH^ļ%2^1Pěץ>dHg6z22;7qb}߷BԮCzPghk3@ߞܰ]4 7]@Rw! 
3d͒y7_'keec0پWSP:**ߧJB]']19wQw̆`)RfNd=;g濺 3_y5EȌ6-ݸqc~%Ȩi%6PoRp/9֖5:ү_VVe#@QЏL0MAScZU࿽:XM].I?<M;+~rǎ3uE1gɛiQO,kz`%u.}ښ(+&bYf+WK :N.a0 qϛ3so`uV$1޸^7pnK^*HV"ԍ!`q6^}批3wv`/ YZk{ۅ˕#?$CCi܁!&?*U:Y0tݡյwG4,Q"3@ZB[1\QlO)@]IJSNW(3hZ`2_OL"4(BP\D DX AM3'PUI%W70SZ*=!0J!`ZzcT"AeL.=nlKs]\;ZQ펽l[Rۣ/rBd6Xn|(mIli=47o`Y_ͻU1C5*ZkKD:WMc6]Chc&0ngJN*>*ROL-76[m05gboNbc'Nj_*OVyU+8y5bX[wP}d J +|T B75 ? *r[5l K}"Y=ѬD>EIwS%{òM/H4͢`u~wbC<F<Xt2(Hge6ݾ56J*y||:Y~P/p ,9F=~v!gr5, 3/QNZ!hby2EDv  HY!רaT@J`L`#l#)MҰ.Q\v\mrC0caݬH@flWZ˦w<ق)s'29 H7TP`;nWJ;=XNG.U銙Fo4ޫEhV۷:%Eub -P՜ RM' X^}"3k .]]sAWXdy~QijPǡ1d{lsz[A8͈ 5q-= A,>իQBKܣj6nT[78 m䅢Dg0e#c >;R4efT0t-Xő4 "AcЙ%ެ%%ԃts2JxYQ"8X:͝؋|&qڈXDa8衢jŅTx((jѡ.*SBda3{ X_KؾLTCOfđu$ti 9 hd-N=ºQt:n]kLvS+^4g.x8#zg0@SȆz*;py;`8J½nYm<=)6iVH´y]]C&"4Xy\a{f vOf WxS. K>yQJ7%dܪ;h4Ls~-mG$qɧɳ :cLܗE Ͻ |(ӷx @)#+??ffahi?X~eO~ayVau ̮1ui+/8N]v3/2=vS0.Q N`]`\'<: =ƂS/T\1 \g߿߳{X NIvɥ^N[Da/{$@ða9]^(_r ذ_KS4`&DD4Xҳ?[;썌(. #?i~G( BKq€ɍ0'߆ʋR_sy_ ƶUITL_.gb;J >pi_yԸ0<\Uh3Dwn[_ /֨h_ܑz]BJPRگ; B1@8N?V ,t2k9@\(ˎ;DgNȇtj%%=:Q heI3Q/hXmL+ǧx%^9$0!OH,6U؜'Hi'[PP >2V??^tpp ^R6aHFdIZ.]/@dc7# 0B.T,0 Rau 'Gt#+OѲ Dfe,0@20 c=ŧΐ{>_ 2Gd[5nK5yFqc'|v[D{PDһ/O@aV(@hA QBROr uNmsXDÁnP4Dy`?Nh!{.=%^ၖJaEY4hBPh+&:{._{u XL˾7RrajJy`JWrpy+VQqZ^1~]0}r TI]RAҨ:L-j%5XJĔg-cV`1<%yCŌ;V1d1& ["JH/V4KR̊`[8̴?}dcI0 ^nҟGԒ?'B#i:JFxpz^綉} wOhL;+%6`'V?apl2 7+9ԼҔ6 Z1L(`mX\q'ip_84eZ[=&'*pPhQEOPE>~6CreBC\pØKd>Si QlNȲ].$Yjܩ]䥾 8 5)+@ ^<[tnDsIhZGiSB(RòIʒ" |=#H=m<Eïa襓)m< Mtv?SfLNG7ncO/omplf=IQ0Y;av;9vƩNջEnw1+)8vͱ2QS*]a7ɒ+F3jT<\_I'\d)!E% !% ]#ZfU!aQ/[d$, o"YY\tmrO!1m9r ޡkFؒxEF\7Mrt ͅ FcPqYFl s}`ejDs=x0Y~d`@ba9KC;1Wޥ찍 U%}]&1͌Nazb*i޶Fٰ i?4'^got16#JMiS!OX& n3QƢGviJ#t2 ?RF !O> Svh[tz$xfLzBpJ 䰿 DN.3}Sv흖 7MҢ_L۝'+{Mq+5m9&13=Il]" bO&w&UIJo$+K~[v_3}ЪSۓy :+kW{>HO~,,x-eV^ʆ'Y2[R!0Jbq@xKx\@G@c-iMQ>c=c.GrVcH?+`Nhj~ߟ??7}TGm`56F6nNΖvRS~ ~?0IKqu4"U{ޒ 92$,BSI>Rۯd4uRb]XA)ܤXQOtk-?y4b/~D[Jl f* +D-k "7g `KR1-<"G: 1Z6bh MUXJVJ'"/EbYdUHCt]f,.X ֌ک?pae٢ cj,)5j~4c2JlqO^۞JM-PƤt{,;.m6!46Qli֮rnso>ܕzUڪ+$f1o{׸8)倱UjE&p`c.ӕ g]ufBVN.5}VVwBܜӣ R5%ҥ+8;=u&Z e6:Ț-fi~\5фCuξ!͈:ֹD걵Ju!x 
[)/w4;6ۖ)˺% f`7C@*&(t5|An3;rYCOXy|¡TV&BfpkLg}@5G>qF >]IEWvp1~ןEyR6T6>X!yX6d,\+u+ \cuRf#?fM_.LWe+~WD= n0װp X.6'bwՔ"Q`QTU--EjsZx诗7mnuBˠx+bCCI$Ƭ50 u7XeБzHvZUWsV;VO*wi^DDdUMWv֕!pͥÍC..vXm , %t/sت]\\orFn,׬+ rK3aC']ͤYma|gB4k: BÈV8q~nu!_"?N:A[_rݹ&$~,m(KHyWLNxlxg mHGGj hiz 4r1yyr B哘8|nι~n!'_E_Y$\;5[{2a\-@vxBO},EG wOe#;.ś/9|-؝Ȇtj~N;[xEN%M Ov=X y;v^+Hr86.s9_os{mvm&fa!(.CAIݞ- ,i3du(dE=ǭp©e6:U)ZZE(oqn bb K6H2 #mq2`0y誥 |[N]:0 5s_+T8*Qs"vho;4 ޳7w!QX`=jTel2@Q=5HeDoxB#j8㈣3wkY&ғ'7L&%9 (4Z0SX.]I$JVH)ns(p(}CWu(^n,p჆椶w;^tUCq2Y`7"!K#`S ._>spD Y=tqDzo71QBZvkC}wȃ B 70Bb ]hB4p"6B)Y㼟ex;dwq0\b#;"}KA@&dGQ/;w8!~Gt1 Cw;z_dDROuC{]Tƭ8gtҔb?-|Y"(XBo&q]2tiN#zYmkFySΛ#ρ$mu#v齰{ /~cv% TpT'͌"=WK1:, ahErAi9#!oˢ!Lq4t;!Ci]]՛޻[MK2+/fЎLrk(騂aa_=pie'.4VUsl v7!dlk)nHs;I.S˲T98ik7yg|UQ@/~ӆ=5+ElI6nϞL*-5A:ʖEVK !k4ډhZ (i j\d^`:DhTq #/qP}Іʑ|s \3ᓀjyIUDCBRMMRO1,k{. mtw B@:j4ܑ@Ȣ(BYRZ)Qj@W> yd!|nQ~s3}&2{7*&g [LPvwql̞\[{j7\w;ƫ%"wd*FsjSXV/7?)OJ݈ǰa0RއOU2ʵzsNjl5qZ)a䰥7+ﮛ&qzxle.<e k,8)FmYT*8]M4Z("T_6@҈a 4-<(Gr.=٨fz\tnTXk@eܩ$7'̨7azم1kfҫk+wN[Z#_7mw@@[Bh{i1EU&3qKdn&bɈFBĎXAyq n^g\n֓ VѴP ^I$Ey@ֵO趨Kq3-z3 t8 RnNC=AqcLދ ,6/MXСy[k1*/Q̏<'<&":,q,őF FUB т `9K@Ic4^Ij |#8M(u4_ǚ8}tAѻ"Fx={)JO2(!JOKkf6󛬇ͧU'qfޠfǵM%=(c\9܏iIOd}A =&z ! 
LX_.8\LAtRNEW(k4z۩0'mZVPӍ,)}[n1A$>E uqD0>[WR.E 6Q /uu w`b#ʙcH)U:jBi[91Mo~D Jy SPC ?}/Yra8f^b*sanNy"tŏ*P ](!s=`}wdҲfWpp #wáU̘3Y7P)rMS]YqC3 K-1:أE1; 2; pnygea tAT{(Ie[m /ko9ℷZLK C:0 , b9YC>: A釻v 3ȍ3]xkzF!ПJmd|#ifKjJ3$}ac^mPmgcJ}0\Ngido\I @L]희T?|BfHnbZ˩X,"2 -:}Qr]@#hz<ʎ7P0alxKVSMYcaBT7!ˉJ;6 uߴS* hH`jE]q%Nt>t RuJYɈ> x$dg.LtOd2wNZp\i9P1nʚ< a1/#\MuSSp`)%(ӟO/~z*s0?sY}-.U^㏌ 9p/mT4cv%`HB%}Tz=U],Lwcf=7UPblSI]*`:i Iֶx \_aUtBr0d0O]*{S16{ϡ5Bx"umS%$^tKf*ǡ1\g!ѶCHI"b\z`5T)Ķpjo4U4lxpeV>WXSOUƷ2Mߑ$mڹp˳uCPN&BtJsE{aeK x[lY5R}oB9KmƖn]\i6R)r7/pײu]7t!]fqn=Ei@ӫ+ߪKoˇ߄)cs+ `iŊ I^| Q0 WJ?-YfeP*iⒶH-o ݥr¥B߱5}]Oݬ~Ly#0 9 Fy[hc34jgTn23ĆEw,]C=ګr~?hoBJn%roh}.?6AW0Hm1 b4Os,ԶtFOAYR(.D#c K8sopcY^02֏#yq217WVTiH` P;?`:b]yD8 ԛ9M2Xt<Àa)ݲa\게}Ӡ*b#>\тpSf_ 1?5pLSKo3*Ix%y3rK>% ]~a}$G.d%3}  D2k cpI7IOjnqJ&L<7lQ xN,g~]g &V,V&AF[njgq.1@6f{Gvނ{zX_QZ3!9&h WubGF;&4tX.x3aGCnG'b-dMf$BT㍥?Z̫.Re(LFk*q+_ΣgkcQ"z'%~h?҉Q=–fZ@!q "@c0Tp n4OOz5sOߗ^9NzA,geF}-ϫ1'B"̓\ioIPtqz[rgkͮnk]\+To#%c= Ö7S ^P;joiLOf=Tٽ+`z^!%=aSᎆW͛MAfғ59m z)@ ܚ %sXePQ}couStQ\~;"%(kV9s*L;qN k57QmU2wj(~!ansZrh *B^< *^S 4%ȄWiL0M:TJ*+p)'{cSg3hj§ǡߗY ع'@hڒ(V:~ GPʙ1I9>4Hհd%%>Ût&0+^tZ `d&&d Pe="K JKL"E20Z !$@vUnUoSHʹ@J HTMP`3RHrFP0oOh e~r voY˵mJv&\%O#U%?}l\;1rNCːbto#j1a BJYkig^ԃv&jM)og_3t"xMʠ#8zWI+_[[X9X!)Kǟ 6~UXOYes Q@Sq{p\%'"{R1@HHQe(Bźn|+gOA(ab 사3-4]_W;Y黮}IsVC,ka$3l9L6 Ĝ!Pay[uzK~ {7|X'4 R%zLLꋍ0G fwnbXc'~}J@S f*Y]5(׾cW([m!MBj_QO<^x|3nhwGW+v _뎿f,t6o଍ߐ\BoмB3[b5ٔ;Ġxuݣ+ <=^+$N`j?0}zzRFLE$K!=ɲ< ;A4 R. ƽS0ql]dʠ'/FTY̱#@XT1,ܩ3qVpo j,]ĮJ$Ix^sk8v]7ԧ.H%k[_Dʗq)NUA,}]fn9CVZv٭<2ʑH2 (@qZe|N1 aw0= U2aL?;Ȃ)73Y a.*'p/|.=n{I)9iO86kʸ׆ڟX`{^ M7i,`BCXw5RкyWcAwb[r -rFBZf" 'uߩi]>j n}`wd*9!ԽG v%O-wL#5T^;I= UjG<50v bZN8qh 6 q4-hv4H<*g9O̾ |֢V ܐsCF[eNᴌv%*>/K ysabrҋsn5A"LvE,uU/?C|5˧?6Lh@#oYe|)#Xwiamw#m*O3AFZb̲bȍ,N   ? M?0q~WNU9>Mw7 K}DcbcN6̼aĭFv.GdC? ڦLad(lc3W[1&=lc$va EcEi :6 ttIy~z<-Oj2 JOlpQ爠 Hz;D_I^ &ٓZ&Yp! 
_tj};by^F~ɚ㨝OeDAXYfOBVi&D;2sgE}OdQaG13|njPՓ eM$*ٳ*HuѼ{ ʃ'1>mvuJDr65;/"D#?aN ٽy,k?+jj&)~>S uk5R` 8MgmS-+caB>)@!0v4n/ȣny@=*6>16V/ !Z),,r$n\:­ 5bDEK dwjG!!C-])){eEb¼U\ᴐ6N"9ae_k+6ԄNIxם[lfp`YL\vbKx͍`r ɞ] WFקἡ  n hOjMe%^ch2`qmFNpҠ8` _c9^h4ф:WdW RYW38Tr-^g'%ЌP30h'/1<.9$KS0ʐbX,V$3e("tg'2":]R@YAYAe@{1AjS:jZUa*0^^sjnIb&.zvI##j} fjX9Z9mN9оQ;jf8Z_򊿐IP *nb^}ts[٩1ys[(YL艶i VXONY7$6LA,ji_"8%LNQJ͙g @{9kEJhd_ge"Id%PIa&@th[c$:Ř0?P%2|IV (f)t2{,8}y:swwGCrlq91ŃQ+NpCz0b H!%v:g2dHE*lAXP ʦwci4/fpړ} 4v?0#;:JΊ9؂l|+~x`P@m0|9'V!.5QQ"3kPo>OJ4J(fAϓp.@"@]Nn y-S)6#d˴?KESBrR2t#:rePQ * XDzGD q =:A"x+%Y%\Xrtd%L<e1@;ŽWU'n z(C ;\D83tf-v3OpSكe#]XnZ6Dg ٓ38=qyT?yqC@tI)Vv[n mGWB$ʈ?z#:>S')@qp4Ye A}ΐs'j_Ňa䲆{zpJ}O6^Z| i=S}Ϋŀ]Rk%s5o3 ,M?;Y%NSt28BeN aL.Ôy0bFr|zhSNsg$NWx(@wD+a<z֏Zdh-3RTręߏYmd8VG(l2 0E$_~* ?<5t)~4"nN >,R~qo 0reۮă>x-@  ]#UKR>̯#u?H0+ aH!LUf;IfD9At7rD|/ ޡ1++zC(R@RFpɇ*"߾la5 x$ ;b9pN]|v>Ws򏱅8 O(aĀP9.%ED53%b%s,Xhv X2>iʅO_+f{SZUZ C}3@W^9%b'޾LBC9k 5+ g<ӗIi & RHNf̲~{ض 6;}z*%ʨwʙI}Ч>(\e#"̋3ܑ*>T ~ :b];7|99,n{zA= 9A3XnM W:GD-e"2d"FDu#ώ) !Fj@2_ifX['Wι0':{Ⱦo+`H'78Ūj-uQ2 vFȆ= Uch(-S|?ō1"ٍ6㜇Ϯ/Eke֙Te@h(++XQxu?KM~y y1~(%gE!rEhg_#K{6S-(}3۳պ;6P{ͻ?9_p}KaնnP0qRizht髊 sV]yj,,Q{ր@'cS?l >|4;^aǥp1xN GI40;W~$u5=X@-|j*&ƥէuVDq`a$t 8/s˂ {xԉwh"BB3V&ZԺ5 `q9vV&q%Z|G4ɩ( z >}D4=o ~t@Gt'~7*Wm4Ŝ+urZM4rN0$?#d|OEϠE~˳[msdu]LAg a={lq|]"Z&nFcq%rf*mԷQd(fw ){Z8 E[΅dA,|ϥ~|E'>p372}#pd?jY|D QЊPT#Ei1WEI0^`*`y-o1kc*L;Qo0[&(e#^8EuDͅɎK,s%Qr)rt(^[CX CRZfr?ZDbFtE\><;xLCSb[*tOE|Ӧ8D^<J0x{guOt"_ PV+*|Ij) "q𡩧x4 "*|5BCD9j m#C=F~ YCXIH[I eEWaB]lrC] h~Hzn5 B/B:4o `oTE={T sϜk%H1-RT N=qJ֪댝Yv6`ZeڃIxF7}ԣN{f*R[ O+ѪC(#gk*0R*tZ忩~x^29D ^oŝOgwg^r}nCl~Ym񌷜 m6vwr,>G6yضd\/Tٲcd-GJCuKCO헒34rtz5|+@SRkԀ|&ɬEUh$N #J9@CwdFea@X!?@&sدm-z!߶Zu o{@NFc ]LQRyEA e|%s f$tNDy!Ί,g__J=㡦)/2#.fd`WW3JČ;*POFfRl33߽0~_f1IOp%-ENN2iG^}M}"*CQ<E /J~5ˆh ~4tѩCyY5cMIf }QMաOSmiQKј,)Ou/f\KX/c9.LB a@ʖJ(KI”7o M5ihdiXT{lHbnɖ>]1euȞx6g\uco{o A%KANXT8?֮:zjNfB¯z=8_xy'g|eudIm re#,٬%UEMyVh4RxCkmAE/sIsEDB0[w@rZ5-1a5WOjQWg5n), 'i{xВLz,o?-"!El/pTϮC  (3./Dp?dfP-h@oq!<"D\ݺ/t5"{6F9&a)̢WI62cƍU' 
y|j%!vBT"1+-4:ZT$[R%i-3^dnSnLy0[f9:/5ƒa\s%fE(kx3@ypOITXv:W6߹ rѻ`%X f ;  C>r⠡MYEOZx%N*19^ErdBf(Y%6מ c$3.1gA!04x@/_TtEWn b6@u*@؁KL:AuM`q7Hm|):zo'tΞoΘޏJu/)|$탯<]5D5~"u5~rc.ո}m`4l^ڧ uy6kԍEC4 @̢i[˅jtؙiPGhbqĬp(*F#-Ij咕K*QRQd~*u *T^ D6gKC_Pk fDpMp_]"[;QflB#DzdL*ӫZ =e(4MTA?z*3 pݚt-Fg)zh/^E(ORz҄caTMc@A#oD7."  86 f N%"R>gC0(%nįp{Ƹ2DV | dQ.[D<0–DpY-oä*Lh]t_'-G_=x)Fi%[6+O!^2vrt@T0`NBBXYňM3%=&0Οt`3l 5?pS_T-q3%^$O2)ط\jp\ ,n();w].oLϨRWsp[P"VZfpAPP'iγ) G+c+['d沜ٝ]vvͺs@j!9Ę`aډFk -?3Ηx&T9hBD2}5BC=F^aO(xl,<K[< YJy16dQb'yY|.CWɅswmN6de ^ vhK,GЫ#&n}sx5Ԡb& >NDnA_ĉf8ՄMm؞=&7o3sԓC(믢B1qr9G:.("3NȍptL#?J?}:$ M񱧞axJ(#;ѓ,M=?}ZzmSk,Y0za 8C!8p$IB''cEd!| *ețV1YEW ]ukKQVvljND%ZqO\< %rP5 #yɏZƷ>͇o%c0#+"-.Apz\źL[zӛMZw޵忷ikݛd/xswsz`E x异 xswQ EJ,mjUg-z4jG| Κ= :ZKR?J g$=g,;.5OJ֨umds)R[[(I(uPC1aFVmz`%S_T+M AFzil!_GZ:h MCzZ*FX}!}b|u4Kc =@w`9A~K{7;>TY0O9W$ϔ2fqd@AI_@@ΗD5Gv#c'؄H$!.kϻlLd?!>S'K֏-r?x{{)ʓY2y΋c6S둕M{ (ԇhHQ?&- 鞒7T?.EO^G_p66W]~ ﭞ |HF62 *Fnf9U4COXɄՂAKL N U m{B<حr5msi(9QT3!ބXI ra³ aVrܑ1>h1>a-U۱41[z!#biAVZ(цV2t$0, 8+odgz&/x*܆5[㥗+D06mf+sse sĿ454fu %%˚y]Qh4 |z#I̻7^;[qud7q::quH]z}q R/ Q ِsLQ"s@Q5@ J,(2OJ'JZfgnt}r;e >8)-eYZ CS rXiUΩ O`εiҞz5,(sKkF$UTjn^-9Rsכg?UrvAZ;gM- 6PS~ĻHOR:. 
jw8ǞujD꼧S2 QiTQVXGy# B|1ZxK P354Z"EUx`XR 1=|BT[j:D0\5Bе P`L c:l`|qV /D1'.T 4!RL43=UIθ j-0tWZz@L9^e~Ih%EyaXtQ ZEϫ JE8EuY3 b-(hAZy" Ȕji)T:ofobvCWWDͲYzGMs'~{ |~"Kp)tup,#00D4pQY)G2ڳ<{Hˢ ͕},:gdV QUWyS^k;\a+ep Y}GTisV[D\3L ҂r䫹?J?'s΋*Yzjk#o6/C4o[9zo:?f[^S$mR.Q_~em3gumZd|<3e"BC>ZL-xz{y ďu䱄غXSޒp:k^+Vޙ?%nj>*c'b=&~R_Hd.$lnE >,Fp2O)GJGX]%c[ߊ#Dnm4G8Jucjqzީ9= d0:;.gO3hgO~{iop2'r'uۗ \RcF^̴TM=*7ws+f%xzg03dݻQR谱?88k9&u;KSiZSˉ v0xXv`a?q%EȰմ{z D'/q8jae*bll TW$tClQ0IKJd djZUHĺo` to P%0hݼ[!Po;s/ߙ+c'|oQ3,OIDFl)GMpF5s)YQXLd/'a+1Ka 5"6'}g!BL-kh޸]|Q𓏙9)"Mu+ørOed)"c5&y96Y(1mCޯ?F2eN.Z g!6&M6Cer`n?rIGY+-)\EG.&~ǂH%K<+FbkyCX<҂BDp=,Su&"DX$q"E$.yֹ.N lԴ؇ȸ($x5U˥+@t!yX 0YMtr(b] &dظs#uOv 5߮ #3o'lѥ!ydkX->oԹRW;塢W dy(a6ʾy N v q;R#Dg\Uwj᭟q~ c~DIsOBLD+2eP}?^x[§ZB/5>J˅!ZyOh1uY 'xIaNwd{5Q[gcTu: [ai QHB˅OL7Z(հ,s-U ^Sbq͸}חa=‰z*q`^#Ɲ=N\9=f^q7I _}8 2%T'Z7,W)7Nbu&2PVd<GG&j%l P?3ơmMŹ:ͦ^;LK\@ZE@Κ_&4|hTqeyZV1n]@sٹ"6RUΎrN >>j2\|$ψq"eABh8ulSMYiL@NJ* :)#txWU{LtFiPGL_1Ȝ`6*xrEZ j[䱓?FBWD'*`"5蜾Q_`/. j 9ѹZҩ5Lcsnk2XK.'_5WWN:9(%gi$,zOF$H~Z^=r/ "q|7L%!/huc"#fpA `E2 \OaN+e@AHW?=q5(ơ ^m_geY~qs v}|$z:*ylb/W+XMFEqx\~[!"BMUk\ζvW.Yr[[uӘ^v1ܧkc۵zUxQo*lRd w+;;*#m0 y5vC譆ix'3j7j&2 L 0=X+2o 5 [ljh\Hs'.+!)# ^m90񥑱s SFb Kg.~DnB9Ht.uCm/3!JLϛ7=ÿgQӶ[]VHLgS\ň϶GURV{yu֔ qvhlLX*6 ANrj D &ӆMQ`qdļ!0(xf儧gT(7(7ad3CהL{pbyj\ fTi^^$)iF9gZHloCbCbBTj 4\kc*D/g.݃A:VVsqi<0FCRq"v$b8c+9a:I} ~vwD8ZSQidkǠp1p=cQ) n0Nю۝u.R^9zO*+z2 "q8 Mwھ@/.Np1l5a/*k4}=J,-B &wxE*lA${vؕC8 8mҾnU5%]{}DK muf*ɫjTQ7NCX,@Z#;ǴWqϱ%v;(`ֆ)c w;TRm( ~$ ʷZ+KNqv"4 YE#`M6Tv s4#)G.@IFT)f`$%dZ"uSI/ *<ҞA-:ґU~R31:O\^ҍ\U/gL/RùCpɮ3|'`3*禡3{ I ƶ~WUDl^[7|=?^ k?BO_@b|T"ӉJ+Wg::s0x&)-IemV61ܓJML/_aoSO^V&FK&ȝC`rj8 .p=:Zm5koMtPpCz0Ic%\8ˁjZ'RY߈MC7$~oE! '5\ٖ*#eTĤ-i78ި+G{YS?S65`l6A~/JI,֜IR O|ptN9p`VRt t;]S&.oE3zk{ 4#]Z{Ⱦ? [bRرn6ZmͲwyUEF<uȕ˜¯! 
waA<[~RlLپ6}._{\_7Xː?#p|Zb ]4("W,d Eܻ2{n7_JVUjQY J=@G*)B6a—26PgعDOx>\Օ BS+rX_bH3w753ܐ&w6'GXCZ[t|^E5('Ʋarq5HH6l.{sdȣs84 Z|.&qTZ!dLnb zf g4%e%a4Aêܱg,r>X[#QBZ9-rʈҚPaet}-?wt 3+(o>q<|T돗^;b*^hk(Sz;/ n$ q'9PM'!0yJ #Cd;="k#>xnZb0IE7uab;T3yjj?2]3X W^Q ?U`oY\JoaoۜMz[1vcʹL>Q%vkw*C Ώs}GtOZ^غ՟6 ߌ}}?\YX- sxIJ,z{EXi{ 1E<=8h!Ye"Ag\HMSkڮq3O}%abܪJZ]avp}8Z`e'{8nmlggT Y )~l{wY-̹h=.uMV*H> ʪRh[5@5ļdO%7rު3ǝNmGa_!^igqϻL_t}h"BA2H$J>iTA/d3֍]\w3ei:DXQkT)Ipq%H`')νM]t.?FF7u/Gs_eϷ4>_E$#jmU&W횭Uߡ3W?993+}!1!zǓiNo:_:Ty7gSw'dN`NoaNaNobL_:rOIy(0j{?9?9?>oeL_˺0W !XP4~o''L|r,D%7dƹ; #mHWXnLeO1OsR-x]7[TI i9]W8& QU#cpqKܘ9c&PXTAy#AQVC*t-Õ^^ԂŎC-Y 2֎ݸbj]̶%j й2l_͘qQ`5hVYYP؃$E#i=z PnZolnx 4dD ;=n n=ZD|9&W$4MCкqxR oڀwv|X,!9^'j& =xb|7_@bxQk!ƪP q!]ۜ5Ǻ>+<Ff]{<`2D*L5 aE!kPE׃O]){I%x ׮0F*Zİ>0k׺+6j6wug#$rv8=<Ƒt& de4 ]ϊK$'6Y[OJ\'}SiJQB s:~qnQO*J2m3H7:uHPHf%$PE+˚B| p2k &WPj;Fi)lj&ڙN7/0N\w2;.DcFfD0&7 GY&<Z2B^!(!uRY?dk]lxIĽE[hYl%'?mIϒ򨴐ӖIBcH7!? nLz{(L&{`%*]+l7xA):~@%zP^`7q ęJU[]e0HC30Uh6g #' DF|=vDX&SHbe.1R/ Gd2>}mpr6/*o9hhI$B,Cܳ IB|M1:C k8k-KuYVkNpq].]ef;R1;I=s2Sb~PKH2`mdO%q#ak=Lwfǝt]δ=qϟ/ v(|'EFk7Ԃ-b>CWYZJ3Lt69㙵we9Ckěu='\wӱ۪V` V3|RNWT13ČA 'Z9AkX=L[(c)pl4QpaAF^vOŹ-9X֋LiW!H1_Ua[`{@fDZgնUw&̇Z8ǙİŇ+/Ӽv}27V?>Wvp߃nhAtwT%wUXKDo`<`\q|yQ_[ħ8sk앰Br6U>7[zP>‰lq}RΎsSt "6 _psR_@/恦rdZlI|Ѽ6,>@#G_Êfr[6_lpk 55 rb6Li Q^do`rHLdCGloMxM{D6|ɬwCeFkV&+ƺWNnKO09$ۄjz0{=; 9#v m bbX[]+QHȁ0YK{(C271GvC6]"@G3\"DvsOy\DtCv2pnz1e1f%U{~#.6€~=%lHJ8Ԫ'`5%rčpi#ȌZH }+z,g i 8QFaEɂPa;6̗q7/^$cI6 xp5w'4EU֥Jg' 0an51i OfP'^7=]3Sa94YX_Ϲ]l0⌥B&{ !]X="g >˺QE^ ʦo5[f5OXR*bMUoSV(E.٤:m6W350 {82}ǜrjimJV"dvB(;T[MF1f1e$Z=b ˬpl c0\@! 
Y2Ɓ`T`WRa7p|+A覗1MxoQ'%iQMggbWP2 G¯ +j G%=w-j ~^( #r$OszzzH~1tlR]Q"$3zH ZPՐo)1ݻx`e1G%-8/G]xMA^UT,p4cTKZY bΕU,;ոi+Bc1 xsrh՚䡯YsyzנOR1JOJdC\@#)7>f #R&bU }68EE8xvxS`oE~F|;ʦZgA(xU%&r?,C:T$bT!4 "ʈ&9̛BA{cLjˊf*) Jrm |(Ey pVW yB+jTGX[s$msw**S#璸/Ef5mH%TX2:9"Y]M1 5ct)f/ i]ҧ*zJL,F%D6_|'p/Y*Yz9Ӟda_z![5 CKIHl{E:7>cwOE!Bp#mK5):}eTH1 $*&KBtg@g8Tik3Sz NTUR\9"O%XA0񴵁`%w NhEu{so)Vp)}@|Vd͘YFIrb Vx -AG$zNYyE| -Qy33r[e  Ahe<[HEuZdHo<ΣZ`5z1g|= <pn;2mrnϿEOe;|iAFRg9b11 iH|*~cws3Wjd{&0+ fϡjI;6 DlڂzEI2C3ʉ9}WK:BI1i z',w̎t zMa8B.C'33m*wfq4Xi?q\;C%6u{'nAQ!o߂6|MdjĞ*mW)?Ol9?Eؼ~kdxMpA*&;tײg$Zo^cי90ҁ^ˌH v  K .ާq]$+:Z3jك1mĔ^ N: cZi-Q3cY%Unؑ`1 %Ayng7j6͇Brp o:9J WA ^X^\ґkPEq֨,Wky䉪k\=XpSO!qR BUT^%] 1ÕfOya@҇Y!X8SPQUmYLlOcڽf_xQA¥?H x׀8yTo|i"{(h6"Z5mtG=t Ю*a$4(4Jb _2 CL> \˞_;ΐL֏7:^k*9U#Oiɣo#ɧB ߀t*|㴴4_&~[Y9x& o3}8m.Hzcb8q.*nD|.ʁTЇk }VVڭ\QB_bGoRc7̡M ]KBO8 RdK=:඘\tX JY!ܧ@wZ BJvfX=4:3VUuQ_LFhgB9f53&ͲͶxp9cbp]: h!?i ehcȌ酾˙Q.tĄz&Xn*%Ou?qt'd &'Z F36<X ϑrv-7> IDWHJ` iUrU?Zr- -Ҝ}j7p#%i񨗺(_\&'-OV;76.3uaFalF@D!02040502114X32#()O)');==5'9-=.A JCT-,E$B$fU4&HTL.OI)_64D7+ʫ7+yIz B+d,t#G 6H _[CK5WC?~Gn3u`> Ėpɶ:Jrm[XtRN_k.&]H'M(2hOzrx~ޤǴYt =ޗȄdU8!qF4A9.QZ-9jc2> db]gоB*ix2Ԩ 递zJC(sf"*#F)(^.Tjc'd+ 鋫5Ȇ)ݠ#\h_ΰtyU#s 1i֤ t;Aɇ8DrrԎ 9B5hՆ7|?3I#i;5-Lb)pKK?\,`^-f:Jc'd;s37&ELǐ5#= -\'FF6t[Med0Oв"@ '! Bs\ҸAV1\Y0 G˄(GwMŦ݀a!=jd:ϻ7eS4M)%H8׳j^ r$#cq5[8dKխpf +bB U1:LC4O>0EV VeTj_`22^\~ǻ%||9pl:uEG;6/=z| ~}i2S6hhٲLѨ{|%e7M+Ɛ"-4QaևSnC[-(R?-5. >@zf*K-yfb_݋6|j}!aa Pۅя=5qor FlN0tcFJ Dc1!" 
ECx}!s*#*#b!|rT-֬\g {ۛx*ӹZ.e)c[#.9D /k+ן^`+N~Q`AgIsĉ7hX^ׂ@J} H3@ڑN" 4O<8]NjgXWT 1f;ՑQe ǘ B04*g0==ȟc/h|y,GJC*]49ih凰E`VUFbY%%Q$y1`A9&O`D*#sZDž= +VH&GsceԀ ?TCxAC%EJHGU)'" ̈jfVjVڨo^7CO<:(S#+u[*D^Hma;/6Nz{'8x :{U̗ ˌmV/,=fRD"O8˺6稤D,nk) ^Bv F& [\#ʙ̵s5\H\Xr_G8*`gPP(\[:ApfSqs`mG¨hF#Ʃs+-^0kɉm 輔B - S,ʀMYkdE:*c2nUQ]qՊJ/daC'"ԐFTz BFdSFx8/YfzOF:di\ c^{Zj>YeޒWg:EӞDHakE~ zhC ZS}D6,c8?T뻅ˋs,]B툋Q)e^Avn8 ?jH;!ӹ*Z}?bj)y p =+~MC "tu_0ac9CW_ :aSx7.ݺmDJ+ ?:`D/h?3|h Nă}=}5x)tGϽ4\ɠ2|[+\D:/pa[57/#$,!Q+-E(ڐ"`l`RU\3,~{ƍa:Q#ƙd5jჄߌmEI0$4M"cx;62wj?8=khW&Wβw/Mp`& |('R!W`"iL݈ܐf5 VXنpW¾DIBȔ${61P-9wLt3Yt| r3HzîZ &FzԋV>y4wֱCNmԂݱ,&U(/dU-=(LD߷eNK/td}@=pRRdqqYcՋyv>J[:;d@Sh{<+(;YZN!@46&t{@vF?G~x#C, P䊘Nr"wKێnhlK!fdz1uh} |JF0$LL TAm=Qۀ )g|͝2xXX;?V6Rww#bu$/$uMgw:s RT[ǣzRK"Q]sJ^x2ᠷ 5#w)P|Jx1KI+8T R,yn0A[II+PGdH*lEr5A?#&  d`V2Ъ} !B$F7'(a))Åj'R5pxAsh1NNco4+>D|һ5dqM~D cle$3M@M<⠙$0QΤ:p {Eatyd'q:˜yefV` , C'$X)uLyu:Beď馌=CF? BWyk ;:`s<3"w,['r]0SYU_" *TRH{r ]HnU-b-ڨkJlv1z4243j6Fy5*Ažm^Ry !͐gLM[֤3i_$Np]'\jx qֹT T̻2Ԥ6fq<ݩUebƶ)׈ =apŕ]u q'q ϣǮ[vg !1<ív2JS VtV(E/dU!gedX(P'IoMѶz ~i2/XYQ4#=K4Fwt&BNJԐ_| ]+3疥r˔]rwtx^zיb'8Ƶgq".7CҖ޽ >|bpj$* |W;jw &G*9bwlCLcֽFPUS}|<|HexxeaQDZ@:Z_5Y. 
cmU:ŁMK \'˾vfü-{PfWc@ $[-؏ ڪj}2V̦V=K3K@D(W@Qu2y6IfDKa%!bm*[Aڿ?v<(I_TUQ_VAyz:OMwMKGwq8 olJF)Ln :!UqGSY=u ~(%+#[ I3[\TvK ݶW*l;FGC P?6Um M&%|"[B#4C8xcM`ԑ8Jz]BdC ٜrYvx]IøggP]܁6D@=5!4AyR%(I\%Hp 5kR^ ^d# ɩ %%D9h7ea#ICpjwOȧpD B_{ XI,Oj]~ė0YO Kf<]Bsħ$IXeD5нt<CVmdIc)vڧ@M@'9GN`ÎpLY8O0L>a};COpx26&U`l w\Nܝؠ  EtRy\k>}XKr(o9zRoC`Jnu\+%ll Dٯ:M8Ü BGc=VCv3*r)#ώ(br2^vF/QC.ʚL2N q={-}v֦m^@rp&"k dCoҲ>!&K$ %6HgDV5ی "_@ ‚d3+qLת]amsCRVBHFBWQѩDjEX t@U4q>U <|AseN9ϱ[j>{g<%]ԛ|&!1J/&n#dDL2 v6yz D9H[s>Q%lz>*0߷"g zEq܌*6Rű5Ib,@nOxO gi~'!V.d#Ӆq0;RHZIXf}wȋpy\ K`Bֲćy|BĆG4SWDFYr4Os=?lK꩘6~BQR B&cXvL# E`%ސelJ}-W[,lٵ+߄7]w%q@U рC]!nFas!ނm|N4x:y-O&?\;,=%qWf_e.rG'+yz{5Hx=C̩=䐅 Ė9ngEr%R23zIyꂔueZߘc+VRC|iF]Nq~o#I/ALG T1Tf KHCF)H9]v$O.N$=dÁyƾ|sl64qhR#Jޤ*Oe*4ާLrFHUWN8 ̹գq?,w *{:dvf:b؅p+l7PPĝu \T` c Y?Ϯ>/i;]"q5LYB1"&" awҠ д,v5kNW?h1O/{pof'ۓh8 SHbKUHMpt/ 닦/R-Qe9F$X`7`g$)o:B \EW.z&ERaܗ iK[Ԩ0Tۿg@ y&f6.aSy N?pi99EEJWٶ󁵑mK쁒r+F2Tbڊ 0䝈7R+l]c` =OZn=Ƶ)`7'f bz xJ:GlNX2ǀR~'6RsqPd2?e;,Ȉ"ܵR\bch\'ǡO]3ޮnzqHW+T#XhQҧ7^IIW:y ts{c{.&~,ĖM J><1R'C:2\<?z.Ss䓇Üᣓ oWSrIqpbASB`bs (+;EQK,`B8#OR"d=M/s%u<1͓@enj1ׁ4G ٟ'\qK)$DRQ#l #F&eÑߤ.0O0П5+i^~SzCjGk?\U2l2'lcпC{&-1Иۢ醐9sN*%vbݬnQgg$ۡWj>棓O.M~pҟΨ!3^l}Zg?s 5/}|c%NM:=^>m^p޼V7G19 ] 0x͆c#CQΉ$j@K[@6kqLW2-1}lq,Q$ň3T3ͨgɋObP* '$H6R.d&Olh/&#Nۤ4iNJCdTù}:>w{-b(Fҥ& -<7$(ڃ7́zU)KmѬEcY]3HVτ=&U; vr~!_$&X0gylh + 9`X*nULfZ&Ekё4xH+`|r3IQjד ӷJk&FD) ^!FA_O@6b!s]k)d辷֥Qzǹ,N qXjwEqկhyD82U ḾH#FC7H$TÈSw{>Ai̚ZT5tYlCy :~-yH:aƗ*c;Uj9WYi~f`E ~ U`]q48u4((d|#5 єBk(Ѧ#U[GZIĢYUҏH {o t_&2@R/sqH3)t)ʷ Jaf<= lsrU.??ޠv Z=4BE sn`ac38gaolrCu=+r`ḿ$X ntO ?_rÈn'OFbEԆ.K ]*4#FFm* ӻݭԳןI]:g˲k&VSya2]#iQ)T^L:âk/a]iE: k}"1kI׾l#rP%ӵ:eMܨYbn?7{􎻶f˵mWEYm2 5a.ئFGt۷!" G o ;`xI:Ӝ MgZ߻gU'$ni3rw-oG9/)Z=u:EA9cTMл(kZW claG<},J^\sw!~\yӍ nzL!1%R)T6:s;,͏lTsQfʤy| C7UY0ܭā0MoŠǤ-イ!R׈7 ָz~ iVbY&i1lO *=aU~(\*{+(0&WX:~w z5;#$ ?e\<}. kJN;m( @kT{Qu@p9{nkWV5#Ҽ@~Ga5H((dm  w JjDϣY *E,}F2^AR/[ypE?ٌ(Ņ_G10ycIieҎ\"*l!10_-0FwέpCK#ӯ6+BpSDQ4=tm^4rKoV_(>Yb8Z3`È)[cwŹ/p) xD B<­–Bxh{ Zo6{uI`}#m[P7OecSޮczm_! 
C޶3>5HKSr)ec-ْSޡd[͑LeˑB#D*o_c)bKj%ځ?'l\EcU_o.Ӕrl/_K#enIo>\O|PJw,v{^~UE")$/_=0L 54v((Q0F΋a2EADATIpy#G)Bbjfr ty%L;A B"PI;'XëGf_$N ذ S$x+چf7#0Ed9@?o% RBپ gfπgg?8⹲G>a${ڕ\ c2;bj}jeۡ CEWJv*YQwߌma,@Bl@1>O ڼ]UwVcg_z-=H48QH#gJ``Ń0l&4 4$)h5NL+q"dRHn)17dӿ/5-s󏄺 X$1} JK_K+LR{Z.Yb ҥI료 ^ i6Z.hE!RU_AAw F$iI15 g:7L P%%$fh6HZI]vIsI28 2,]R~tH&M#*iqa!{iH`&HݰFLН.QdIJۑ1[*2qf`GMbۇV$ir:=EˆM!VDK 5QBͫqؘq>˞lPwͱ ^V (Ad"Nt5ʒ\ZV%N[obI\Қ29gTX/YBꋊiWBD3/f F&0[ӡ=>\zZ\MMgžq w:wFu!1BZgÉw:0:#Ai<*z,AECݛ<q ]s6qY^2b#~>E DADm{cUA(R#47'߼H(>h#,7fZv+aZ3EM~`#&. KJ d.ҊO84r%%& 7 \o)X%:GmaHю'ZQT0HIf/!皖Iv1Kmd3VW #d|y~E?)pt=.easVjOr 8]g$ 5?FD~ Nu$I14Ɵ7PtH@~h =(wwѐ󽾧1|"rHTLz7MFCvBOsDpban~gݰ: G~zdwl .)0p6MӢts|Fػ_+xK8걚~_5 .GEJZaߟx[?QӋ5}̘)D=9xL G @WQ 7)^^=9 vC("f#s nҾ\!,uZ*rFkc%Y: mMi׀\E~c7>9z8[F2sƻ(x@Ea{c!բǥf3iu d%.^sU (A"UJ_M{r/Mj"-rў+P ƉN="T 5OH[5=uTV=]]b_E86.tQ{ 0Tmda-6S}"ǑHwI;Ek) Z=(z^w钊)SW$>(34Ym8yZ9sbZĶq7HWNz׃ye,I-s;Hˍ]!Sq|}BSK(c07|`)4-&cf3׹z`%0),TQ!':B3+Dm+'vvnBٴmt_`IV u?cž=~*g(N?Ý* Lj -sN,Ms"Y^팂Y06u1~XTŝV s*P> -rr!-5jdRje%(9r?e@s}Fcj:!FIU~! PP{"&*۴X1}{<)^pV^@ȿ [Pf$ĘTbf=IO`t.2qqOωpxJ 6V/en-ٞBX" +}!;nfk\Jk (E\Wt<>|"g-֛05<[GO oA{`}W]HTd]mQ8'C8IB{Kv̗$'7-)\kZ C0PO*O)(5WLàkoViHC쫜E8?WRu0 h1LZss׹=jL>1"[# Bp=xț{@:Nj(.2UMP]|t}#5mY%<k`ŧBX/I \=.qZ[&,9}W9AB\eR5„IohW;U6uʰep02$&:q>,b,_O9/T"NE]$TI ]))޿_5@x+@뺤Y*q;=B/<ChbVoq&Sh{rz"C̫d(޿ ٘ԃ:[|>yr,ә]A~ªr)w:$J!. 5Ne IJwjb+BU$d?N d,4}n[r*zށn.osT ?2oՍ]_پн?ߙ`'fACՃqPS8,A~Mj)2 RF@İuHaqJ$Z &]%'\֋ {LhLf%,%].>"$›^XHN숸 /?/Eָ#aܱyysǽw*<n},|-§BP:{1:]z+[p%hu|GcUVnZk ̌=\P? 
o]E&) dV1/X.* ' ١bQOg]t_|+iJ(nhj۝+D5AH~EӲw% ъ>!!&B,Ph da==!o ?r9SlI) sZ")>Hߛ> GB2әDžw79Eɿ~#"ӁϜr9KpWdEb ="Ŷȱ0w85+}Sѿ0Vi:-d?N{m.DָTcZ򌁣 IN;(qKiV^pQJ"52 {c,hҧ<'] m';&[(\4xeź[q%SwT0'9EdmG'V~M9哌 +ZQ-J~VPQjvNzC!: B+}XĻcw )R=} 6xߑEO@߭6 v>41}/ wϜ?Pq(]vd(>6 溋~Uۧ4{aF`%4,S|/LtjRps ]󿚢$ 1pNVUm%% jU;&IZnkn{Eh*!l|qZֳѥO/)V{Dm۶m۶m۶m۶mO2s?ݓIEU&uQSj=cJ5ѓ n ȹ,邴i#I|;u X/-*%[(mV^-TJRhP/tK 78w#`u7ZAwe6Hg=UHV5 u-|;`ҦxnG|WqUc_}Y_W=fM61.ĶvN !؟ݒa/7ZYAS!sV7~ӯiM$4}@t'?db6'~0k((?*Wbxٜ_oSxCЊྀ8$ܞa;QZMݘ<Ԃ^ݢ kL"uXR*ViRmMf ,(hKHYed ({ȷ^K**%'Ua?!<ՔD>nJ j􈾗+4a8htnJWQ>z߭?aR1k{b-k@DhI_rFh?F^0("iQbCsP[%u]O54.I@U`3XU{M4hXti?}{~mGM +_HjOJ$4Ac'A:{,XH]e6n TgCGA{D.A~4v,FHJI7V !:7^g_McR') 'G?Ve^A\JFɥ(5'3?N+H40'V D"MJVQbe ߮33\~tS=6`ލ{` 2F>tuBڛ3Hs0&ͧ`̏EI͓Y09Mc<(wh1YS[\2,s ZNm@LG N~91 D+&^JJˁ}_WC142[cXŀeRSy;Mx ZoTt"N/az\gHJ1^eZ +sNb1(ᜱ7qau Zfh.26O[;F C;&PtMQ1-ڂ 8PnLZ+`{6o|H"ie׬5CK }[sί'WSWB'ȢAQJ AdBQ=wB_dRԣ]iZIcn#ᤀREA` WB)X  -lo=/ ɂ$jB/_;?8?3sfww4&鍆OUE1UzaIFߔp4hOQQUa X$W\WW-T{ofNL{إa8`Ga yS|tW-\n#X\-XxJiN4<Q"hf4BD/4$(lQPCr9W^`2C ~Sfpbl>j}WӠ=y­ ? ~kA yTTݷ_fxZ2n{i1MMg$dXsᱸ Kb-N)͞$9bǮ(j:c ve\kb]7+[B 5``(0ZrVdr*ZSPZl``Tp ϋ+ X7,Y ዏ=\,n"* L#q ]lki:2Q2mɏY\c$Mq34l-@ | Pw3֮:$׻(M}seU1.4Dvp ;XQ%? DLtI ]\͉0h`|FtP6x2nN"stQ{gީxǔ{ sV8R 6B Zz-OD%/RQQf#hݏL٫-@PKb+s*E0VLM }#XԘyMT7r&DKW7$"H@,b+ 8-ea~PKq~ҠT $@wV5QrhI2Z+a~Ga\aE-v:t` ?xqQtH@t(h4A`*$ n0 jZxW*6EXlr`q2r<%ΞhE!D9j-%B:ƃ^xھH՘Ouv M{VNֻ0wvB ,n='u[u;!n'q0ڏo;z͢Bď2ߜs׃YeL~3Mu=~u{]%H hAyE;ay8{A 8Ͽ-KλvJI5g??cY5{RFE@~GP7L/S+ꆄ+D>VEzx$(J*za)^f6X]d j`? @ bʯ .+/ܚQTc!OqKG`VCf0x/B%qJZ;3>l"X[aďVQ(!mi%MfgJXf X! 
(peAm P+gݓ&5ʶC 9-^ 2(s6}X4?[]/DJslE&DTg)ч§s,wNT\` 4EF #cq;)(e9*݃MDŝHᨱBAxǰP{U }h )X1l5we2h}W$UdP}Aad@Y8aA>=`Lt*jU5'7u,`:t#fj;sJ]j?9UqiZ}8bT:N6Aɢ_0EF@,_/o z<yVC6qf0k FB=!EϽ 7OhKԲ֞'ZԲѲ 6&S\S ZF" `ѲѲ[/M˾W$sK)(27SBz;~W_a>WUnЯ>ܻwK%g8Mg="Kw''pJS;a# ;"08yI 1#;ZbH2'=PSLJJ{oҧIiQ溦C]?[z Lڠ8>!0,_@G[GZC*fQz&IFħI;4B$nrGw7@db뾍 9 tcv,0@͆]Qc ]~usYcS?砀A61epFcHGIS Z*yiBaH0PK,MNKlhc_|EE_^39*p)% t7.1DI=3h_Ϳ AX'M̔$b8$<2ˡ&]qusPT`y<}OnzFC7N zyӦaOK o|r2h]Ğ̑m[`hr9Q`-|۪G źQg$A [)b$}<ʂr:>|rˣ\7%Hp=^űSP`5 8Lg9aU\ d!#1"ߤdUٱg۹\hMـ4Kw,/r7"P|ceS'VG5]~,V?ޕ,EV ι_хʬHXpmQud> LYjC_&C{2SO!l[+-N׊݂gB8w)K tift0֦tJkX:XZ`6HQ`o|ZSG/mfIjtp u-eDx6˺E;e,aΑk[x>q51_)u7>Y!U9 jH:5KfBqOɣGdG*ԐdR\JY-9VyMȭi6LM]'ch'TɮpǗq0?StG\&ثKo n۲!dJxYRfvO7z0;hB->N km)bV-\머nJ^} W"☒aZyחf3W/D"&pGV]IVKlq 6Q#~h֩F ~ORi+bX׹nQOw01o\d7p6g3ʵSu$KM^ ?$߁-nSzDR< EA6MMkkBy3"H,gUb+~$}TNuꯞnY-*u?g6 ~֟>?ܰ>%멆[HW{H[Ȇ>Їy4J/PJ4*"nr- 5/X5RB+bBTVzy!-ѐ%s:.,Ml|n<|_σo~! 3H$ _[<#@6|!Y}J0c%4ۍ {KBL<-#7Nǝ(nܚ)M2/wِ9j* 8EByqP].ha4e~}ws,N"+~K#eU-đ6-HQaqYoI.#pE<3sMX`lAUee><%0S+oG70g-&'|xj%мGPuHZhQQ=k9U}-xC(%SʆX5 Ց )P#+@1Ӈ¸l.}:#O}2paʯ-a#gd$9C 4 Ȣ98'ke= =O*i*~)I0 [ NuS3lAqQqBe޴HZvmqlGAu%{䞛nJj1[c~acٴ=V|VFrAsZƴeTݾ͇wsb:Ul&$ w4B5y;Ot I.eay>aІLن|)w AlI|t$.j9/Z~ynu7a;qՀ`xXXU5k5T|>xjJnbCfG/Jt33Nj=-ڡ_@)Tj]@jfH`:fR7ڰX%zVx'}]qrM5ŤNj}j+NV "PedoXtj͕/ ke907ZYE7}eP= :Dh;ZEp_9Y89;3|PBcdd$6*43-q mVI Z1O+Vs19j^V`}UF+EК@ n*ˤ!` [+Ox439kX{?B Fe>4a `"OWmO ,Eu}N(JfD@pP/ pbpbR' 7 ) *+37mXXR (P*&P=[0dN}L&JIu,Ń % b :,b-7& =p(qwQbzV5J^f?zz\ТJ"إ=E{6mC YG'XΧŸ*O"DeZuQLD\6;E;Fm"$;I[>(㱶*KS.%tD&!&U5/EYiN#)GK " #w(/ Am3^6@!s)8-0> Gvab:q$x*9% DO6o "Kw ?ژvff#|'bM&b`äL|Z=FdM0{̢ZKҤ?j/eLzrieH`R\lEu݃͞N^ƖJ&x>rfyVm'\]' ~βd(:z21Yq9H4,3yZݡNdP&96Eja{nGR?z0Q&hu=zVe7;>R$Qv~?ԩbA}E"D? 
9!j0sP>3RK.Rׂ47zlPCZq,zZ 铵Pm=d [į'C }C0CvX.A9S >g8Ϛws7jqWnmuҎZ3{2Yw$aS+\ Pa Xd\b$CxJ&WefX{7<;_4Ї$$?"=uy~_B !,gw|+,` J,NƩj"s"ewhxv^ pT +o~Q;BK1Lńd b70ODaH[0#/:G\C"YBps;2ƦAFZyD~ .nHB>*[g\*Ed'{"RXTeOaey}Y&V Or< >gjîi[rYZV.ۃ␜ 8[z_d2`/ofyW%CT06rrISCF=(秦\tIQ4i7F"aQ<~k.0vv,`5*0cl1ʃjWN-ܾ3|Z&2vd2ώ`_X0ש t$~$~ݿNݬ<2-+/<ysjOAȵB/n}˃/;-B\f:ATƀ\btC~>&vT{Z܌rD ~Iދ`X@P0^8on΁N)=*͡֐rS[$JARԙ-^}n@!_j>P̈́} .lrG!ErqT8y|L%KBtran4FG\A介NS23hAU UrG M M+„dh-G4kY&* #dhż?ۇu?;+_u@,_r onX&;+H!(${Y~8eF@ G?z=z ?Fv5AEF.3 ^e4r8=*jZm徹!==r)[Va~-H6߷TlFÿb]4ۂ_ј5p* 5zE3ܺˬ6g{/%q.'Wy|`~8/9NY݅Ģ>x k F+e)y'.᳊7~RCh;jZs/yhEMEfAJj4n2瀨X\܂ ֵ 5VB_@:A0*Ք\ _(;IߎbAތ9<5j({~U#%cAWc_֙R"%?A&.d/f"^)Dܮ*k?6-WF.#M+:_ܷFqFJKlM?*%w4 )nՏ V3n_Mx) bwrBR᪉481#WSB|%=9]"ӐwTs2"ŰoׁOGK֗?UP>v)!Hհx2gH5K)ʤNe]֍AY8PsK6[E"2`WWcR^736eIQ<64!%x )vHNa A%=2OW2OfC r&nOd=+!pF}LV VʓcCҵ /̶'fxbxL4.\@0_kw ax1-l=Oz akJn`ҋf2IW$Xl7)gnI!h % )9ƿ voT-LF BFfbRgu@ t8z֏@58K'J`E饓-GDv#2"[,e2b{ާ17/ ԖJxS۷J^1^:۾X^Fr-HwK}˽IΔ@^yEFO[W"uX09{R7{iD)su$Ac"QA&^Y/37aB.¸?nUL!#̶6dx1 yj6t#HtRMUQlOs؜J l,ݛLa!4ktyiSƅ)^'G1 1Nw1Ulެ 3wG>\!He@mBY,2,|9Q9yR˴GS%I| [_ճ]9&24LBH(%r *|K9.bP~*Kf#!lՇ0|Ĝr}5k "qsti} Y17ݫȇ4.T#.~/v I4}%@э3LOScy8,&ad&Ք#WNi=~ hk3lkRbU-Yȉf]`{u*Wd+pz((5XV"bC6A>M߾U[n 7v΍˗WLQu̮\oҟ$K>KXoUVA],teP!4O8~ /NlQQm)7rHE#JTD;sSG*EU]/\&:%CxRgrjgaO fCQ>WOewaL^Tf7ԊKf Lq:*O1zM[;$m whtM|By>?NGӌȹkW!t59T'RE]z e>187LԼzUWkBӾmuúUҒ"3 /lIJ:6ιa(N6Ӿ3@2noőy^MA5.ʬg} r5rj&J F @(Pݝ lMSm'} dlU&`z}mT\6m]]. 4$v{}%:.yv5 30c͍oY4OLHLxOL$644TJL(WO{yO5D&&pi)|JLMF4f='dFWEY@pmesffpI=ak@@7(lP&|>; UֶPcoO9% 3[Cxxr}Z9~XYٸ`&[;>>;3C t"75.v*`- $V2f>W^$%:Mt蘠™NviM٪VL!_m8ARrL KVX5]meSխCy`It#KY/׃Rlܽrj”[`,r[uvx-@_I\-294)x-D]9~ˍ|3}pS@GO5Y&=I,L0@#M̒({?ʋXY9Yy<=>u|>y}scw73e}"=9Np`1rQU|-VO[ )^J«NjX3Q@o2馸\Fmo Q*2GǎGi+ !αeW,9(|gqjk)\-g4<3.g'QIA g C4lfHmA5שnw$c[jnt(0^w17F/(ra"~AcaY$S=Q! 
A5t4Q!}Otn]:&8D9cpQp6 $@UqJ71籈N3պ>ۢ*66e_oB+3Du!댑*KI l`ZuFxe IGѠkj\ .nh{KDm(G_cD&m A5)IVs vUǐuQcDv*A'-2IA"pxH`ˢlaz |.4SM kNY=SOLR1PZPXU,Ѫչ>"Qc: Y=~ܑ Vw!P`qJC!boHZlb<htGEtSewp(Y=BVt=Ҏ_.EF$ܻ?O|B AQb9/vI!ɏ{Av > I2s=DZarv&۹VIKlw@p :2.>5:wxnZRuB'= )x q5إ<uĉwa13dxf!NyfgswsZ=V #a?Rrr($9*A\6g-YG,\ɐ0):Eyq@-S*LZ#tD3p!bIB&2 !b?l.|)A9UUObDhbpSgzN@Te!Sf3!(Uo#㠐4~Q­ bC]˝%RB&1!*&`~Vt.гŃFT旑ώqN:!۴0;\vL0HVCWf)-@ɺP6AFcrVkbF *P?P9AICGThtNwZs%\DF>¿`BfXL0&_+x)P'֑|^S>I79Q3`8&ѬF3"35 W*"J-b"fS'Yi0wYnlPRozun߸o]Fk#v*ƿxspTcTBOuW,EXu-F/*ǝ~6jBp`6)^8vp##}O8ԪY{ڬP԰S*Ҝb9K-sÀ|ZAj_҂ YI MIl{_kiPNt.UCnf.j$.#xGS~/(\NDɎUЃ{w5١぀̚nE%c{ʐgGtKI! fwX![(#$^m91VSHXcEӁ,XO49L>dwDx5<74S_/T~yS6|oy}uQt$2; S-L3e}~#_t?x70Lr w$PwX{ g+n[;L8v,S)+|]ָtCILl_3ʸ c=#ñqvXm$d{dbaǙ=eP۲Q◕J5FUMf96բd0[7I̸]|c,8VG*,f\%:oHK{ DŽ3Ǜ!Byz8)*`gJوISpY0:|q۠@,-q !\=0vhyFE'x:`3kT/B c!)ZIkM10|$c[.[.R4urV6x:{D=/,.>EP|B+X+4ʹT 8+Nr4/=xCzdgtbQW3`9ʰT1bk) RfT-7ɜ(s62 Ar䏳~W̎Qi% jOn*q/flCqn Z*(Zp[ə} 9q_4AFUE3X.@H\\O Og8R/"g-Sa!WpC C0sIC6`YvOlnW7GpBAA7u'3:4˲<$[reoaДhb2xHTˇy3UV͜8VGeDܸy"HL UU #Øq W~HO`Ȩ(<Qz`=gwՕj_%nǖ˽$Ei}WV|OQ |ʷcEqCڟ7<駚a8BD9<,dyTbok{c5p.r5u͐Y5RTviԢO),oOyA2\Sh+F.J V4̈́+ zu`€8x6}QAlbQÈ1~7&gϙK6y7V0IĈ~ƬyA)Cybٛ pG9r(٘z} @sȻٱ1ߘ ;hL(ƒ0[9q rB΋fgx&Sz]w?ȨctxmOX%6ԛ8*5!]Ɩhqt=#{ckO\u#g꼋 /݈kDU 0bUVkf\ޫLI ]~~0D=N敎z~u{F7i&KٛJpQPӞ A2ۛ.;ukpXȬx ׌@c?H|:7ca.z`a}Z Ư$D0GMWV<= JX:fsp~7DuQ{ %ee (9K; 쀬SS\_$Wi1%5\ o@Cfvaݦ}2ɸ'eዼgnY`Up BixPRxsvER^M35RayN[HzKkXKGf׾q,dMBWΨ 9l'p@9&w1{'͈o/׶>/yW9Nw~.L_U':y W'\-n {ZSpiLLOO.Gº ^7%~3{VQ>SM\݄ U+klb,xO! G MT8>ݪM,[*M!!yx]_GQb)!29@E,-?/(Ї%cG;H3聊L ;Sd?|K_=-Ԡ/8FfT[)_N_t@B74؎olr6 F/l3IJM}s-ձ|ԀX7JYPBCEIM޲wݡ>7 o,& ǺN㰘n٨90TIv-GGP  ZPMpD|mQvUEEEzG'ַL4-$[L0=*bI8Z'&&~V5?MC7u13Ttne3`iOju F0KYF&Q1DdmȜ^>_khGLYГ0%ܓujVH\G @>︿!VU"fMPnE=(:0ӘVnSux@l77Ƈjqg+ImScU™~S3P+e7-Thf:3|.k2ڥuV*eHkV* ȴ. ~Y\~\oJv"S6sw'\Lx t]eNj(N. 
=dNCD_ (?t>U'c"[/D\(YF/mrܻ?QhNŖpuPhtx*G%Sސ;Mu3M;U|c6E~Ms|# Z3ʃ+wN^~tCM&26}IHs,k=g`jP`lrQxҊ&Ve\oFn\r1 xF=#a;g&^aw` L]㒫ۙ^!M yCѪ )2?P'u7S6axs)ጺf[]g_i{0ZɃxZab##^*ʻq/qe|֖#B8y Ƹl޸ԂEĜ f QHჼ*}@2ޗ\Io~-z42&(;o皺Z筊4#4ס=էdnв!wouFQs}%`/KZԨ\GjT LѶj^Loiy\J=%mKg)Z5ٌ̦鳓 t1 ;VIn-zi! #B۾] f}CneށZغeÚ )Ax3ÙTwb}Q#ie$Kmτ^ <̍ 8@!1 ȸBr~5k2U@Mv?SB`ytu*ڍ.iFa>^ۥAf-t3j,]Nrw84Iyc}Jjn(Kь3΄>A{w#mdjm`40=}i j5yD*Tw9Cͻx2E{N+1GD!g1ikRޛ~1˚NwWu_Uf4#H~EN)&sÛsiЎ[慬۞Ii׶O[ƭ'si[慮۞Y׶Oƺ!*sJ Iƺmi[װYlbipi㐲sy-rtŶYe3ҎM[%s؞I]-җ4]Si 9CʗK~ye3O4I}El-S q4> sP4>ʑg{ElօA\?OB%*KbODٌɢ _Msp%#aj?ܚZ=:-l˴-c1|?5]?%cɛie$pZMys8é9T.c$%<0 |h9UZĵwRD/c, ߾<2<}ox]~v7N搦=˳SކY)BaKc_%FwiĔQNr521UFGhё382e K3APS6ӖrzēvnBĴ3%rűDVEm"JYnX3:YW³W!%LóJ0%ߩtcٞxԲDr`+ͦeM1*7 :: vle+Eᇤsw R|ho?,hrr'6MC4mjuEٜ6lvs,lG`TznVǘ]$w=/h4 ҔR {fF3li4a&%q n9DwjxfKœ2NJ[lOX:$|l2X0Ɉiv7Y' YI? S \fP[O@@q4Bԉҧ mj6@En:PM^sSd R|AGvmdbU#3^a-NĘ.r!3y!KH!ݪtv"Z7әO* LN<|΢xFtP&N:*;,ߚ4U-2yb.@vRq` pNd5f9lw԰x]rְ1DW]êLy)GA`["ĜKht I\őC"pCsEx56/K1a|AK!vhXQt9tНB:t B[Rn9$HmHY5VB8M|Z"&G*XC1Qhs:޶@gIt] tqtct%]E#ͨ;jʺ(>Njp`p`?jСfyϝ|h+%,pa4M4V&T/a9*>'0! 
(|AVMnDCYoI `tKE6i7.v]TwG̱WE\^/MD7} 1s4A귍aw(#^qHϖN'H,e'4k%Etqfc¬sc8D!tA"<##T[pxT"0 ds 1VZF[˼ 8|έ㡼bZt87 kqN1 7lcD<'.͖45jÖ@X3aInL:f^!:m;\W$YO?JMaT/lA2Dvn9![t1r%^_$] i9XMqiΐ]3SI)O'e߲;޻wˋ+ *ܦ!U a46Jxr:TsSh,tzqBGS|r\}:Q3/QW㳝69lIg$vOجrl~<~}/ׂ 3zrhQڟZ ХZj`,*~cDmgXd'ds~*xٯ-X slU3.$Үb;ؔ&Ԯڊ֦snڮ*MO :Oȶf$kJpX,Щk^jòk ?|Q ˬMmH(qj&blk{R@3`;pg6/G?ń UԚ،UH``f wu&6њ:qZ7ذb1]o6Mf #QRKX72.^F^ukO ٔy %Ls;"3U-fY#kW&̛xHʉ/F|*`+or;T9BnKsuRDy2SCֳ$2#`!Yy\_( @N3Qe@N9 @*6LH7Q )4',N0lV>&Ksjɰj{Kउ9HݗˆQ1lѵT9n@DpD'ж+`fxWs\f&d@ɛEV)%v'hn\a J&j,ܓL߱2 Xꣾ$$} tC†%Dˑ 0xX_|HG2w@vdx[ V9Fɛe ӞChp:ŊTTR4G!;q b(iVOSJ,.)KTGPR8IAMǮhm:Z"E_*m^9̉dpT9cK|F6L a@옼9]Z$Od"O\MC|d ;x{ӄ68gZ.L]\pl2aI5$p$SqMrUeunzRHjL ƘK l!QPGUg=vx ZW#sTs(3(uK:@9+s 2/㓕[ž -߽e"R}q"b_L ET-CN @?YxٞU)p~oV_ъ@@oD5ԉhu,9f 1*2vTc1`yTۃ5f9mT#[n 8{P' FHX"Hv-U`0ńw Z۵Ke'aKș^w-Ct ]"e"rqeN.(g%l#T\ߋE9oEkۤ˽{wGȜbJv'ܢOWK%h ^PY1!ڪ^RlD_ ;K8%#1$_K\l |a Unt A> SE_N ߉mkyd7JycI'Q,)2y| 3j!#%~'bZQtHY$Nq?5sTꝦ`7Zq!з8Ҏ=\@X(sAdpapmĿ̓HBdFd2~U֣:OEwƭtbHK B3GBqG4!&.kd04ɾkwl;hnSTVIQ[>qC FpV7UF$fZ|zA8{'}$xY4m{n=mn^]:XHHcR5BK72~r#s UdgmyS.0?$,3,rG[Pe3'tpk+pҎ~Eyd.E,!iSQX3V@Aӹ$c4n\+++V^~E}:VAlx&~ĈWJ[##]#]|%KNiؽ85,(UXaj< v[gb5}CRxHއj0|9 "XReJTտٯ2'G7`(zw\tJmDD*?ҨfzB1FAl?&Bd Ci)w7Gbu[`.I'DTV'l$ :s+"ЪʌުLl ; ʿ'Xw66z H9m#i& I)/,lWS%z ZY{#RY2 x華~?gC截"q4@["D55 C(b4SQݽzV3)cO7 )x8D^t!mMr>FW G'u9X(Z5R!@%Rb}Dt"u߾4zhvRKi\)y<83!]T8PaP J!ilAФѭ>A Ld4o2 h9XEY>hiT?ȹYqށN0E sBylZ噳$ UiEDh&0GН?vk^U6>}}Pl`ނP 0ǤhUrm N^X݂'mP%6y%I\:Vٗkͯ/Lï$^NP6h'QAbƳi*5ˢ*)~(j[ILn%R!zZs#odT8_ )-h@Oȶw'{2I:Vp`E=ǁT?gH+cFݣ"nQL9"qQ;[iǩ%K{j8&ʏosfԤ5Ȓ%Qb{dne@`z+X8@䥃jlmGC{ pv| cg` PPG$wM9qW9e%Y1 ɏC}]iIehխij9DLmK"dojq?WMȻ^,!ܫWx hwxLIH4"ws؏6.JؠNa ~J! 
"37hf1.[G~a**YM_ˉ{2z.P0s Fj!qj ZHdm)\3w)hEgMccpqq[`B-]r~4PXKJ lڞ_g<J7()Ƞɒ"/jN|ںOtFQWOEZ 9%US/3=dm/"^͝q 'x_K^^ G%&hS*6粴#PCUA9[08>J`QRjD+>۫翬B Ev&>i[f}^/#S]kjM:~K5ygܠq)q~+_ \2c8ۚLܹ8ѝf;#&/GVfl,Y' Uռ$ר g5ܔOgt"shVq2׫s91: [&>xI|}+(qjW>\bKUD(Icu0dDJΕ=U)Ε#'y>~>yW1A=BXY<1ʟB<`'8YqT]yuu;{BȔECמּQ.J=1~Սtzz4ڪ/_ytN2SnmܧEmaR:,wU3t᷍yF}(~3zԞ0 Ә s<f< om~496)Us{>aЫ:ƙSj5 lШEZ RckY t/bDJ?(BrSsSѻ&巶޴SDAqAÆ&7Ԩq3@!CzDLY$w<6Z,gS{-dfQ3{RMof&)6>t{tƄe3Qe߼-a.yݬ8>J=3wJ-YRR9dv%{G5F]mXªG&eKe.>c6b*Xݑ O-S̍ux!hsH8r4`PmoPޚ0uGi&y[E‘e# WB ;cm} Ȱ9QMɵ=Qa'x4LC{v)](_YK@,o=RMb)mpTVOM\tys_~7Z2/A D4s, .DcH'*ܾX*ӓ"fu8_%wjsc"qȓŕꦗwF]õ(cgOFMv#[z%\{ÿ\NŊIK|mis΅lQp[TZ7h؆Ĩq/D 4 5+ve8(gm )bj=|umٍǵcW*ʈD1V'm۶m۶m۶m۾k۶ߦi$oΧd63g0r *{_ E5= bv+&WS\dݎ>BjMeMj#Rǒ)cAul8N] a(c,#hrq+ѹρ2zcBQaL2?fҳRER$aQ 'rERPTB٬>Ta; uSTdա2M ) <pD ЍhKG4 .;dRa&x/3O{=Ixvwmhlʤ\@ڬtyo&"W-2)bf k&$-GR !o5Dm9%:AaO PvE`R#=H/֋$@~88°eތM"GOS@QR<1lKpWdkqc]Z qR{]G*͇{mE֖n pm#Sa[u@#ץi7\o< =Uz7E#xam@wE][N}Ō&eGSŦYv!v'xԖ[s{"lعwWqbkSm 2{"M O͌~nNUn{+bA6t̎הN"88!KcA^YMfo6;ݼfGYz@R^oCV^//E,Zl0M{|_Um쮮Vna-(zK\-ˑQto[L2'rT_"X̘r.{In)Zo.՜ҼѮɎP=qh3Hɐ@<+nJXJ;N oBrOR /qqۤ?Ffk~Å՛8iCuW nÐ-- ]<rЎ+kIՋ[oDCr ̩;IGҬWF]h?Q'UiVj\?%hQ h+Oō.kflFM.\3b9<82; >WUpe!0chTVNE%K'/GQ\&LóKi(2U+ܼx`V/Jd>>:ZMKsWu3[b8rEZ xېh xn<% Z!N[Vp7X!k _"s֐n83G) 깜> BqK{ dž'ңF8alxq[Lt)'N.b~i<ݺ^m}39U$i23JZUMԑ哄.1MIV[D#&jN1{7UD|-8P raVR6_O6ȝ!NQ?aB -z?(QZFH`V^,oMnoZp@-Aev>2 ZETtfW-O_] m2QSx~R4HsBA]2/> #!}ZK<2c2Xp 1 !gX*miv!6>[-ԭCi16<nT ,M 4@]Dfꪙ"ubq#zH8 @j3@|6gi>c\>b@X+OtUVDl4<~6SkNg=G߱ЮD6fuu\= ^[Ϧ=h<4줠8?ӡ0$cyxo$Zijk@߼ԇ!X0Hq`47ߍD05gD5!hZպǹz}t X4fA &Ƌbt5I^?hKܺfܦYVtO[8c7eL8)vqDԴnv7]LNDF85k!Y B"xѹl_r!>槮1.l{؀r A"[zoI$OdqTչ"!:3k|=7v1QS1妶vHn]I8w1h7lo#;l YkZpTaΕzHa[~V X;ɠАߝWJ85뽰*5g(A +j.ɪDKQJE~5_.jҲ"\hd,7"};//A^շ>,8G5rUs%DN=؄'rߕy,h} Mi(uy2KVw:"Q\][1#?NUR;Ƶ$WT|&Azb\ V, ӜZ3ЅUy %|ufQwF W}'uz칅g); |;D?~k?4>R+-cduqmrlIw`5Ns _/ssSG0p~a#hlGU;!rǼ5E0`] ($?7q]l}3r":c, y }M.2Q eˈ{qE]`~1]3ƾt{6ܷC幨FG9SCm " ޜSMWz5ڭ[)HP(1,ZEKtF8FT,QM}{Bwև?D |@MBe; 4a „eͅy>H32_a2O2a龍^9*&Tĝw*V%KL>g4郫OOIxjC3Wê6Y 9@d4YgFqJ4LHb#yOkti{v,_[Hہ1 
ync؛Se8+lrhA\6&"׼蓧ۨXo8zM{TYgQgfEeiL?{ ˅[Gk䕥~TtaПc]pO0GZ>^ȶi+^fV VMj@5Y i8Z0l{Kk3< )C +Ls iP04.:9|/F'P%=zQ"66Cok5B:b)^Ff`hd|J^XdŢ -,G@#s6ʶ^k}aE&vuj8Xl=/Df|-~x(>ݨIq<6;]4U-t1F&HAhxo:#G>8UžyaX 0VpޛLG7Ijڳu`m\û0 G-Z*[~=;rsJ&r(RFWnb.U0`i>[m*3#*s\J[2"B\ \$p-pU!e݈N>VƽzԦWZ-kfqq 9_o5p^q=yS U^8.<ΛRs麵9H$Ywi4pZX/z0ILIˬs1XPWs !@&](K4hCRtMU1eYܢǎEOˠ/BhN~`C 5?v1cWG/6Z7Ǿ'SwV߻i=C \P1۟蛾\"#,.(rߡ6`-&֝ccбRtJ/ ~>&xBb0@Sg?B 9OYթ6@D @yo0 F{㈃ idr&'D ?:7Q?yy;q;tS//?};wGz9}J\ ~'v=[ã{.XXe1=TTlg};-S#4d>*y\qlD~qySH{ZVe|v]jouk0}?rJ!Uy,j|Nb@x ,MZNGsЭp7vH.j,C|O[V%"oxm"<} ~lQl&<(|.촃O?v;m;o;1N+ A)VE(Y;9xozB=2YݴaIuPڴCDli.+96}+!*6%/{T#MJ n%L<, kApEZY@єq&ZMѱ=G)wx?'3FbiOb`,'B%ablH%ZuˆG@ FK#Á %oEz߽q|R 896kJqs'0r'UW);k 6I qF-Ihb>ԙpMȧ;]eNם6Cg03?~|=]qa ,ϟ?_V+>G$V$0@.6aQ R;Ŕ #4H"ߑHfehE =v!>48NmgIw՝ nov+Ot4ke4-KڛL9o*wV7!@AԠ֒9 IPuWNbx҇]_ Ay{V t@'1ݤyA6D8U4b&yPan10gi_MAG)hRLV9#Tg%PpJ‚ @sZ.tF,keP|@B [guV6\k@x E6UYyE" <2ً+#U/U6!qXB,VKx7'7//H_7&lΔіTBx |~tdɢ7W-9*T֚탱fs>X0rЫI_ז˗z;p1'.՟$(^ۉ[(,pVMs؄TܗA,E v#Y;VRT'+B 5IJ,+{u}n#P)*@stPoTaS}8 l]h"Rc'jY#T!da7[Vk$(<Jeu9Z2>o- Vڎ3dؒ"S٦TU#Xv2T=UKݰsԾCWﱣ2Z|e i&c"J>K.$E<O2@C$u9j47hN+vgΓ6vc rxY`]6ou˂ĉ+Nj+::a9%QW48ڝ{aFx#J*ٜܱ)[UdQ%rQdB,MDSrT9rւ;=!vD 9o>t$WS(P $&8K|#&ްoŠ3˲NV؝!*#4-/T"S5yB,8Xڌ+L0-bT//+ŒSp+UhDÛV4t=Njyї-[)gxYٺ/N+;pR/txO,EH"( ;4N0Պ}l*0H׽< %x)@aQBATbC`S8%|*"qڀE1h8AZۈW9KwL~1P'LspvMLqf:='ln6L)+餥3I7l6%& |о'-XmUCGT8dҽ14Eڻ ́yb6lgWUe`js?ִ[۸ax;nqwm.rR$}d9ء5 l!Sk+ԨƍG$s_q\P a1oXqX6c9e1{ơ z'MH&l5-@kTb&f;1Հ5\DCt6-Z$JocurrKќl@njw:tձ5W9+5R靱6@MIk2q:s;Lq;ԭ0_8 ~j',Rv6r =n'Z+aڌx;7n7ԇw=>iNԠ$3Z]-ou `Τ)m]ySFs*ܭs_Xܾa h=T9'OeMZ4 PMES3~@ yL*" ZuRLS=#l"YZ1&$RR c\)0LTfôWGڨ*^{ETHmNX+5b| tNz7 h4dzQd [0 7BӲso^T=|Y|OP4t *A]IƠ_#5tz_les1m||Vŵk/(S>bB  ʦ@Txtϒ މ;B,J?>wsՀ{saeڑcݸ!hE\/*K{aP(1tA 8G*4Ufb;c9{Z\8)-.vuQs(wcfXb[!Vc |xV1gإf굲5??Éa~Q+_+=%5ʏl+hy*@T:`KΕ]ҀRaBoIq<(DEl.xVPSGw$RޔYA@mM𫕬j1x@9TMK6 9̪ Uf*ȂTn{zI>h&Ž3p(ZEc> ~XԼtk?|:;Xc<&+ZzȒpMc .Iτo,n*GyIbsP( 5 <{!?W 1-[ c![GqJW3W>(V1:SU h1o հ?,[?V-mWxH܄yvAʈT`){2"" w+98 à/?ZBuob?-'1&?QeT&lc|Ja~ݤU ,) c 
3l2RQ\V9V?Va^3Yp#>++܈w˯,rbL,ƭښ6k|҉)7kM&mhSj&N8SHčѯ:30LexVh(l`Z(/K]=` ,wgӹSL8w9> w߷B?P %a˰l;A.t^SEzxw z Ujo)~r/rAH` 4D_&VD/1џ~H1џd"B ?pE4DD&=2d9/LgJ ?-dJ "?14G>I/<ˀȃаPAzh/Q QD$&b ?Mz/k"]tA"?:/DTew*-u3p=Y_ 8[EEd#H~%c)5ျݜvxI)e^i!s+ac1\e9l`"U"d䐦BPyΑJ`Y]W[O=Ja͑"?]u)Vjڼ/\Y ŝBzK^.?;_u9bu\*fC zY=L+h! `0ń 1n&Pb .D"y72, G-/)R9201EKYEΐi'i =?4Ǚ4lcɈrΥpub&핗G^7\v鴉x]!Tl_m>١ru`% ~^xw'°*)Ewmv: 6)Y~7Ѯ&2x5üY?2TQRnTƈLh(3Gp /Kce([O`xX#Y;#!^1n j&5:ݴaզֲo c'[LyYW\ݱHs) D ]~^hr)fs<[%,Jj ȅfӀC ЍLHāKkga&䤦h<Ȱ+Pb1y^/N,YILq*;UpgEafijR7~Slm[hV^W. $+Oq6ܵ5L!*l\ $b@G ڠ&5AT\)f=!<(I($c*见(~VC ,SS/QN9SvЗ [kwd*P ZP5_ P,O'9vH O382&ҥ_.bSNJK% $%S&FI2589 eڠ1ryDy I"nfY'$mS5 "tVI]\MW9:[|R#lU=Œ2]̕ݽ&y7 l͝ݵDvIY[7IUr2b(i-A6}`B0TQnGX#RWApZA"l;AH ~vk}yCi*u0'G ll7[LS ?m~;ihܟh3XX.ѠcNxHK Y1PCNoqfgA)1)YFPJwRxIڞûϝU!{,M(Gz*F!b7/sD^Jby@vm]B5udl(U:ggu7ZG>61" :̥PUFk/Z!.SR'+,SKo`ͯU]Q/g0BpOEE#~}LJ N4q!&Ұ!l9&s:%pڣ*VxGٶg% ݋I E/auM%ۈ3PL XuŨn$H %#9iٴhŋdMRS 4|j#0+%d{S˼i"n 5 & l.SU5ƳlLUlΗuH6+ 0qkuo{^]ڸ'Yí=߄*\ja.@*n,w.c4iQ`Ng'EףA-p(or5*4-f BNO1 - ud15{v+KՂYMbNX{Xo{.l. vw❓hb0|2z/Z4+(bҼz^hi*͚ w^.7 &[Z<ހ2 Xɇ\4eĂzy&RasOҔG F&ų~Y 4.Aɖܘەs#.9&Nkd5Ȧk"i e'vE#9y^jSĠa54]'5)Ґ:B}(_RW5Wm.wאu9 ]n,# cmǨlW m;$=%=zsTVTDR zS z|9Fkb m(:71' ^9i4U;@`'&,n醰săѰX덫>G(ޭax̩Zj&6 W.)j" :GN%4BHzIIv5xȩ|ueF. 
Wh!A`S[B;vb X:.^: p1Y 5={KKF5y.(ĚI8)z}4~˒Q?%c8yq ,}ċ?l>Ȅz.:,:k O5M_w>ɥND1ŹsPŇԋmXGŸ,zy[Hoy5O#-5Izm3fRl3hrW[66uM 7Wȯx(F"mh%\oMFV}Ȇ"jeQC:WB=\~ Ԅ4&kM,YK@DMs{d2fONOER* ׫%%)GF9x/AnМ=J0PNYL<囻_۲qC* KUSx+7g 摙P1cAw$PTbJGӂ֬N2mwع%^&rKٽt/W-㊽vn3WIʥ?M_ur^- `^\A D6n*an={ .qe'EcUO@v;[wϚmsT^dK_ L%x ?|Oh{%k#W̺+km"E)b~`H&W >4%P,g{AwKn$tFe&[.t:ꑙ8徊ǂxY`9 fZh,U]r81Xum͂gJdv^ @9;ŞBT(67G4X7 -DٵSuYiإVqj; UVj>Mc_ڃwv+;bۢ (kJոMRp*;D JyFfXUZ`jn!d_vnZ/c )Є#]v ٽŦQe6?g(6ܞBxS^Ãϓ%Qb ,̄1lk C=Wb+%gc$ c8K>jJsOE$cЗO ,k)Xx$y F}d\/Ãb#{{l,+QP0%3(߄BɞB dE` &$ (,HWv(=FB3z ќbO @wd٥9r"iDߤD֍ g?V[;E-.A5)@kWaJOC3wiZe3pRnӟbЭ٬i?37R:kmӌ|bX2ݏ /홞@']cGxpv7!5Vf'=f:H 0T{ &rg H0O 6wlIamG vN=Dk5%Ƹ C7wyѵ/QԞ$4kgvr\WF{DřD;HR)QzZҕI!#9L)%:Ϯ_$k;ߟq\Ći |[m~s=|bcGcCNJḾ3CnxC6K2LOcid3Ve819uݶʫj̬HY{2ot3B~ks|Mշ7BHKW:?ssqܴ3-326}eu>{U&b`@|3X6 ǕLaC%\h#|'Nhƪf>1V7W}_t$&2wĂ&yΑrѮ|hWQ7619WNή~[d. )ڻfݶU?˟hz>S>"f^\5 QOj:~$炮Y/h1<蘙VK& N(E'J$ayCd!n g@9fЧ~4Uw@?%Oc\O{pMvGvP4̒T_SڹSU&:/p谼oԎVۂ+:Sg/w|H4M` %aNj (ր{ߛ Qr[]{B#F]i RĶmBf5(աTJկQSJ|JuNEN} /Xf ;6U/OjR8Z/^ؙRz8 b-WzcM|yb҆-qM]QHW[B %OYi@fOV~{Z(w?!=_)]z '5'E/,7{V(;YR-1mAwn& K'+`wX]iՔ:87gyg=Ru /~1ڽ(bYHrQH_6*BT 8ڻ:/%M[[A.YMWa-tQ[fwKoȶNH#m;\tiLYsI_JFGo۰*UNbk=X'"UӴ/9M& Zb;Zj7cdʄ=x?fyB4&ASB H"us5|Åj, _ +.NwT7I.!pN_7&=ڱ;ÙGpH~SjB%T E\ĤXrdf8)0%< ̦+X5݄"S:hvM=4>G3noM9ӷWYCTaH/जCFl3JJ!> {&c=%LF]̠.A HչӒz7/ޗ@5D,%86OBTH (T~9CZ =rT>d^uQO2/ږyhxI6&P[wQIƏ *gXb84#{ZHAw+P0iB㜾HBƩ`3Sr綢(bЁa= TB΄U3q8q$+I<;RzPC\E%ĵbsRK%)FczICҷx%4`'xg}4KP~bd{]<5`/`d%8zȖc+Lhr!ԿOiQ3jZYd}FWA)(^NzO0fe=IIJҿE.Md g~>Pz44v]Jr2w*ڰ\^!ݧ~CP{&[!Էx1=tKs5=Jݝ7S~#.^'ywZ{I+q3+nЭ7CSH=w*}RDsp)=_0  ukGs?w 85S˧`$Vmg~wG{׭W7>݉Os2Ӧgx'\ڦqt[cDn\d6zD79Ԫ9L2.X? JWh4e=*721̭Mq u8RCz6:}ި]9M]-`-Z(Dy=3%!6T<]2NcAc g@=so-aG [Q?jeE-/>H!Q# JŸwj\װ?La|c[p.s9uh&_#1=f[+FO!lS,VRVSvz~hf.v`=? ykؿx`cm\]NYj|ddѓB kp|8#Uv_)/v; 3n1vcҶBxܳ<_bpY[ѹīfÊ>~& OY}@t_ -mFac;, +ˮ !Z\ 'zR D Yͧ`;sӂW4! d}Dl^F#G i̲EZоzxL )YU.] 
V.QcnX_Er󧋛lbZ4"O=Rc:f;+*y+?'Zա+N.V}jFfy3,OR5.X.֌ElAne +`?D =bʪ7zkjXn%L5h'Ah:R'e_X}hLC5%9=M'$wq*oGn>?u[#ĝ3/=ഛv 7\k7`[Kk7-p|AK7)@;|B; !C7Vx#0S O!1 fF1GLA?Gr/1JyF&( AK`<xf<}3`Ͱ} oI*5ivBe w{ai=_Q}tSSO  Aj=Uk{J?57?5wixol*xS|BA[{sw|aC~3m3 }mPsiAPB@f[_ Xnt9wTGwfuAxܮJHuVj.KW_Ћ5+y7)0K/ ou^ xC. Py}|4EɽscFtJcMPHMa:l$s *#L8<6D`t})嶘Cޚ`?+ҿ_E^W,C-?Hۼsw~ [p-|.h"!U}ChQ `+]AAP!'A9w*!H[Zle{@f̫O͇3ar φ7>`F"wXD~InЌu*3oS".Zh.ʳ~OG̜zH>hr J7WamGњ`L越ຽcl.Y'"ϑFa'ڬRY۝bM"iFCh}唍;mkX:>AAB}N cF3͑{Q}w!C pzf/W+2[.c! '~zw`e‰{2;a5[fäE16ql*?Z^ ?왊CY&xE~/e4MXp˳OajDnSMq!(t>Y9W Am8ݔ,mON*^FeoJT֒\ ۱k-^ě~jĩƅ=8e/q=7]"QX=-TUպ^; 3**` [6|kڱf?HdK(DG"v1>YLQەV%$B,zCNn !lНk4ٕb׵K.oȝxa( (Qd+Kkh@mAEAPu4!XK;Ӻ%OK0gTV)-2#R0iXa ylS#z7*D=VLF҅PHcfC sн\*OqnねxmxbP9 6IIWD<"a:IRd㡅8FvdYdĪ爱t>V435eUYIEͲǟ|W!=Fr=bBx7]nV} є}}ȞIt"V*!/蟫O* )ԪvVi#s߾~̖H)% IUܾbǡ5nu(>N=J+OMf)oQg:n[jA-?ʯph:4p;yr=.R6GU^P3SsKvɹ+Q7aպ]H"}20)V8\gwb[׳gTOĜw&"sDqJ˵NC0G_AVMi0^mrUVۺQbѨ׍+xN R=9;q C,Kd IEmgȂoh_vhn8gm>TL곿}}2E*|u2ou~l/XS"inʿwEtЛxi|ʗA1\Wue"Cjn]5M<j֘&. JN>d(Ј}2M(o?P_4$o\F`[+!* %Ԛݹ6 ^($]~ͱ6d~-_jgtԞ^j.YI{nj\ݸjR$5-gT=\&*í6Jn;kXNauӍɢGNav[ٰ [XƕTk 쉻ڼ(?_^ËsH=/I;7w\liZ6uqr.d`FqAzyS]@&⮫*ԹﲨFW>VKpUNJ[YݣE+% ٜՆEyY6)[%SN~DStW50G$WiCǪ[PBn}\}}g(Z:rn驙}9!0[?џl󫸺:o8Ԏt?F+@UHdzC<]. etxd+yI}}\\>\?^pߴ߃l/ϗ۲ ŀA~iI+mjMpoxzo4KF&+l -7p{_Kw(@}y/<Z]5}c]|*?NN)m;/77jr, ~9)˷nKUքΏ{pe￧'l.c}GO.ojEZ/s˦X:*XM(ӱ9LbYjH56e͘ _p)k5~Y"n[ U\Kb}iu}C%ΜU)}?OS EH͜5*-~A&ؽvPnҫDt>N{5v4.]szOmIBE-XckQ 5y0K3C}K64;T[( b(4$gz'];AYk ̎|OĤ p¯p L l0zNy@ceBַ,e JGU6n50  ma,j$ @z͜PfT*a!ݝhߋ&zXп <3Z@Q$SN6RSEvP)3hVQA,@2v A{ЀJ%G%o,a(Cb45q;E[E,D+Իٹ-@c]I3Y B"5e=CA'<5`i Ikge6e @ְ2mb (`<=q֤Vp1NZ}M}憚Vf氺-6F\,5 l~t>Q?}9gܳZ-*"x33:M i羊>X9^4,Wdf[>lڬ~ sh -Dc>5;eXFI[P1o(nf턧&SrB 7u9BpA2q+ ZEF /hhs *h`^Z88$ny, /D<h Q-t}!@|g]G_2J4 Tc" BYN˙fAV  uEM/6"b \xgų1N$;#߹مQ}%H("&aMFSaMV!g3a |޷!X Punu#/{ƠA4٩KfF ~0*3B  &@C9R>:,;.gukѓ6׆blb@_m:֜u/]B;B.c_GmԤXBlz&?h!=<=?2|ҝdGQ3 &,hY&,cIƉ)C " j.Upp=:QVb}+]{7vNkt8)5qȸR@]yFӕSz. 
DqMgC黋?Df7=W&h*X h&ρHLu2l5nS:A` g2@$ˉ_CD1: M_uwc^L*4Xf q̅Sq<,9 ]?88ȸmIJm(cJB2?Zdf«!hCBkK뤘V@S6wkZaC6b]X#x ;?ްk&TZZ=r][,ݾ@ą5}QLAe9`I-D8 dV hL(r<&]Ƌ2@&`ݓەd$;H*̈́x'>-c``)nF;hU03$G"i@W%1pƜe8z&p6!77#U+t]PԪoI Ȍl& |jӤ Vq:qIF ؍zU{L3ɤ*5W;p[E-۷JSX^eUޑ-Mҕ(ű/>,YF^?7 o?B*r*{fW=!tS`fC4@w>]eSS՘gF tz=#;be4KSXߚ'KXCK9q9ƀQ_2rhQݨRĦ)f6A3˽05=];3*ƸIP)֕@aplbE52)UG!> R&"^3a,0#!,-4yl d`jlBy7}l{tA`^> 6!|xE($'s,p9?_{yj4CU'4~(܈6FYg%fڗSH и2gy]"J1;vPCE<^"cKUaK.)(\XkZ"m2si뎤!6W~[e8-kKCq3YDŽw6^+(9Yts!v,BF\zYb>dg'pc,4nۧ%"9QK9 iKed^^VۊX& MQ;C~EspA}'ێ{ #}!BD2:A2(*$ܦJl*!33Q] yg-G&<9܈Qc(z%4Qšń5t쒋}-!|ZP@467L+ _<**0l'Aƶ WXFX}]{0&g=e+A 1+WXpGßr~ѷj攦mT 3yk,e;.<2 l|7-W5@p}.Xem˴S]G20K#4]ʏWxJ ^\h+ е5"W3+9DjIa@PƃZxѓށpehp(YFRXg^`Dy;wQ pѭ++tˋD_l*'&D?8 W#[%Z+_¦F̨vGL|~߹iDTIιDg]B@yuĂSJW-ۤC*dbR|$wd^Fgl,ZdXG GE KEɖqcpgJwGh1L'/^SY:%䰆VYi1nd^Q{ Wf݆ _Y\r;Q'6OAA D8IM1T'/~d oZ.QD's)Otd_ Y&ggc u_'oo&TxlѓPrǝk,Qҋִ=c9t5 *995y2t}([`@-c#ޥʤ5>9oAa$NhWȞrslݔ'H\ố$}L6 n :9[e?|D];~|^EcN:bz>WIsLfy5<ˌYu"L 2%,LaL~+ 8_ C\LfhY5nf&KBlhdȌ-ۚlp5zc.&i41" GgrGEOyԘKg,PBw iOYBjq^6);EW= Nedܵ +v!>TmoLbGBjՈ+jB#|Q\7SAIŇaY´BS?Sڱ|)uN{gjF j/4{|w$I\E;cO@9Ԏ=]Gl}6PuRF~0%챟:_/4{r⧉t%%2/ưylw$6j=簑yvCggd6ڳA9I>HGiUu)VYGš>p@!|Fu/Tq׊쑽%aw3d<{D~P'П mz 8D5ދI]lc?*&YzPv[y dZ1ʄ뒩x r2Ax] foQx_>sDoN{OypVh1!a3(g q~;seB˨Ǽ vDUiC4.jCL y_femT $c,#97$\Ӄ5Hncц㡕P|LFvg$FdՌo(GXۡLR뿄n95pZclbci˷n]/t2ڻ9F]\I!K!Р݋> [TcgtjZx wub7oaUm/ %8"S&RwhBҖ$ߔBeRYgFȯC=TCB/> =L p"ynzwَZ3 N9܉v-zi'ց ќrEftʠ~`Pn9[?CsDU:m!r7>`ɳ;]m&;pIQ[ʭSZk@:V+P 5ejYoTILj-e,:F'ǎj餌AW|C6| ң.zF  t&yTFG)xN4&%ʷ{VmJ;;M5Rf/^y4O*35U+_NTbS]pj чIot/TY::>>sW+d\qrlZcR=hwy&gׯ1-.I#ZP֠O|&CU3&hd+Xܵ5k``0$ Z.3o!=LMVxfy]iW`=hG矧KΉI8e붙gZ Wn3L,-A0T6+n_$Qʘ{9+^o519(]D@Gz~EZw&)d&<1 \uV=Cq Źkx{zk4,>?V%DzQ`K3t.?*1TbFOf.Mҫ4ɕ^2%uֻk?1(yK8H pF&Wվ0a8C~(ᮇV~gAgx d5u"e#xDB{'o=Qk\dR_G K)(CrO$egNPN,-F\(VK#GUu;=R(bWW,!oOvCJ*ǎEֱ}3&N4=񘩍I ddE_4R&']as&+3#Z$}oX #¼o|rL K}u8Oϊ_e;dUY<8/uTԤ.yl@| Gs}OLm~)2^\p_L-OgTpJפF,ŀ:Da]1ØKNl->5PG%_7f[2BjcWhMU[Ԉuc7o&:Zj,<^* y0:kIF.5N>)5Ԛ(-EűS_G2pjeSg o]/;?>b?TdTHߚwhFȐj8.ysaWpލ'(}${taЯ׳f\7e8?W9 J Lb.a%U?ҬY3} )ϯ[HӽS5(oqݴ_esH 
zl첍l6S&"gz5)ǣ|R]^Xdס̒;ĜtL JQrցs*)sVP _dsvn{s.`705U2n$1ntܻ 8$ꕧU q0 ;[wBcq*N*QԉLP*UrTєT%"Wm$j,N"$u:ҡ{۽|@saǽ|:Y򏫢Tm{>U5]6W4ma}T:zSe}USe~ڶW؆z$j]idҍQe,I _y`4 c,iRS:%s&~ܶEzxēS+RSXI3Q)W:HDT#srSY|dtTW8UlESJt-,WHAWiz,yTIO?SZzTWJWyϓR_+5_\?/|՗3NU|tPUT}tJk=uTG}Uط}߈m_-ɳ_ }MtJ˓G~HI|uizjvӦǷ*5Z$[I]*}_d2=UJ{328pRfd[h`pG@J#Q䤪>}[XyyjAj` -WRo)ޚ! o B'ef3[1fD`MPCP-:6WԖ|rMŚ&TثԟHGڮWu-u(.hNhvlAj!ϙr^./xӲvkW*t4OQIn.כMOz]>p*J߅p2nAhE*鎰XU?փ+b"5N6"lf2+j8F(}!vs{ Bԫs3wZoDBOfY?Ox>q_LԼX-*ؼJ^yt훆3S ~s*y2 -.erݨTlMO )b=Kk,ϒMmT굊[ qV[c]9 d ḧ́UОgX#A?#GWMIV+UO]ofԎޥh#i j\BV91Z]6l[N4?ŋ4K G7OzWE8%kNFpa<&Hv^KA@!o` !3lh(: p f"[x'DOP6܄e,Ke۶mg۶m۶m۶m۶uO;[U*"sXcٍ&8:v,%դ"tǛƣ( =?#eO]#dTpszC`l=2;.JaұCht u Jg4pէj\R9RA{5YfQ|HkjMCy*oFm(6s*f #"[бV65"WjנhWۂׅrStBchXK_7lSNS ,ķanUi=Oi:"ODH:T6j +jZL}#M[=Xv4*=kUNXGpٳv0gw%\%Oq Uv 4t4kQ\(Xn vc:XmFH5@~.;mI5w{ `7[w4cvMGⴕ>:eH!xsUAj$ͪmS D>ҳc) 7oM뺽 >_xy;G'3]Ĭ[d:AIJ9X*OɚQCdzPǢAov p^ɺ3a~*_z/ փ\^_NjQ> " S@hkEy-^?0gWidB)š(xxx#uz:UԑWM`Z0'SyѠXKHsԬoZ=EE&yv7z?)'yt{^Ea/(dL5"me+˩}.<EdǿCTXɁ{5}fP?驰[\ף+V4ּLq6Sq]z75=ნ}5fvQovFVzK Po3DUTٹU弤B$8b{5'T7Ḑޯڙa {66+`Y?.SŎ*@^!DGJ;YjUjnE PRD>03b$Crզ?a ès]ݣƟj&@#x;لÐw)IcY$Pk^ކ]oڍtC֟_(P_Wd͢.lyb)Ζ+E &lNisTJRT&,W8hpgΪ-K|j_?m'țG*kQtӺYi $͈^U.1 M+e0T޿odIXE!9wJE^pc }.rl,bhx !C-fo^P8LETeP,h{DѿCK Or ~ϸYP d\/kMخKKV=@W.JuL`8e `kÔwp.Qk&r^rX\ѵQا I{3тͲ*Ekp|oh9N7RFˍ7Pыm.';//ƀ2DgtU˽ laxu&y$=/9]_.B$Rx2!X\ʞ6{8r0geBo"LpJ773zdY۱PH俀b]Zw,N-oC2Q \) 0J 5C7t,+2nOy&:P{Inޓ;Ni@C"> pP@&,[ޜ@h,?'ĝHiXZØl>-tgYmOG q S>37oz+; 5Q"rd~t+' sG:=^;rrm qe'y2H/:,2O+?F{L=SrUc\DW/M>{ Bג^[5xUl2XiZų6:meyΠsI*pö-YhKKw{ÿݙ*hV:-*!h[ceCn+,l"52ͯP-]vUހ> *P3uiH=LˢIKSNrWX2c,WhQES2ՌaL4hdG/#@u k> P0!A-bg* lM)5x<Cs)qhf(CR3U'1Mx3m29O, ؚG]b(^9wBCW?ϙ;)4RR^f;MäKE9'2g _܇<'ͩʎ:Kc߉hoG9Y PC2gYF7 2<ܠIpYk X 2ߧN6)^ߚ&P2i8?L-2G<ޞ GD%YFmP|rW>? X&Bfli@T3AӸ9OXFdIJȢ8ȂA[≄9[ew IUg?QME(b?BRANeܫڰRQlB.)jjq0b[5~h]]Wt xO@_ ($oպك EM3"hv_S%R.fQF^94CwS)DIС˗<GQlH:negK\6ڊ٠zנ֨NluA\aEr\3[%.S0#nq}V$Ќ R# ad##5&7C G/iC8pg_H[dmr;y=|`kfT=O2;]K﾿k!s!Ue;MI4LZk3vV8{4kb }ꀬ0L6un2:vH J>FjMu,bgD,eC"jdK,hvDţ{/ZB :z 3h}^1. 
F3 'z,ڕrv5fRCTXT6ˀh+Ƴ I7צ}iF/K\@!p@B|-0y».c$:ARnkzy#hǮi*eJ}g w&ոGeeExm!,r4q: V[8m|ɘj(,Q?ko3"Qn$*H 4 -0m6jE >)) .3HaԪbNȕKLB(U `[#*EQ.0M26j`zc 0Z0_o)jd%OR@JvFط+ ӏ,8/)3`Add<>kTM  W6̺-KA[@SDtG nJ-Y4Xo)a,wBU; _DFrB,63NX4NbyWoJQ-S>B~M$+0I)#(0[`*; m~鰙hAw$ cܪɫ xr1j$+y(;HjiFMU_%zMQ6T#j.^i'|%]:Ux%9Le3ޑY8~ xY7û3x{ηI.6Sw2ަin6O)l<@?\#ѻ&gBJnM檞R606Xe`ZZnHȢXA~Kٺ(zgJ熌G\#Dt6B Nҁ.h\D#jwN}v{n5߻Ht,8 5JQ;}Fs .TyX^l{DI󱐩֙{A;4!i"& Q,n[YE4᦭v SA\&)ɡmޮvh(- 6Bֱc͡ST( ϨYEbq%&{6v F "9Wd+~ }QŅbҫU*`ݩEOd 7X/%~L77sOxhs5<<^k9B>> _N sP)k٘xJCF+ W 2=l1DkXƥcplj7]oDҒW$B Txov y<_4Z!G˘8c+Pd'Q3&\kPX|s)~4ō,g#,/ws]i1D<LJ99۶u;8K< 6!~bJM?j8!N{.m KFe M\`y M#cOA$nv,9,# \`g`+xh9x^"!3 4᭢gqސGQu;gy访n)@4I|#]dv6ͥm'߉+YMqL8aG:ٚ,- euTL(.pոVi}bP3v\Bj2#0+'rL}?vt<HCshzz#:0'pzt7;,i'1qGa΀IqxLjƵFҚVW8dÓJhHyϨ2nccÓQ`HmkT 0?&Sbd)`nsg?4LLӹ%1N*Pwgq[Xu|núGnEz#}1f] ֹ~W]+s Yrw809JZw1doB S!zZnWe݈)x I:+ILfJ|Phe0Wge5HtӴi/l$}dysbDiЦYB%sftkKݟpzPY3-'$Es1+u_Wy{"Ɔzg=yL)R<&'䏨 TY~5ҲҲ!Qīrw}p"n2= G nTJ֩ 䐉󽔥u:;ͦu7 xа DT1(F/D{Ա8.bd5X7'Q7͡D"%y=<ݒqteGSjt!(u';S⡚tXءW+qHx@G/~z-ayAP6!1.P8ޭ<8N3`n)%Fj:*?ℕMQ* rœSK jN; m. SFB]Aa.66|ٓ}㷹L(i4P|Yvcg}1uKE|VxmZ(p8. 
]ffچfݦR4QנTp3nmvͶZ}MObIJDA Cl "䤺c^wyS#ũ{(B.#F-bpRryD muE`~E#'ƅaٺKאp |[HR'cA1ިǜ_PP"'~!f3c'/X~N9v=aO |ut~1E unQ , {iu˫16'"DفTSOsEV&:BL.un 8MX2#Dh ޫ6%9 I(o6Wlۏca1ЪYԀ؊X73M?O22AwVUipYmf1)$V>&RLw*)Mw% '81tįĆ8QI2rgMnah=I鑓"C&W2q=оv~b4lV`KHaqvP"&8 Tݾ6)plUhVi]HH>?-Bt!( ^in-c%tS;́c.W˒`!<ҙ.963EW&=7벦Ϛk'JYuE,8dzisrz 3`MO09e+Ri\),KQ)"7B{\V{42}Av עFtl# :s1 R5n \:mog Si$.GR^Qӊ°K~)߄HL{,Vҗi W|ou?^}pmԚ?W h'fqkGrkF3[n'֔Ge)y G] S N-t÷19Hݶ&Ɔ.>/?c5B$(^*ǮۚRkei!uuތՖxRW_DbrH?(vD- d)*r+lɵ˨UA[Q]|$E_62ryrr^`8ϭǽo3eк\2HN,䴸rW#+vcsRQ8Hэ`,ͭ''MwWH`w9[?-&=`%nW&dx{<_"&>|";W}IgKtE}9܌S6 OEB'1 ޴&^@a4}B8arg;!ZU_סӘ= `Z֧R@L#xZh p[>OLSf0<\ّr@[i,L펀Sr>K?WС'-_O9W>#a%}!RboqGc0BM=!f1o8ݶc Nxp^}E&$Q(ضNZQ[=-{e{k8$n4ИZ{~jZ]Mud t?9˯<؞e &dKhoL &y҆ - svZ(qM7.3A`LX~z^5Ouk>Ryo(5kU M5rrd.rd+Wjk&3֎@d{O?KroR'z*4׋x1|I6אC1*}C)W螥֜<.E'5-=.\Zѿ$?YQSH5c1f$8y(Й: ;Х̂᳏xL2}ʑ L( @ M(`y`p1`H%#%hd*%" OH߁ooӉo=HpY_ɋ#JuZPwuT;ڹZ8YK?B*"ڈ uCϢ[Κol"wשwPJD}S/h5AM@Nw~HXM~{JFY<۩S_p ׬.k~S*߬FٲӸ{)3mԺxpj~ot& QlsDbN8 3Ѫ %PҗֲMo|v6uu4Nе) Bʊb5>"E$Cň),=,Sr^zh/g!ML^+ٮ ǁ͆+d_KҀzmljGfɝ֒IYq; æs͝g4S.K!֠TQ! Ÿ:h:|v3CJzE~V",%Brν7#N5aqXvo>XK bOPH_B\%Az i[dF;[E,BH(p}N@*8D_mu*&WE] # ͣ >џݣ;?'8jڧXȁaO. -#u7 .L*8B ´g5W *7֯v [qYF'a֨ c@1*@̏6{K!Ƀ8_(1IKkL8`3RfM=H`JDx&ūywy}[nxcNlsL,2 kO˹+H@zs@a?Y1ޮ eOk91[&I;tc꤃'K#bĺ![_Bn~ 0}1xKe+SS|m+J"fXa|ɷ:?'ؠuךP"R[lhP2qa#1/ uSpv]OX<:^MmA=cSz6Ml W=k^63S*s(Gt#*, )lDJ-_%^a##p ʢi[ocyHF#kf|," 2ǡԣ2Y*'Zz(g%"d2|^ LF3>6sgQ-aw-Vɡi.<шCČ`}Fk_F*QJ,)G(% \I xn('EG|S8|Lwm Vԟ{ؗNo݈#tlWQ A[=ZIzU` LkS[s+"[8ڵfIgxLcǬʖD\6(z|0P1'ïprGSW¡ 3XY(:zܲUvfWv0y`w`fDEifC

ju.b%z|qy3PдmZBb+ {;y}@xbNEE?3,?$!F-3 cc)@+! H^D":'S0aG/E;?Sw|WqyR|Pg`%ӠӤӨǑcqP@4Px#'~FXRatjNkl>.C=oLح{?)Ѕ#):H`4)ɢO!Aqi@hZy#((0(}#QD:x֐cᢆ*2f]a.3,7/;SnC7"PFx%7ȚĮd}xJe RJzA:ZD>f2me0:H ⲱݯzfCNuP ?}G%JZ(#:k¶yIu؍tF@PjO%&[k0]dh-_NͶY[_:EG-'$|Np̫>\-j}/ M%ELGo #,v&vN9[#`_'di+JX_qݬ"}=y$Y!j!Qأ _7tУ䊌K~MdeJmvKB$y#i@o(`4H0q`xQ@"-PP?L0t5 =b91VkJCrf eX?P2fHlT%OEMT4VQMQK K WSYCFO An0z:!پp?9|+ps^7l4E(^% fO/*]\oTKɯ<Ҟڷ(J@bBOhչTrGKdFxIURqswSlf[ \S_ѶΩ(A6ElA4ɗvPu,"iڱĚZ&v6BH>[@ Vd5D "gݯϑ258,ൣjV:BmQ'kk3'<=^Bۏ q K?¤?PhhuJj@Q"I1 5Z`;d*G~{N3Ibta{-0*D>Gȥړeoqg|_M¾*DPRoh҈MR+aOr^ \Bpg$ JCJ  # h3O,6:o+),ԕ0Nl-wx.C]I``kʙs#2D)΃CG_ 2Lvl7` 5oRQ>xXRNLQ'i}ʘ 1ut?M3ݑ1Hԕ%+.K+UW%:'L1o4B$ʀ,FFOS/vMGʇHtIF7= :Ѹ15]:~:b%C"Xd|MEBk>{UdB>eуBց);cd{wYUCD faY$=)8q&"]C*q^ Q]]pq6g6Rti rsu}{ˎ~vvo.i;J D3Q1yߟI&Џ0u#3$nЬV󕜵V.@C: CMohd1 |8)P[Œ[Z8םD/CH|=+(ޒw Qڟ栶 9 Z*8K9ȜB#pl:՛W!жOYx[Xuf -WwkEovU#`ey`;t%UOy̫KC@XMC .0,@tVpÑfA D QAVnmDT6KK1xȳ?Yϼ(s$ƟԤ(Zխߊ՘5aTsĖʮoj~ׅD)C>/Q7b sp(<Ԍ |Ko>hH6ѹ¯[4?w8豾J) r/6cE51m&/꥙pvȂXO45 (Ni>m p2_ە(PH &SOi>KjJjWJPnT_鷕~Fߞi*h笀:>op ]ŠI7"=3QL58js# tU>It'˲[?;kW;ۍ\zK O0N? Kc__ =jPcT\8YbmEUb JaɁq}|2ZSp PJ6$|RmryDF]mc4= d3qvEBHI]U7@-ՍNODQW@JKT[JpX؁qM2,%_ƻ2Nyb,=8[lV'`s4t'3ߏ a]%Q4L"x*}tWDF慜19pCUCoT[MmF?oP_#;RL$Z:!Gw6)^tC.A<c}WVe{&[BO1|i6lױt5JAS $q y3{ָ`ǾL$N-e/f.eSxA ŶWookS-%L+tl qq]늿GR1%ؑѤ!UAm_Н&Ϳ Dڮ`.k5CVN\n)qDTFOm3b[1 %Gb:HXwA]r YvϢH MQGPnPl)0}*K \]vwv-)~NOXgu#i|"prREtp9\jh9eM(P5񸖱rz撆mY#u(^B}W^y[b{_KO]vmvBtP^9^{Nq o=_z5&; ,a`˩lڣ , gt[QBilBD; K@g2] Is jMG 4OGZKH?220'%,o%.+e8Y38^sЦ~{v:Ou{&a\ӛb_V;Frd*%?#i+GKsE@j(-rOт.,WEF"K WdѭST8XyBB7_t$G윪,43p37^wZnȾdR^.uK0"9,=ƾ2Y9{@۫^ñKnS7|e'ʜ}*d8k9pБ*eLCf,P/-5 پ3cP8*1d%E3{h$@U^ds !\l&}"fO%Hv>AdF)cdh.S>ؼhK\t޸Aǃ{ޯ4i,v:H_9U:X1KݥBrE00D)-;R);{!?ȴ/mXp# I"mt%D t~)Q 0 Tm*M""8?ܯRq: /X[;`q>TP3-6cXFP.Оu+1 ł=ݕ   lPS\߄a\TZ'4"DhT Rw1$hJ21E)gdP<U7^ 2!`2'mBVh;-ҵp%vˢ?Nyi7Y8u0mDp_wN CРꕭnjt#teӑqՄPSS{@ԲqJmH}3HP %[lY2qDFl8 T OJѕ?g0Qyͩ#Y)|&uS <8+hf!(`%Oj> {*?y,~G/hHl?BGEwTRHqT&#KUAdF|Q_N.{uPu'4`QOuxANdQ,Q&iTH ?F3[4: TL? 
aHY%|n{OmXY{EN_ EEG]tmtLaL*[wb* rK'*o5+әEǾ\K:L =cɏ}V=._VXYnPT@ kj#v[ "e_\v'EZrLɚqWS  2sºäD,ދ%I tG Gz)vY6wh} lNyG*K(iKcË'E-/禥v J6vO5w7O+jQ{D 1>n'[+i`ChYwgU𴮸F97YQABDD4Ehl˵aM%ʓ~ m`] xfSiTN.D%)O q,z1.HH#DJv1.Ow5ٷ72R ᕀ^F{!D 9Au%.6hA&@Sw |= z@ΞqI6-Xo|q#n%^8Z'`w_`5.nX`HadBvODOw*TxFIۜP+6x~~J|vuRj%&HՃԤ2rINVN~I4 %F=]TCJkgU"WIK%TGl FX6a-ؼJ,֐CMKhb\f)3ap_-z!HeF#W6ڵ˛4/*ێ R|!oWVWk%j`coL9 5zi̲^ѭ:KCx[TT;Nѝ&ՈL0`jZzuBP /I3BBal.I=/'B'w%{8tnvi?`530&'xkTvǻ:AqEhw*lp.4Q:!KM42k]ZIjmFYyt Y,|+[@VmC\B̶3AWLbJ{R2BQEL*]vR`=!"0pJ2I `ឥo %5kpVDخ_`2C[S_zJsk.&﹈*+n U[otiEQ,.L\q6Ҧ-m9.*(ѐGEҍo?f=&GO?䎑[+Gc?#dP[W% 6-¸xr}4rZu rd@ Г b~kUJ1&ˎy1')l gqD~vz?p?ÑjJCK@dmQJ}kS:/kδ` 7Ujn,E--$AaҶ"q[fԋM_$[YBFIi+iqhjSbRFk M,NYO0=Uf)D;9{®t)2W&w9O:?ŬwwW܍d>M%p 5Zĩ\ [Ǝڳm|.P='#&ʲF'sS,vք6i-Mp-<;-IO%PP$2 *π"xǁnCWD)soLG%SNxLm\pZ7  zLs.Eyr]W+@1VUk`]֟5LǴpFQ_sG6O8XߢlnW\tkug\haЋ`Yel]ϤZ&lO]AvqcȀF()wXsc b$z2ODI(+[ۮ!"!>⌻8XMU5kmlǭe)'%Tz~`w}>\/iAT0-F dOkLPKQdk_hAx/RTfx#B/4-`ɸ?'de~꒴lX^\/#ծW-)F3RX rUgm:+И>1-S9Y2p5^)95H#J _%PF%=B  lA6P%pkfl US[d[!I(,t[UhϦ_.!b4>h,ń6OAfF4q,Z-_2<ьukSo&.6mpi.J*XԦnzh?.!*Vyv}qtb*%n0Q>+k8sa!wl| Vff,?־Kbi,Lo&oִ-[>|G-:_$z HV1G״ ǫ  U=;zG_J쀬g7YҮ}@1 k n0~3qXK)c82,t]ѥe{\UHg@ e az~4I'ZL-'S򑣑ݫ `bb{o,_?ևf&U.{o=6 _+Vr/x8A.dt_[s٣/ ?euSdPuFYh K9i kyx:kyHC͔ځx *,(RrbfK.N3bq!δzWnlmUK'74ZElCSY;uո#%-e)/:tgā^ lOoAo0j!4q@ )FGX[ypl?3%eכ㓣\.(7"maI ihdeۗdb mj{%f?@'ŸOU_^2vjcI>X. 
Os 's`4,ȬB0^zuF?mcȿ pFv.1ԥzϡWƣsȓWdp8'03'ނ'$- >onۤrxB 3F1Mȃ)cOLLA23x>Xsa-}%uu=|;3}9yvS3~b`&4ZeH$6#id?9믳V%0v]N){4'zK΂\/tS,芑qK n&8`wP$rB=^.~9b R쩼&EiWT@2d~@dѦECAoVoQ(4sR2#@f cIӆB:/c"vщ<6\V0a˸-RD0Bm r ݪ}ӄfP( U0O/<:醺E2j= 'D) 1%||I$Q:$ cAE'_V{GFT!b7;~ń"ʰVz%u!f[G[ag뱹/Mκto/sB sӏ'=ajPJn2~~Ed62| km1]X !o@ DwאfY86{P8"Ƈ֟YҫZn"t w 9aiΊXΙ!8d`H1&?}eD@~=Gwٮ;\nd rLGaWAG>cxD;0NzFl R2RW,"4j=v)59Ã}q)w$܉!7+ T_[{n({ﯥ8aE8!;G$"}-B|t:|'fࣩsI _ڃb#lX7#n' F~~"R^@&RF"c-vi\qM !BsAS}Fěz~Vxg'_[' D l}7س;}Jɺ^gܧ \A?I??~.2'.4I|EFEv/FnШ%] { ?0_AFI2VX^gX[gQ!ETJBHp,5.[$"gbvc$(BvYojR)=Rԯo$/ /h0@AFW[Ch!3AdL|lMrsT.E>WYuv,*A7)l(b!9M.%Dflv9,UN=n!u K3dvp.@` [SLX`CӚ`hԝEEx+ͥhL^L L5YClDi.\tghɄD~T\]sR4pD &}mvO6埋ax0%w"y51ց;snAX s,ܠ77kY'o4eR%Ⴟ>j"M;^H+J$YEC' # DEnO>mlrz}_Zy6_~Rz\/mJ[<yLK"@VL}\Yjg= }Z>fa]r?r9)'d$bnFsxCJ7\CcD$xQhU$,+3ti%]ι[4>|@&R!2o)2 7ǽ?TBe:'3b[ڨ˹C  yȟ!8^DA#bG}oՎqhx11d8X^VXQfZԗǯʥ))N>$2CYݤ.G=zQ\K V]fX 6}xJ[E;dnlMDQ߽c_ҡeaz]-$2]v͑J3W̻׽t<̈́=Jp&"jlǺy;A {# }k+vkYGQOKsCPdEg*&%6Hlqe~\.k.'N` C7tc/t/лbV/V7LBPkHPME߇#rSF"97Q^Fz<@.< Wo`@hZ!a[ȱ , Zp*DPF.NQ|YH2#`] ء&BtNZܜM8l[﷭^_9>j Uvz\]/AM9*, 'mD&]5e^K3daԳᬚqÓ9ըf{L?W#"xx;C}tyhְ~w @4IQLZ [AɬXbup Ikࢋ8ӀRqi3BJ9B0e$1=pwE+# 6EAUdv}wkLlKߒID*GRgo2˜^3Lٙge{emF]Enε@`'?As?ڲ\|#C@9D㪧1W8X)7tzPA][Q4,95xFtG5o)r!VDbXoD]wwʤ䋚U/6,֫-:fl뎻5x2}-A lY {]0P7zrDv1xwIDqT {l{2IE7-SE>}xs8\z0gl/>9E\דl#9z{_<$ycV_[mӊ*ͥE:8Ø\ L8re@|iHBۓߦS/: ./p r4YGa2oeE*oT^>@u@IFlt61uM΁=2kaTw|ۘrfH9>t`J)E=K@dI&r>0U#alu̻ܺcu`!-ғ$ɤHefQnd%T`MtC5GA<%.gf[8ݯ`Cm Y0jnEZU_KwJ huw)dS,$3p. 
5f*L"?4܌~|2p ( i[j|hlEQ jOHe4fZ a4xŋ`㒠h J» UgޡMn}젘6f@*Jj<0dGAv:puL@{\' eZ#1rp(MbAqEs2=f@ L%@ E8/}=r}tQ=z}'3zlp#?obι,i'EF 1-J[a *07(B|񒎮;hd^zne\*b)ؐpLICx>to]cHC/pϰ&wQg`jN Z{|{TP-dn,,%wqɻWs'zӾiv!T0k1E$DΣxX"Lh| d^b22^lP JM mwʌUH'kOAg^nRj*sղ+X4 2ߛ\\6ק=o3g"1U!\^oG'zn2$^ves{A'{BcsUGh$29 3$f80 Z{㕼W)[N6t<a*yM ͈čY1(DrCѷ@ٿjǩ]k$so^1..>pVV䛹?Kq%:*'#i卣}-2'hnh2yoj|f]VyfŠ,STFd.6K+ -_Of`]6\0/˄T=|i`Ws؊+:/cnKiZdC(p0* 󼟵8IjH-(r<:#0&+{lUFk3vgcN7r&XH /n ULQ.dvɰTQzvI#kwjG+3(;mmbTbQzȼys^nHZ$bOaIBmd:OXy󘭼4/O橢F4cc^9e\Ȯ>ێ02eSUc x=Yl^%Y㗼Ukq=H{>/y^l{a!N ?\}WwO Ky(!N)\x␤`VdGځێC2e=aIJ&gSbzwmY}U)ELIl,CǘA~|qwv$YcU> "`C.SE޶0M1t]Oʗ5iyܗ&&,?1*.wrp29f|Mvܩ-z|CMngvۀ,\i73W^k7m:h` LQ͏og/FœҴS@sN5Ͼ%1@ [!X0էB۳Z0'd ;*;3kQtq4xz_{} iH ˆ0L0\ E#Mx} kEn.͏K0<ъH NlI"&vg+(ϮmNq,v9>likm1}K ?"5`Qxеx5~Q;k/ffIzZŌ`(HT z|`5[",Xv%d~xR0Q~s?<СbZ<4lTw{tw$J3 zIc2&/l* z^TN94|Yy9@APþmU[ǦiCYqk4;PE[_ҩqeKj!iB-)mDL&f0FwZNIhSY |W'DInQч,SS9hmU%6k-ϚJפVf5=f$"-B&K~o24+m۳p"d=K]s:Ԣ؞·-ӭ, >6ӣNXI: V8 dvm672s ?NnВiI5G "P˻NW?\$Ӈ!G9 VH|P ]&+~-vȷԬzB.:_P6 =quƝ,]I_DU*a;LM[e>z􋊤uass4:u=~/ڋƐw}Pj!{\D,܅1.$=I5]$Ӟ@k+X!SɅ3W]'SC\입u\f:N4-{CTO2 uz>?|DT|ĸr;\'` #U/kO8u  ֘ꐜɸBP=\SfnA?cRsVQkF# $qmhmJ}ఠùvuL$$l ,OR~~EPUP@$`?/y AM`*x1Jp: hӪU/GK'Ti)Z%y-=/V?5S~ո걯XѐAZXauxl]$dVo ܍e(,Y۞K̔BR7]#p{_,2ͅӽ,`N:c7F A |Ljv?wYI0+yB_?ck1/pBׂ4룍gOVл#63B\LO :X,W̃0Œ<¹rWᵅf>\THbdJ`Cy//.]Ŕ+IPg&?LkI%obC tEډhGyϤ5gU ,NL(+ }9o<$a:gOT{tj(C|z೎^9]B~ n_Bjf»p9I/~q LqeFqc$VǮ?k]9~sB' `i,#4lc0ہ|Q>/”>O½'#@L>xA*&azˋ#Fڴf]\FlBh+'CKi&jGCnR[2Z`x+;%PT\ e3 sLI$$ۯfZU1A|fYH9T'{c?a9쿾RM5VHfx fR>U*T<.I1ÍRM`ѝ |]b.v+Wkʾ-fG迼zM(QT(FLso2B7"ҵ$AYr~-C+7}JM'?x47,ȥd <|(=:oWpU:2JDJ!.ʺ&?8%b4Ѕ+̳~man awΪQ}&ieV򨸸Pp?cr`3M8-\nQd}$$#{ j "HR'AJ$_vs)2֜( VNf7&L#!)W*9268֍ ֵȩ;lC׬zڏ+e^X>/62chr_W'}D ].oUW.=]` Q]KU0OW߄M*ݘ3EU()>3 jjzpT"*jr+\uZXi}J_i{LDD^hfZvus*Ir-s&M:#N{w<ΔeӲ]f'7 ms2`u m għ *}i^IS4yq]Khxv>QÂrintd̏Bwy-d̋$3n+-6_ hVM6ɬHu*=l54k:D xټyHر_g4%gF:Qj (-P dH ҒJ^]yCClgr!.G~ygF9H&ęo XV @P2P5}`ğU-C3!fZW6KGiRFF]M MHe8HwCgϛ6`!v̚#v_dKfFiG{ko_}f6^1O"!PaMQƾS1 PU[ӽK")&Y>6nggs[qPQL=NcK"Y `Zo_ aa>~x{xðܐ)nϜgCx9N(@c|)D7F^+ 
jZI3Ynj6!hyMt=9op0cn?I;ѢFކw.3G,:i_|V?b[QM|s>1 zyZOZ&"`BKZc4PR93$y6Ɖ[.I#?P@v3NY ,Yv}z-ߣǖ}6=Ө7+m.Zr)=vWx[Y1]6L=Joupir|C\vRntGEcj/^ZH=nx-EʉjNP H?wGS'lv_!, Yձ?8*F~e mlJ(Q-.NRRX3ugt`ߢp-,AO"%p+VEfͪ$ gΈavlb:`!ft h Y`\Z_tس;1KwoGm~lֆ)ziz6}ᛡ @ɏJ9DR+Û9jE6TMۨ3M}c=4_keUu}_ɚF}L״IWch8{]2i= 7bIPiRe5Wx|/3-%ȯC IejA=s}#{n\q}C-M¶[H_~pPVy/Uឨ4_\GsCk],v|3I`!j6nmrJcYiOkg?,=:ٻ~t͐4ձB/ @Ns&Æcr6U_xgq/T*BiIjP="DՂ7= 3G8( /Y[mb{tv+dXM9nR˷'C "@;ާږ:AMRˎY]Ez^E7g 7~y'+@;pK#'J%)>B|0дg0$톢dd!IA{]*$ =`?vpA |} {%fwo;O,AF^$o哴@Qp<^8!^qˍ\kM@ΈFl[r*V|%!@`,//֮7"mEl2[ukήf:{GMTau zor,jJu w-ugH|}Y!`;ɐ}T6˖1EZJ?ƢvlS,tmjÈO\' @ppOY|ko/1mǷ!Og%bqDNfarm%cRwF,9% i @OoZbgC׮`3DV+,k*GovuJre>,0>Aϳ!@ ^ɋ#&""Aa 5WuP =giNHiJȋB' c9HNAzI9B{W"250 -pO8Sx1tT!߾VtXGvXJ?n郟CXb^Jr&@ۂ͐z(]$U1$8tHv rv8jB^t?˗iڍ}0Ș7N>[^qynAY>* [+R-7<Ю;4Y uT}uɫ oW ?7M9T=$}ܠi2!dÆ&"")dʁ--yNO{G4Gi}D kfiis)a7e}qyP"5>.'vL osC$ ]K"tXxIn!u&!;WF@SS^Nh~.tW5rVyY*פ+8` Eu/0h/_a2urI?eb"R4Lyl>f$gG^Qh{>bzp BsF:ʤ/n^ZNGg{oo0O\D'wRi.O *M>=4zA0h_JESr3W=V{m\ASNY>XHX.F057)`u=Z¾\iR~iP #F 3!{h F|6y(ė1rU`/K͇'NjZ1P?|$aMxwOa.GQJ1V-,l,-+^4։|ƨl1LX*/w؆E0m|ұAu5$ND 3 aҊ'IC=5XaZE,'h !`GӞ"qר*a8%a;sP-aM~_prG]k+H ܄-eDPz4PgHpH$6^΂zKB%Ah~%{+d+KX a?1Nۨ Q{yì WN>wr6סVAR,ҥ}GY4j2}A*լI^9,VF:nQШ) C5'3%ZNߴ0nizg9>ßYFK |P+ꦣQ"k}07FҾYV KΗ4g/M$w5RNZ 7\'-] 2YglPpu^x˨tgQ>}dj5X +B0#*1Q"x\|:(PF25HW ,|a=`96ǀ/XzS^s`jB9 C?s)MGFư`߭@;[p90[[+Dܜ P6vPٗ(i8jL#@*J&U+dvJxqԉҟmҺ\f4zFqj1Ӛ{9į|xfQq›kM˳WWϕ{?7.`rqLurҽ#!W3;Oj>_10Wc6_w4?ʷNy5P9 .^/4,N|~UGsp]gZWL]Q)/#u3jpU܇nnWmL ]37홿1}X\vg䜄?~o}V&ryG֐[x" quo\G@ˈ',Dz珜 -4nv1^z)?7GLx@"W=Qu5) .eD3l&v^bUtY`N,ЩQ"l·g5Dj_;u,W^dm\sSͬ^BhdH;Y^GŠQ /(p>_Wip Q @h?@o7<}@h\\pkS w.!R2SW1w#Se&`ZoaKS6(5kT\o$hg_,̫D.,iGS'(b9&K8|*. 
NY)|՘JO!T@ʢ>$!C嬋!w=RwC2<_^C ǎ0'3k|{ۓ<ڮ 45QjzO:sغ[jLpXinY*)'wE_[(` p1wl޳a5HFUġ0LS# /?+F94<swefl|1H6pxesED6C@#|3yS ;i6{15,2oڲ߃˼n%0$@xsޏ'䘆R,'Ѣ4![5N&Jd&lDǼ%]u‹ .̽g:t&B᷽B[襷 I6>)I5j @0Åjk}%Q$aɉ5MkPZU>  i3}p'{ւ&a`H/7qÊwZo~;ס~c++ %{Qa~^@3ȃ[.u C1D%~<栍>vԧ-l9m@R}ꙵdpCE4NRB ̈́ &=.B~pNCJTiay2*D͘gte+uJg%&WrTexO"Mzb{-uAz9'v*WiͪB"D qЭ+(jB|hM$o!o*[zܭ®1B^k> leINQ'+jHi~MB MQz9utkd}x#ʈ%aX*eBGbwvE͖KVOQ}\eb=̰&k3]>-x "jeDŽK0b5}ʠʉLC* dD- *c"X'?:?DVF*wOXPU9Kx}K[^WsB^ pȼFџv!' 29*_R/?8AoStm4m@jLxg_9=iplu.q/O>u}<ԚEjP$^ET-:ы!ʊ#~^GL[bT+pv;9l8M=k?IvKVK+\/t>1A+rM3ڰQkuAubgP,չWXw]K7ȭ͚;q'T)G=X&6J.vyn>*^Y-^ hˤtJMp30r'dv<|ֵkm`oބDQ}DboA[^ګ&7[oUCEbUnouPCRLdpmc|fP9>Nctwrٌ eqxb"<*5>",|c)@:ICe$MzM"$intĒi rYf V:ǚSQrvW 6&E*2X~ ^LG,yOpݷt[i.fۦSS=ݭ,d'%P@ǟp{ۦ3yҖŚ#z%HQI9PzK2l=I k,b͛]rC|yqz*-rH*8Kz3`+4J#lG-ύݎ~6QN ##Zꥂ!q%LnUj7fP nYLϲ}꩞qwyW3dRH`خu̅Ym#7(G ul!(G-elgBk95HǴNzKl*1|%҈ER+fI>Iʍ/ E*@ܘD\+ eO "Dc)z[5"D(NRJq/2L47TwD篤a-k)Im39ڕ#[rǬ/ w:8r{R@ sE=iz8n O ՗xL ˥`]$ ~7QC6 [.{%=ru!EzEytԦjhm,u塂qh #I^((,b$$尵; IX[c824,F8rEcfr~ng]߂"`9-Eet>T> IeP[DzN \$C*2`jPf^H՘ɏbDZn>ώq )<Ȕ%x.Ó'v8+(̪1sdЇpӬј~ԅ5N@xYTԁǪSjtYK^GV!@ iwAqV{_|&_DCN\<~H \{py}zpz;If ^<9i"_v> rrU'VdwS6mw&2W>әfΫ>%O.:-uEh} ڠ t!2W T~8/aS'tcP)ƷF#e`QŰ.Ěs +D7u^ <~jfOKLeeт7>u-v$;e<8V}ʪ f0m.n(ܞ*`4nSwTVŋ䕑͸.tm?0T3̵RϬ" B]#Nm?6 ><|*&I YL#L䘂@,a1 ~8RVs8r.-{RWA?^ͳLt^^?=53n7&-NcDAGq"q H#oɱIGUF2q״CRz% b R߳rX*J8N0B3uܬ5E+J*t Ǘ Ny{mUѳQYU٭VcߟBY)啽2=?T}9</C(';^aV8+A9+K o='H ]W;G"-]gJ+¸7mƺ^G`~Y! 
oC}Q^4{g:?ipg)]d&fE4]]}aHodpKda͡j%*4o+4Jzc眼#wc2^9T *w15%y cjBsy c8UgJ *+^ϯ{nɍ8w_QS\|TU,!NR#YS1~".b5}qV 7,@DGMc2'?5'`%ҡˈoV: \B:ki|#+.k3=0On"),.?85d=Oy;-~ol,m]~Z1W5VNSOEMdWwlA+IզwQ=o ׏{&16 Ǜ;p|=?n~2XxvӘIt{橿weq}pqDrI-*~57a9:pE8H;Zy#۠<wS,ofD 9\ @ݤLS֎[H.8Jт%cpZy]<ӓEΘ(r'6kd9fna?Nr퉖Mc->~ڒXIx{7˾&Z_Ld2E".w.k㞬,!奀]Ȅ-?_=EJ5.f5Yc%0CN ΉLj5{Dι}[ ?V :ײK J8Aw7;u܈_n-J2B91UɊRz-(l> {YCjy@Q >oKXD0xvE۷>V1id+IMY6 a5飽TP:T}^it8HD43_V?ZͿ(+Hf:>XF~+4:3%^7N;Jׅs[c=ԐLTԚd/LwU8mD);ܪ2^Ace_b8À9eK̨cg\L=T_R)<-yj #Y&,2/>KN8I+m1aT ʻz{2 G3q6$E#xqP9 UmɻjG #QHeG1_Zl2`u2/O1B硴pD2i6HD"v2"unM;R)a!Zݳ@ٴdU?(Bp# gn£yGfSavL}A(ϲ֯l!1Ƙʹ9՝,Yw~R=k ª[Yg#Y UTtsVMq[EW>UN'JڳEzJ( G5t},O|Tt{]$ԟ|($jiTn2=OѧZt>J~LʱY4 U1)I)9mէN>c_+cSn(b%#?Qwi/ڿe5$@z{ Klg1fR@`(J!q{|Tlܿ/-;1YӖ,||a0R1%uz==~M{@ /H6jZ`LX 5krVh[|c9APAoc U f4!jޢF<+ ki`C{O=HyM_25?uq޴GReBY-8:"0I Ho\Nue7FL},zD*c1;(n5:]ܞbBLjBoO?𴎔 too;U jJ-7tq\xnw6k#+z$!n#YI IJn qRf?Cf| _*e&Y?D۸uߖWjO燫b;٭-hr/hZ pFׅo +>OG] e{4hBsUV2Hdl6Rύ9l{d@ٍ"(MksҍQѮ/9Q7N{ EvGl'cFJzo!6h8ȘB'.x5霱vy K ܥhH:/%5w:lXaT/4kyM/-I[sp(d͑aV )#9%߼85`NiBaGn7]#…2Q"7Qg+Z M Uߟqpbl.#V@62HK(2F 2)lv[-҈q4:] o}"xUyTa߽ 6Vz]פ!\agZMݏٵeCu]m0 ll>.Eg6z}E-xES2c&TjJOm~0}k)z=84K ?SpM wq2B! q7$^" vp~U-jb%,Z',|!C=~־}S`N5xvh'ltϹAd+V\34zQQr˵h荃K*}V1?H_8|[>⿯%}*jݢŜBgB$k&ғV^m_2<1.5D.a`B}uf2a5o0:PR)L5 JCD&OW`ƐVCTk]i܊&:puL핉,(dIHi"6>J%#P. 4D\z!\.,#[4KIKs4wT4n+N*Av%'ʦW%`O/]j,YE1l9>7%:Y,o3мf䉝ͺ9z@җ8o9CƉ'XM#c&ny0k=FG b>) ~}DY*\nW g5I_za:JƊּFWWږ xA 5s\]is,&oj #HQ K86h}_ҶՀ{kwXvQD]6!EMfI4#Z(cΧpDT%Lr]kz;ةng?Sj3JPr←D(`96tLε/UX\ʜczT \:zЅ P;&G%)/ Doڧ;fsGW]D1+;›Ό̸phG.y-}.mrbjOrJn;ݤ<+ˌ諀dI-6mDԠ,SJ@y%)b:,ŤSI AP UO#+y^WWĕ, ?pp<~~WEKJK+H(2*^t  Cq)\UQ]ET\)!rU@fff&dP)6E9XYN?QVNFfOkPo$Zpq\੎&R4PɋPmM(3Saٿc=VS/t +%';$1 ѦψD&l,)~tySFS]9[3i7/W){onbflegyR%# i#3`_We gh #"9ȧiĥVQWO_gmgaaqB)a{8=5)֧vW?M9,>|߱! '^%Dy>kh s5st= &FO0( %ު'D~ɳsrqެKX:V7_?`՜n:;-iJ~z\Ku4PqO R@~PC3>i;Ϻ=RK&`|L(%J0m0|o7X9ϿDX10m$;lS*%>Eovfv?KdvnħgRwTjg都fVF? 
V$@S }&U2ΊHJL=SW?@e1zi s3z8Cڊ}V[_@{Sw=@u{*ϩNCMC늉XDb̌L"Vj*1<gb, i|=g?$#<PF4!|i.`UzYt~ǴsrY[Wwd dVR jwoSmWofwԧ*LU5>YԢucBOߋE|P s3 L,ݿ;9ۙkz rXy'3ɼZ@CL lifdldje3V,xN&> \øGolobaTo9bFgmK.:S=gihC.%>U!LaHG' ?KSm̜L㲂zaz4?I^} lԧd;Ws3[ןv|l(0OGbL.|^#P?NhldoԪY5#f `:`қW1zn`ʼO{Lj1pD*nV?e[shiUDJMc./ +_o튩'x^9Xc#`[xX`~kZgv,F9ҧ@c3ZxiC5qw~6ܧ֟?`b&~j\pͭ~v.8"xjN}>9^=tfVf<F#{;\w3 Ť_H7x[3g7;׿0v:¢?$0g6Oguv] Sɔ!㏉/$ r`S> g,. ZzivD}iW2w@z$;3aKƛ@K:U`rtMg~+uXlv.&fVN ĵہ%DKӷ &yu]f pBq< SYD,7`LZ7\zO`Sm{*翲n40(SYx唳 0Az* tZW,qv$(O3WزݿȜ^>G϶, _sqk_ XkWb?vAϩFw)wsKq]Iwg*-Vm?~Ɨ\5+`d&/gr$I!f_j}WwgK#[Sk`6Ch)0w*yo|Cw=0+{gӺ<`{F"ȖZk@GLL KަOY@lfdWm%h%-5Ox.e"'?ԩqS$5 &埣M M*L؄΋!=ЧL>l_DrRNKD OH*L0#j_; Ы+u@ S3Xix@h6ӑjo+D.qhWik B:LtP9r,P4F_ )XR L,> )v_dninP}V(|*_g_90 +]>Lxg5+0~_f  OpgbsMٜb(XQ6Kb=L,}Ue 0a5s14:C{AqL. 0$`xCu#c.RVTCI}zfTP?7tϤ>=o~OOϹ|L̿k|&1g~M<􀝿q{& }Y5%v|>m*O/7,gҞoiO7y8<ȿ`)PK"3>MunHandlerManifest.jsonUT ΰ[ΰ[ux 1 0FDV\tv+RTnl+TApC/&1ވ4]Z"q<ז^WH9!CM렽c-M? df WlEi>/U8%j,t-Ix\:̬`ta~0 0?:!8m#z\PK"3>M8%\ manifest.xmlUT ΰ[ΰ[ux S0H{L B+D# BE^M2Ďl@v%衫^,k77\pBSo =@ɜԫ٠pdz&%A3%ܛ %ieٯZe¯i1@ሼis zt~}xm8YL`rT7{s,r %3pȵJe$Ĵbյu/ϳ {S^*[^]칳qh̽Հ쥂2Č!Ujc_A-kFC0 !.uxe\Otb k`uԨ6cDu#>psw2ˬw #`D$PK"3>Mx 8#bin/WALinuxAgent-2.2.32.2-py2.7.eggUTΰ[ux PK"3>MunjHandlerManifest.jsonUTΰ[ux PK"3>M8%\ rmanifest.xmlUTΰ[ux PKWALinuxAgent-2.2.32/tests/data/imds/000077500000000000000000000000001335416306700171255ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/imds/unicode.json000066400000000000000000000020031335416306700214410ustar00rootroot00000000000000{ "compute": { "location": "wéstus", "name": "héalth", "offer": "UbuntuSérvér", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "Canonical", "resourceGroupName": "tésts", "sku": "16.04-LTS", "subscriptionId": "21b2dc34-bcé6-4é63-9449-d2a8d1c2339é", "tags": "", "version": "16.04.201805220", "vmId": "é7fdbfc4-2déb-4a4é-8615-éa6aaf50162é", "vmScaleSetName": "", "vmSize": "Standard_D2_V2", "zone": "" }, "network": { 
"interface": [ { "ipv4": { "ipAddress": [ { "privateIpAddress": "10.0.1.4", "publicIpAddress": "40.112.128.120" } ], "subnet": [ { "address": "10.0.1.0", "prefix": "24" } ] }, "ipv6": { "ipAddress": [] }, "macAddress": "000D3A3382E8" } ] } } WALinuxAgent-2.2.32/tests/data/imds/valid.json000066400000000000000000000017661335416306700211310ustar00rootroot00000000000000{ "compute": { "location": "westus", "name": "health", "offer": "UbuntuServer", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "Canonical", "resourceGroupName": "tests", "sku": "16.04-LTS", "subscriptionId": "21b2dc34-bce6-4e63-9449-d2a8d1c2339e", "tags": "", "version": "16.04.201805220", "vmId": "e7fdbfc4-2deb-4a4e-8615-ea6aaf50162e", "vmScaleSetName": "", "vmSize": "Standard_D2_V2", "zone": "" }, "network": { "interface": [ { "ipv4": { "ipAddress": [ { "privateIpAddress": "10.0.1.4", "publicIpAddress": "40.112.128.120" } ], "subnet": [ { "address": "10.0.1.0", "prefix": "24" } ] }, "ipv6": { "ipAddress": [] }, "macAddress": "000D3A3382E8" } ] } } WALinuxAgent-2.2.32/tests/data/metadata/000077500000000000000000000000001335416306700177515ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/metadata/certificates.json000066400000000000000000000002051335416306700233060ustar00rootroot00000000000000{ "certificates":[{ "name":"foo", "thumbprint":"bar", "certificateDataUri":"certificates_data" }] } WALinuxAgent-2.2.32/tests/data/metadata/certificates_data.json000066400000000000000000000112531335416306700243040ustar00rootroot00000000000000{"certificateData":"MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 
sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO 
g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 
EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw=="}WALinuxAgent-2.2.32/tests/data/metadata/ext_handler_pkgs.json000066400000000000000000000002701335416306700241640ustar00rootroot00000000000000{ "versions": [{ "version":"1.3.0.0", "uris":[{ "uri":"http://localhost/foo1" },{ "uri":"http://localhost/foo2" }] }] } WALinuxAgent-2.2.32/tests/data/metadata/ext_handlers.json000066400000000000000000000006611335416306700233270ustar00rootroot00000000000000[{ "name":"foo", "properties":{ "version":"1.3.0.0", "upgradePolicy": "manual", "state": "enabled", "extensions":[{ "name":"baz", "sequenceNumber":0, "publicSettings":{ "commandToExecute": "echo 123", "uris":[] } }] }, "versionUris":[{ "uri":"http://ext_handler_pkgs/versionUri" }] }] WALinuxAgent-2.2.32/tests/data/metadata/ext_handlers_no_ext.json000066400000000000000000000000031335416306700246710ustar00rootroot00000000000000[] WALinuxAgent-2.2.32/tests/data/metadata/identity.json000066400000000000000000000000641335416306700224750ustar00rootroot00000000000000{ "vmName":"foo", "subscriptionId":"bar" } WALinuxAgent-2.2.32/tests/data/metadata/trans_cert000066400000000000000000000021271335416306700220420ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDBzCCAe+gAwIBAgIJANujJuVt5eC8MA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV BAMMDkxpbnV4VHJhbnNwb3J0MCAXDTE0MTAyNDA3MjgwN1oYDzIxMDQwNzEyMDcy ODA3WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPcJAkd6V5NeogSKjIeTXOWC5xzKTyuJPt4YZMVSosU 0lI6a0wHp+g2fP22zrVswW+QJz6AVWojIEqLQup3WyCXZTv8RUblHnIjkvX/+J/G 
aLmz0G5JzZIpELL2C8IfQLH2IiPlK9LOQH00W74WFcK3QqcJ6Kw8GcVaeSXT1r7X QcGMqEjcWJkpKLoMJv3LMufE+JMdbXDUGY+Ps7Zicu8KXvBPaKVsc6H2jrqBS8et jXbzLyrezTUDz45rmyRJzCO5Sk2pohuYg73wUykAUPVxd7L8WnSyqz1v4zrObqnw BAyor67JR/hjTBfjFOvd8qFGonfiv2Vnz9XsYFTZsXECAwEAAaNQME4wHQYDVR0O BBYEFL8i/sehpGV6IEDX7F0WQHQ/ZXOyMB8GA1UdIwQYMBaAFL8i/sehpGV6IEDX 7F0WQHQ/ZXOyMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMPLrimT Gptu5pLRHPT8OFRN+skNSkepYaUaJuq6cSKxLumSYkD8++rohu+1+a7t1YNjjNSJ 8ohRAynRJ7aRqwBmyX2OPLRpOfyRZwR0rcFfAMORm/jOE6WBdqgYD2L2b+tZplGt /QqgQzebaekXh/032FK4c74Zg5r3R3tfNSUMG6nLauWzYHbQ5SCdkuQwV0ehGqh5 VF1AOdmz4CC2237BNznDFQhkeU0LrqqAoE/hv5ih7klJKZdS88rOYEnVJsFFJb0g qaycXjOm5Khgl4hKrd+DBD/qj4IVVzsmdpFli72k6WLBHGOXusUGo/3isci2iAIt DsfY6XGSEIhZnA4= -----END CERTIFICATE----- WALinuxAgent-2.2.32/tests/data/metadata/trans_prv000066400000000000000000000032501335416306700217120ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDT3CQJHeleTXqI EioyHk1zlguccyk8riT7eGGTFUqLFNJSOmtMB6foNnz9ts61bMFvkCc+gFVqIyBK i0Lqd1sgl2U7/EVG5R5yI5L1//ifxmi5s9BuSc2SKRCy9gvCH0Cx9iIj5SvSzkB9 NFu+FhXCt0KnCeisPBnFWnkl09a+10HBjKhI3FiZKSi6DCb9yzLnxPiTHW1w1BmP j7O2YnLvCl7wT2ilbHOh9o66gUvHrY128y8q3s01A8+Oa5skScwjuUpNqaIbmIO9 8FMpAFD1cXey/Fp0sqs9b+M6zm6p8AQMqK+uyUf4Y0wX4xTr3fKhRqJ34r9lZ8/V 7GBU2bFxAgMBAAECggEBAM4hsfog3VAAyIieS+npq+gbhH6bWfMNaTQ3g5CNNbMu 9hhFeOJHzKnWYjSlamgBQhAfTN+2E+Up+iAtcVUZ/lMumrQLlwgMo1vgmvu5Kxmh /YE5oEG+k0JzrCjD1trwd4zvc3ZDYyk/vmVTzTOc311N248UyArUiyqHBbq1a4rP tJhCLn2c4S7flXGF0MDVGZyV9V7J8N8leq/dRGMB027Li21T+B4mPHXa6b8tpRPL 4vc8sHoUJDa2/+mFDJ2XbZfmlgd3MmIPlRn1VWoW7mxgT/AObsPl7LuQx7+t80Wx hIMjuKUHRACQSLwHxJ3SQRFWp4xbztnXSRXYuHTscLUCgYEA//Uu0qIm/FgC45yG nXtoax4+7UXhxrsWDEkbtL6RQ0TSTiwaaI6RSQcjrKDVSo/xo4ZySTYcRgp5GKlI CrWyNM+UnIzTNbZOtvSIAfjxYxMsq1vwpTlOB5/g+cMukeGg39yUlrjVNoFpv4i6 9t4yYuEaF4Vww0FDd2nNKhhW648CgYEA0+UYH6TKu03zDXqFpwf4DP2VoSo8OgfQ eN93lpFNyjrfzvxDZkGF+7M/ebyYuI6hFplVMu6BpgpFP7UVJpW0Hn/sXkTq7F1Q 
rTJTtkTp2+uxQVP/PzSOqK0Twi5ifkfoEOkPkNNtTiXzwCW6Qmmcvln2u893pyR5 gqo5BHR7Ev8CgYAb7bXpN9ZHLJdMHLU3k9Kl9YvqOfjTxXA3cPa79xtEmsrTys4q 4HuL22KSII6Fb0VvkWkBAg19uwDRpw78VC0YxBm0J02Yi8b1AaOhi3dTVzFFlWeh r6oK/PAAcMKxGkyCgMAZ3hstsltGkfXMoBwhW+yL6nyOYZ2p9vpzAGrjkwKBgQDF 0huzbyXVt/AxpTEhv07U0enfjI6tnp4COp5q8zyskEph8yD5VjK/yZh5DpmFs6Kw dnYUFpbzbKM51tToMNr3nnYNjEnGYVfwWgvNHok1x9S0KLcjSu3ki7DmmGdbfcYq A2uEyd5CFyx5Nr+tQOwUyeiPbiFG6caHNmQExLoiAQKBgFPy9H8///xsadYmZ18k r77R2CvU7ArxlLfp9dr19aGYKvHvnpsY6EuChkWfy8Xjqn3ogzgrHz/rn3mlGUpK vbtwtsknAHtTbotXJwfaBZv2RGgGRr3DzNo6ll2Aez0lNblZFXq132h7+y5iLvar 4euORaD/fuM4UPlR5mN+bypU -----END PRIVATE KEY----- WALinuxAgent-2.2.32/tests/data/metadata/vmagent_manifest1.json000066400000000000000000000006541335416306700242610ustar00rootroot00000000000000{ "versions": [ { "version": "2.2.8", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.8.zip" } ] }, { "version": "2.2.9", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.9.zip" } ] } ] }WALinuxAgent-2.2.32/tests/data/metadata/vmagent_manifest2.json000066400000000000000000000006541335416306700242620ustar00rootroot00000000000000{ "versions": [ { "version": "2.2.8", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.8.zip" } ] }, { "version": "2.2.9", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.9.zip" } ] } ] }WALinuxAgent-2.2.32/tests/data/metadata/vmagent_manifests.json000066400000000000000000000002601335416306700243540ustar00rootroot00000000000000{ "versionsManifestUris" : [ { "uri" : "https://notused.com/vmagent_manifest1.json" }, { "uri" : "https://notused.com/vmagent_manifest2.json" } ] } WALinuxAgent-2.2.32/tests/data/metadata/vmagent_manifests_invalid1.json000066400000000000000000000003121335416306700261410ustar00rootroot00000000000000{ "notTheRightKey": [ { "uri": "https://notused.com/vmagent_manifest1.json" }, { "uri": "https://notused.com/vmagent_manifest2.json" } ] 
}WALinuxAgent-2.2.32/tests/data/metadata/vmagent_manifests_invalid2.json000066400000000000000000000003121335416306700261420ustar00rootroot00000000000000{ "notTheRightKey": [ { "foo": "https://notused.com/vmagent_manifest1.json" }, { "bar": "https://notused.com/vmagent_manifest2.json" } ] }WALinuxAgent-2.2.32/tests/data/ovf-env-2.xml000066400000000000000000000037141335416306700204370ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net true true true false WALinuxAgent-2.2.32/tests/data/ovf-env-3.xml000066400000000000000000000037101335416306700204340ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net true true false WALinuxAgent-2.2.32/tests/data/ovf-env-4.xml000066400000000000000000000037201335416306700204360ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net bad data true true false WALinuxAgent-2.2.32/tests/data/ovf-env.xml000066400000000000000000000037151335416306700203010ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 
1.0 kms.core.windows.net false true true false WALinuxAgent-2.2.32/tests/data/safe_deploy.json000066400000000000000000000010161335416306700213540ustar00rootroot00000000000000{ "blacklisted" : [ "^1.2.3$", "^1.3(?:\\.\\d+)*$" ], "families" : { "ubuntu-x64": { "versions": [ "^Ubuntu,(1[4-9]|2[0-9])\\.\\d+,.*$" ], "require_64bit": true, "partition": 85 }, "fedora-x64": { "versions": [ "^Oracle[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$", "^Red\\sHat[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$" ], "partition": 20 } } }WALinuxAgent-2.2.32/tests/data/test_waagent.conf000066400000000000000000000067431335416306700215370ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Key / value handling test entries =Value0 FauxKey1= Value1 FauxKey2=Value2 Value2 FauxKey3=delalloc,rw,noatime,nobarrier,users,mode=777 # Enable instance creation Provisioning.Enabled=y # Enable extension handling Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=y # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # An EOL comment that should be ignored # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n#Another EOL comment that should be ignored # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. 
ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=y#Another EOL comment that should be ignored # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval OS.SshClientAliveInterval=42#Yet another EOL comment with a '#' that should be ignored # Set the path to SSH keys and configuration files OS.SshDir=/notareal/path # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # OS.HomeDir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=n # OS.UpdateRdmaDriver=n # OS.CheckRdmaDriver=n # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.32/tests/data/wire/000077500000000000000000000000001335416306700171375ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/data/wire/certs.xml000066400000000000000000000117071335416306700210070ustar00rootroot00000000000000 2012-11-30 12 Pkcs7BlobWithPfxContents MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx 
iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 
1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw== WALinuxAgent-2.2.32/tests/data/wire/encrypted.enc000066400000000000000000000010551335416306700216240ustar00rootroot00000000000000MIIBlwYJKoZIhvcNAQcDoIIBiDCCAYQCAQIxggEwMIIBLAIBAoAUW4P+tNXlmDXW H30raKBkpUhXYwUwDQYJKoZIhvcNAQEBBQAEggEAP0LpwacLdJyvNQVmSyXPGM0i mNJSHPQsAXLFFcmWmCAGiEsQWiHKV9mON/eyd6DjtgbTuhVNHPY/IDSDXfjgLxdX NK1XejuEaVTwdVtCJWl5l4luOeCMDueitoIgBqgkbFpteqV6s8RFwnv+a2HhM0lc TUwim6skx1bFs0csDD5DkM7R10EWxWHjdKox8R8tq/C2xpaVWRvJ52/DCVgeHOfh 
orV0GmBK0ue/mZVTxu8jz2BxQUBhHXNWjBuNuGNmUuZvD0VY1q2K6Fa3xzv32mfB xPKgt6ru/wG1Kn6P8yMdKS3bQiNZxE1D1o3epDujiygQahUby5cI/WXk7ryZ1DBL BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECAxpp+ZE6rpAgChqxBVpU047fb4zinTV 5xaG7lN15YEME4q8CqcF/Ji3NbHPmdw1/gtf WALinuxAgent-2.2.32/tests/data/wire/ext_conf.xml000066400000000000000000000030521335416306700214660ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_autoupgrade.xml000066400000000000000000000040721335416306700240710ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D 
WALinuxAgent-2.2.32/tests/data/wire/ext_conf_autoupgrade_internalversion.xml000066400000000000000000000040721335416306700273730ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_internalversion.xml000066400000000000000000000040721335416306700247730ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_missing_family.xml000066400000000000000000000061231335416306700245620ustar00rootroot00000000000000 Prod Test https://rdfepirv2bl2prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml 
https://zrdfepirv2bl5prdstr06.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr09a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl6prdstr02a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml eastus https://walaautoasmeastus.blob.core.windows.net/vhds/walaautos73small.walaautos73small.walaautos73small.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=u%2BCA2Cxb7ticiEBRIW8HWgNW7gl2NPuOGQl0u95ApQE%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_no_public.xml000066400000000000000000000116201335416306700235200ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK"}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_no_settings.xml000066400000000000000000000111241335416306700241010ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_sequencing.xml000066400000000000000000000046041335416306700237130ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ext_conf_upgradeguid.xml000066400000000000000000000031351335416306700240500ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.32/tests/data/wire/ga_manifest.xml000066400000000000000000000044721335416306700221450ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.2.0 2.0.0http://host/OSTCExtensions.WALinuxAgent__2.0.0 2.1.0http://host/OSTCExtensions.WALinuxAgent__2.1.0 
2.1.1http://host/OSTCExtensions.WALinuxAgent__2.1.1 2.2.0http://host/OSTCExtensions.WALinuxAgent__2.2.0 3.0http://host/OSTCExtensions.WALinuxAgent__3.0 3.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.0.0.0http://host/OSTCExtensions.WALinuxAgent__3.0 4.0.0.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.1.0.0http://host/OSTCExtensions.WALinuxAgent__3.1 99999.0.0.0http://host/OSTCExtensions.WALinuxAgent__99999.0.0.0 WALinuxAgent-2.2.32/tests/data/wire/ga_manifest_1.xml000066400000000000000000000005771335416306700223670ustar00rootroot00000000000000 2.2.13 url1_13 2.2.14 url1_14 WALinuxAgent-2.2.32/tests/data/wire/ga_manifest_2.xml000066400000000000000000000007371335416306700223660ustar00rootroot00000000000000 2.2.13 url2_13 2.2.14 url2_14 2.2.15 url1_15 WALinuxAgent-2.2.32/tests/data/wire/goal_state.xml000066400000000000000000000021061335416306700220020ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://extensionsconfiguri/ http://fullconfiguri/ DummyRoleConfigName.xml WALinuxAgent-2.2.32/tests/data/wire/goal_state_no_ext.xml000066400000000000000000000017021335416306700233570ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://fullconfiguri/ WALinuxAgent-2.2.32/tests/data/wire/goal_state_remote_access.xml000066400000000000000000000022211335416306700246740ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 http://remoteaccessinfouri/ MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://extensionsconfiguri/ http://fullconfiguri/ DummyRoleConfigName.xml WALinuxAgent-2.2.32/tests/data/wire/hosting_env.xml000066400000000000000000000043251335416306700222100ustar00rootroot00000000000000 
WALinuxAgent-2.2.32/tests/data/wire/manifest.xml000066400000000000000000000055451335416306700215000ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.2.0 2.0.0http://host/OSTCExtensions.ExampleHandlerLinux__2.0.0 2.1.0http://host/OSTCExtensions.ExampleHandlerLinux__2.1.0 True 2.1.1http://host/OSTCExtensions.ExampleHandlerLinux__2.1.1 2.2.0http://host/OSTCExtensions.ExampleHandlerLinux__2.2.0 3.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 3.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.0.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 4.0.0.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.1.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.1 1.3.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.3.0 2.3.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 2.4.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 WALinuxAgent-2.2.32/tests/data/wire/manifest_deletion.xml000066400000000000000000000005601335416306700233530ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 WALinuxAgent-2.2.32/tests/data/wire/remote_access_10_accounts.xml000066400000000000000000000065631335416306700247060ustar00rootroot00000000000000 1 1 testAccount1 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount2 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount3 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount4 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount5 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount6 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount7 encryptedPasswordString 
2019-01-01 Administrators RemoteDesktopUsers testAccount8 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount9 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount10 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.32/tests/data/wire/remote_access_duplicate_accounts.xml000066400000000000000000000014501335416306700264260ustar00rootroot00000000000000 1 1 testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.32/tests/data/wire/remote_access_no_accounts.xml000066400000000000000000000002151335416306700250660ustar00rootroot00000000000000 1 1 WALinuxAgent-2.2.32/tests/data/wire/remote_access_single_account.xml000066400000000000000000000007401335416306700255530ustar00rootroot00000000000000 1 1 testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.32/tests/data/wire/remote_access_two_accounts.xml000066400000000000000000000014521335416306700252670ustar00rootroot00000000000000 1 1 testAccount1 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount2 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.32/tests/data/wire/sample.pem000066400000000000000000000032471335416306700211310ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC3zCdThkBDYu83 M7ouc03caqyEwV6lioWbtYdnraoftbuCJrOhy+WipSCVAmhlu/tpaItuzwB9/VTw eSWfB/hB2sabVTKgU8gTQrI6ISy2ocLjqTIZuOETJuGlAIw6OXorhdUr8acZ8ohb ftZIbS9YKxbO7sQi+20sT2ugROJnO7IDGbb2vWhEhp2NAieJ8Nnq0SMv1+cZJZYk 6hiFVSl12g0egVFrRTJBvvTbPS7amLAQkauK/IxG28jZR61pMbHHX+xBg4Iayb2i qp8YnwK3qtf0stc0h9snnLnHSODva1Bo6qVBEcrkuXmtrHL2nUMsV/MgWG3HMgJJ 6Jf/wSFpAgMBAAECggEBALepsS6cvADajzK5ZPXf0NFOY6CxXnPLrWGAj5NCDftr 7bjMFbq7dngFzD46zrnClCOsDZEoF1TO3p8CYF6/Zwvfo5E7HMDrl8XvYwwFdJn3 
oTlALMlZXsh1lQv+NSJFp1hwfylPbGzYV/weDeEIAkR3om4cWDCg0GJz5peb3iXK 5fimrZsnInhktloU2Ep20UepR8wbhS5WP7B2s32OULTlWiGdORUVrHJQbTN6O0NZ WzmAcsgfmW1KEBOR9sDFbAdldt8/WcLJVIfWOdFVbCbOaxrnRnZ8j8tsafziVncD QFRpNeyOHZR5S84oAPo2EIVeFCLLeo3Wit/O3IFmhhUCgYEA5jrs0VSowb/xU/Bw wm1cKnSqsub3p3GLPL4TdODYMHH56Wv8APiwcW9O1+oRZoM9M/8KXkDlfFOz10tY bMYvF8MzFKIzzi5TxaWqSWsNeXpoqtFqUed7KRh3ybncIqFAAauTwmAhAlEmGR/e AY7Oy4b2lnRU1ssIOd0VnSnAqTcCgYEAzF6746DhsInlFIQGsUZBOmUtwyu0k1kc gkWhJt5SyQHZtX1SMV2RI6CXFpUZcjv31jM30GmXdvkuj2dIHaDZB5V5BlctPJZq FH0RFxmFHXk+npLJnKKSX1H3/2PxTUsSBcFHEaPCgvIz3720bX7fqRIFtVdrcbQA cB9DARbjWl8CgYBKADyoWCbaB+EA0vLbe505RECtulF176gKgSnt0muKvsfOQFhC 06ya+WUFP4YSRjLA6MQjYYahvKG8nMoyRE1UvPhJNI2kQv3INKSUbqVpG3BTH3am Ftpebi/qliPsuZnCL60RuCZEAWNWhgisxYMwphPSblfqpl3hg290EbyMZwKBgQCs mypHQ166EozW+fcJDFQU9NVkrGoTtMR+Rj6oLEdxG037mb+sj+EAXSaeXQkj0QAt +g4eyL+zLRuk5E8lLu9+F0EjGMfNDyDC8ypW/yfNT9SSa1k6IJhNR1aUbZ2kcU3k bGwQuuWSYOttAbT8cZaHHgCSOyY03xkrmUunBOS6MwKBgBK4D0Uv7ZDf3Y38A07D MblDQj3wZeFu6IWi9nVT12U3WuEJqQqqxWnWmETa+TS/7lhd0GjTB+79+qOIhmls XSAmIS/rBUGlk5f9n+vBjQkpbqAvcXV7I/oQASpVga1xB9EuMvXc9y+x/QfmrYVM zqxRWJIMASPLiQr79V0zXGXP -----END PRIVATE KEY-----WALinuxAgent-2.2.32/tests/data/wire/shared_config.xml000066400000000000000000000046351335416306700224640ustar00rootroot00000000000000 WALinuxAgent-2.2.32/tests/data/wire/sshd_config000066400000000000000000000047771335416306700213670ustar00rootroot00000000000000# Package generated configuration file # See the sshd_config(5) manpage for details # What ports, IPs and protocols we listen for Port 22 # Use these options to restrict which interfaces/protocols sshd will bind to #ListenAddress :: #ListenAddress 0.0.0.0 Protocol 2 # HostKeys for protocol version 2 HostKey /etc/ssh/ssh_host_rsa_key HostKey /etc/ssh/ssh_host_dsa_key HostKey /etc/ssh/ssh_host_ecdsa_key HostKey /etc/ssh/ssh_host_ed25519_key #Privilege Separation is turned on for security UsePrivilegeSeparation yes # Lifetime and size of ephemeral version 1 server key 
KeyRegenerationInterval 3600 ServerKeyBits 1024 # Logging SyslogFacility AUTH LogLevel INFO # Authentication: LoginGraceTime 120 PermitRootLogin without-password StrictModes yes RSAAuthentication yes PubkeyAuthentication yes #AuthorizedKeysFile %h/.ssh/authorized_keys # Don't read the user's ~/.rhosts and ~/.shosts files IgnoreRhosts yes # For this to work you will also need host keys in /etc/ssh_known_hosts RhostsRSAAuthentication no # similar for protocol version 2 HostbasedAuthentication no # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication #IgnoreUserKnownHosts yes # To enable empty passwords, change to yes (NOT RECOMMENDED) PermitEmptyPasswords no # Change to yes to enable challenge-response passwords (beware issues with # some PAM modules and threads) ChallengeResponseAuthentication no # Change to no to disable tunnelled clear text passwords #PasswordAuthentication yes # Kerberos options #KerberosAuthentication no #KerberosGetAFSToken no #KerberosOrLocalPasswd yes #KerberosTicketCleanup yes # GSSAPI options #GSSAPIAuthentication no #GSSAPICleanupCredentials yes X11Forwarding yes X11DisplayOffset 10 PrintMotd no PrintLastLog yes TCPKeepAlive yes #UseLogin no #MaxStartups 10:30:60 #Banner /etc/issue.net # Allow client to pass locale environment variables AcceptEnv LANG LC_* Subsystem sftp /usr/lib/openssh/sftp-server # Set this to 'yes' to enable PAM authentication, account processing, # and session processing. If this is enabled, PAM authentication will # be allowed through the ChallengeResponseAuthentication and # PasswordAuthentication. Depending on your PAM configuration, # PAM authentication via ChallengeResponseAuthentication may bypass # the setting of "PermitRootLogin without-password". # If you just want the PAM account and session checks to run without # PAM authentication, then enable this but set PasswordAuthentication # and ChallengeResponseAuthentication to 'no'. 
UsePAM yes Match group root WALinuxAgent-2.2.32/tests/data/wire/trans_cert000066400000000000000000000021271335416306700212300ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDBzCCAe+gAwIBAgIJANujJuVt5eC8MA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV BAMMDkxpbnV4VHJhbnNwb3J0MCAXDTE0MTAyNDA3MjgwN1oYDzIxMDQwNzEyMDcy ODA3WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPcJAkd6V5NeogSKjIeTXOWC5xzKTyuJPt4YZMVSosU 0lI6a0wHp+g2fP22zrVswW+QJz6AVWojIEqLQup3WyCXZTv8RUblHnIjkvX/+J/G aLmz0G5JzZIpELL2C8IfQLH2IiPlK9LOQH00W74WFcK3QqcJ6Kw8GcVaeSXT1r7X QcGMqEjcWJkpKLoMJv3LMufE+JMdbXDUGY+Ps7Zicu8KXvBPaKVsc6H2jrqBS8et jXbzLyrezTUDz45rmyRJzCO5Sk2pohuYg73wUykAUPVxd7L8WnSyqz1v4zrObqnw BAyor67JR/hjTBfjFOvd8qFGonfiv2Vnz9XsYFTZsXECAwEAAaNQME4wHQYDVR0O BBYEFL8i/sehpGV6IEDX7F0WQHQ/ZXOyMB8GA1UdIwQYMBaAFL8i/sehpGV6IEDX 7F0WQHQ/ZXOyMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMPLrimT Gptu5pLRHPT8OFRN+skNSkepYaUaJuq6cSKxLumSYkD8++rohu+1+a7t1YNjjNSJ 8ohRAynRJ7aRqwBmyX2OPLRpOfyRZwR0rcFfAMORm/jOE6WBdqgYD2L2b+tZplGt /QqgQzebaekXh/032FK4c74Zg5r3R3tfNSUMG6nLauWzYHbQ5SCdkuQwV0ehGqh5 VF1AOdmz4CC2237BNznDFQhkeU0LrqqAoE/hv5ih7klJKZdS88rOYEnVJsFFJb0g qaycXjOm5Khgl4hKrd+DBD/qj4IVVzsmdpFli72k6WLBHGOXusUGo/3isci2iAIt DsfY6XGSEIhZnA4= -----END CERTIFICATE----- WALinuxAgent-2.2.32/tests/data/wire/trans_prv000066400000000000000000000032501335416306700211000ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDT3CQJHeleTXqI EioyHk1zlguccyk8riT7eGGTFUqLFNJSOmtMB6foNnz9ts61bMFvkCc+gFVqIyBK i0Lqd1sgl2U7/EVG5R5yI5L1//ifxmi5s9BuSc2SKRCy9gvCH0Cx9iIj5SvSzkB9 NFu+FhXCt0KnCeisPBnFWnkl09a+10HBjKhI3FiZKSi6DCb9yzLnxPiTHW1w1BmP j7O2YnLvCl7wT2ilbHOh9o66gUvHrY128y8q3s01A8+Oa5skScwjuUpNqaIbmIO9 8FMpAFD1cXey/Fp0sqs9b+M6zm6p8AQMqK+uyUf4Y0wX4xTr3fKhRqJ34r9lZ8/V 7GBU2bFxAgMBAAECggEBAM4hsfog3VAAyIieS+npq+gbhH6bWfMNaTQ3g5CNNbMu 9hhFeOJHzKnWYjSlamgBQhAfTN+2E+Up+iAtcVUZ/lMumrQLlwgMo1vgmvu5Kxmh 
/YE5oEG+k0JzrCjD1trwd4zvc3ZDYyk/vmVTzTOc311N248UyArUiyqHBbq1a4rP tJhCLn2c4S7flXGF0MDVGZyV9V7J8N8leq/dRGMB027Li21T+B4mPHXa6b8tpRPL 4vc8sHoUJDa2/+mFDJ2XbZfmlgd3MmIPlRn1VWoW7mxgT/AObsPl7LuQx7+t80Wx hIMjuKUHRACQSLwHxJ3SQRFWp4xbztnXSRXYuHTscLUCgYEA//Uu0qIm/FgC45yG nXtoax4+7UXhxrsWDEkbtL6RQ0TSTiwaaI6RSQcjrKDVSo/xo4ZySTYcRgp5GKlI CrWyNM+UnIzTNbZOtvSIAfjxYxMsq1vwpTlOB5/g+cMukeGg39yUlrjVNoFpv4i6 9t4yYuEaF4Vww0FDd2nNKhhW648CgYEA0+UYH6TKu03zDXqFpwf4DP2VoSo8OgfQ eN93lpFNyjrfzvxDZkGF+7M/ebyYuI6hFplVMu6BpgpFP7UVJpW0Hn/sXkTq7F1Q rTJTtkTp2+uxQVP/PzSOqK0Twi5ifkfoEOkPkNNtTiXzwCW6Qmmcvln2u893pyR5 gqo5BHR7Ev8CgYAb7bXpN9ZHLJdMHLU3k9Kl9YvqOfjTxXA3cPa79xtEmsrTys4q 4HuL22KSII6Fb0VvkWkBAg19uwDRpw78VC0YxBm0J02Yi8b1AaOhi3dTVzFFlWeh r6oK/PAAcMKxGkyCgMAZ3hstsltGkfXMoBwhW+yL6nyOYZ2p9vpzAGrjkwKBgQDF 0huzbyXVt/AxpTEhv07U0enfjI6tnp4COp5q8zyskEph8yD5VjK/yZh5DpmFs6Kw dnYUFpbzbKM51tToMNr3nnYNjEnGYVfwWgvNHok1x9S0KLcjSu3ki7DmmGdbfcYq A2uEyd5CFyx5Nr+tQOwUyeiPbiFG6caHNmQExLoiAQKBgFPy9H8///xsadYmZ18k r77R2CvU7ArxlLfp9dr19aGYKvHvnpsY6EuChkWfy8Xjqn3ogzgrHz/rn3mlGUpK vbtwtsknAHtTbotXJwfaBZv2RGgGRr3DzNo6ll2Aez0lNblZFXq132h7+y5iLvar 4euORaD/fuM4UPlR5mN+bypU -----END PRIVATE KEY----- WALinuxAgent-2.2.32/tests/data/wire/version_info.xml000066400000000000000000000003361335416306700223630ustar00rootroot00000000000000 2012-11-30 2010-12-15 2010-28-10 WALinuxAgent-2.2.32/tests/distro/000077500000000000000000000000001335416306700165645ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/distro/__init__.py000066400000000000000000000011651335416306700207000ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/distro/test_resourceDisk.py000066400000000000000000000076411335416306700226470ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import sys from azurelinuxagent.common.utils import shellutil from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from tests.tools import * class TestResourceDisk(AgentTestCase): def test_mkfile(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) # execute get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert os.path.exists(test_file) # cleanup os.remove(test_file) def test_mkfile_dd_fallback(self): with patch.object(shellutil, "run") as run_patch: # setup run_patch.return_value = 1 test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 # execute if sys.version_info >= (3,3): with patch("os.posix_fallocate", side_effect=Exception('failure')): get_resourcedisk_handler().mkfile(test_file, file_size) else: get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert run_patch.call_count > 1 assert "fallocate" in run_patch.call_args_list[0][0][0] assert "dd if" in run_patch.call_args_list[-1][0][0] def test_mkfile_xfs_fs(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) # execute resource_disk_handler = get_resourcedisk_handler() resource_disk_handler.fs = 'xfs' with patch.object(shellutil, "run") as run_patch: resource_disk_handler.mkfile(test_file, file_size) # assert if sys.version_info >= (3,3): with patch("os.posix_fallocate") as posix_fallocate: self.assertEqual(0, posix_fallocate.call_count) assert run_patch.call_count == 1 assert "dd if" in run_patch.call_args_list[0][0][0] def test_change_partition_type(self): resource_handler = get_resourcedisk_handler() # test when sfdisk --part-type does 
not exist with patch.object(shellutil, "run_get_output", side_effect=[[1, ''], [0, '']]) as run_patch: resource_handler.change_partition_type(suppress_message=True, option_str='') # assert assert run_patch.call_count == 2 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] assert "sfdisk -c" in run_patch.call_args_list[1][0][0] # test when sfdisk --part-type exists with patch.object(shellutil, "run_get_output", side_effect=[[0, '']]) as run_patch: resource_handler.change_partition_type(suppress_message=True, option_str='') # assert assert run_patch.call_count == 1 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/distro/test_scvmm.py000066400000000000000000000064111335416306700213240ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import mock from tests.tools import * import azurelinuxagent.daemon.scvmm as scvmm from azurelinuxagent.daemon.main import * from azurelinuxagent.common.osutil.default import DefaultOSUtil class TestSCVMM(AgentTestCase): def test_scvmm_detection_with_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) fileutil.write_file(scvmm_file, "") with patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as po: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): with patch('time.sleep', return_value=0): # execute failed = False try: scvmm.get_scvmm_handler().run() except: failed = True # assert self.assertTrue(failed) self.assertTrue(po.call_count == 1) # cleanup os.remove(scvmm_file) def test_scvmm_detection_with_multiple_cdroms(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) # execute with mock.patch.object(DefaultOSUtil, 'mount_dvd') as patch_mount: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): scvmm.ScvmmHandler().detect_scvmm_env() # assert assert patch_mount.call_count == 3 assert patch_mount.call_args_list[0][1]['dvd_device'] == '/dev/sr0' assert patch_mount.call_args_list[1][1]['dvd_device'] == '/dev/sr1' assert patch_mount.call_args_list[2][1]['dvd_device'] == '/dev/sr2' def test_scvmm_detection_without_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) if os.path.exists(scvmm_file): os.remove(scvmm_file) with mock.patch.object(scvmm.ScvmmHandler, 
'start_scvmm_agent') as patch_start: # execute scvmm.ScvmmHandler().detect_scvmm_env() # assert patch_start.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/ga/000077500000000000000000000000001335416306700156475ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/ga/__init__.py000066400000000000000000000011651335416306700177630ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/ga/test_extension.py000066400000000000000000001245221335416306700213020ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os.path from tests.protocol.mockwiredata import * from azurelinuxagent.ga.exthandlers import * from azurelinuxagent.common.protocol.wire import WireProtocol class TestExtensionCleanup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.ext_handlers = ExtHandlersHandler() self.lib_dir = tempfile.mkdtemp() def _install_handlers(self, start=0, count=1, handler_state=ExtHandlerState.Installed): src = os.path.join(data_dir, "ext", "sample_ext-1.3.0.zip") version = FlexibleVersion("1.3.0") version += start - version.patch for i in range(start, start+count): eh = ExtHandler() eh.name = "sample_ext" eh.properties.version = str(version) handler = ExtHandlerInstance(eh, "unused") dst = os.path.join(self.lib_dir, handler.get_full_name()+HANDLER_PKG_EXT) shutil.copy(src, dst) if not handler_state is None: zipfile.ZipFile(dst).extractall(handler.get_base_dir()) handler.set_handler_state(handler_state) version += 1 def _count_packages(self): return len(glob.glob(os.path.join(self.lib_dir, "*.zip"))) def _count_installed(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths if os.path.isdir(p) and self._is_installed(p)]) def _count_uninstalled(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths if os.path.isdir(p) and not self._is_installed(p)]) def _is_installed(self, path): path = os.path.join(path, 'config', 'HandlerState') return fileutil.read_file(path) != "NotInstalled" @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_leaves_installed_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.Enabled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) 
self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) self.assertEqual(self._count_uninstalled(), 0) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_uninstalled_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.NotInstalled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 5) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_orphaned_packages(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=None) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) class TestHandlerStateMigration(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) handler_name = "Not.A.Real.Extension" handler_version = "1.2.3" self.ext_handler = ExtHandler(handler_name) self.ext_handler.properties.version = handler_version self.ext_handler_i = ExtHandlerInstance(self.ext_handler, "dummy protocol") self.handler_state = "Enabled" self.handler_status = ExtHandlerStatus( name=handler_name, version=handler_version, status="Ready", message="Uninteresting message") return def _prepare_handler_state(self): handler_state_path = 
os.path.join( self.tmp_dir, "handler_state", self.ext_handler_i.get_full_name()) os.makedirs(handler_state_path) fileutil.write_file( os.path.join(handler_state_path, "state"), self.handler_state) fileutil.write_file( os.path.join(handler_state_path, "status"), json.dumps(get_properties(self.handler_status))) return def _prepare_handler_config(self): handler_config_path = os.path.join( self.tmp_dir, self.ext_handler_i.get_full_name(), "config") os.makedirs(handler_config_path) return def test_migration_migrates(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), self.handler_state) self.assertEquals( self.ext_handler_i.get_handler_status().status, self.handler_status.status) return def test_migration_skips_if_empty(self): self._prepare_handler_config() migrate_handler_state() self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerState"))) self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerStatus"))) return def test_migration_cleans_up(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertFalse(os.path.isdir(os.path.join(conf.get_lib_dir(), "handler_state"))) return def test_migration_does_not_overwrite(self): self._prepare_handler_state() self._prepare_handler_config() state = "Installed" status = "NotReady" code = 1 message = "A message" self.assertNotEquals(state, self.handler_state) self.assertNotEquals(status, self.handler_status.status) self.assertNotEquals(code, self.handler_status.code) self.assertNotEquals(message, self.handler_status.message) self.ext_handler_i.set_handler_state(state) self.ext_handler_i.set_handler_status(status=status, code=code, message=message) migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), state) handler_status = self.ext_handler_i.get_handler_status() 
self.assertEquals(handler_status.status, status) self.assertEquals(handler_status.code, code) self.assertEquals(handler_status.message, message) return def test_set_handler_status_ignores_none_content(self): """ Validate that set_handler_status ignore cases where json.dumps returns a value of None. """ self._prepare_handler_state() self._prepare_handler_config() status = "Ready" code = 0 message = "A message" try: with patch('json.dumps', return_value=None): self.ext_handler_i.set_handler_status(status=status, code=code, message=message) except Exception as e: self.fail("set_handler_status threw an exception") @patch("shutil.move", side_effect=Exception) def test_migration_ignores_move_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return @patch("shutil.rmtree", side_effect=Exception) def test_migration_ignores_tree_remove_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return class ExtensionTestCase(AgentTestCase): @classmethod def setUpClass(cls): CGroups.disable() @classmethod def tearDownClass(cls): CGroups.enable() @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtension(ExtensionTestCase): def _assert_handler_status(self, report_vm_status, expected_status, expected_ext_count, version, expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) self.assertEquals(expected_handler_name, 
handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) return def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) return def _create_mock(self, test_data, mock_http_get, MockCryptUtil): """Test enable/disable/uninstall of an extension""" handler = get_exthandlers_handler() #Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() handler.protocol_util.get_protocol = Mock(return_value=protocol) return handler, protocol def test_ext_handler(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) #Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) #Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) #Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") #Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! 
test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_no_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 0, "1.0.0") def test_ext_handler_no_public_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") def test_ext_handler_no_ext(self, *args): test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Assert no extension handler status exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_sequencing(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") self._assert_ext_status(protocol.report_ext_status, "success", 0) # check handler list self.assertTrue(exthandlers_handler.ext_handlers is not None) self.assertTrue(exthandlers_handler.ext_handlers.extHandlers is not None) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 1) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 2) #Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") #Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", "dependencyLevel=\"3\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"1\"", "dependencyLevel=\"4\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 3) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 4) #Test disable test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") 
self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 4) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 3) #Test uninstall test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"3\"", "dependencyLevel=\"6\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"4\"", "dependencyLevel=\"5\"") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 6) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 5) def test_ext_handler_rollingupgrade(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test minor version bump test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test hotfix version bump test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.1.1") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.1.1") #Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! 
test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test re-install test_data.goal_state = test_data.goal_state.replace("7<", "8<") test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test version bump post-re-install test_data.goal_state = test_data.goal_state.replace("8<", "9<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test rollback test_data.goal_state = test_data.goal_state.replace("9<", "10<") test_data.ext_conf = test_data.ext_conf.replace("1.2.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_download_failure_transient(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) exthandlers_handler.run() self.assertEquals(0, mock_add_event.call_count) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_report_status_permanent(self, mock_add_event, mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.report_vm_status = Mock(side_effect=ProtocolError) mock_error_state.return_value = True exthandlers_handler.run() self.assertEquals(5, mock_add_event.call_count) args, kw = 
mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertTrue("Failed to report vm agent status" in kw['message']) self.assertEquals("ReportStatusExtended", kw['op']) @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_report_status_resource_gone(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.report_vm_status = Mock(side_effect=ResourceGoneError) exthandlers_handler.run() self.assertEquals(4, mock_add_event.call_count) args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertTrue("ResourceGoneError" in kw['message']) self.assertEquals("ExtensionProcessing", kw['op']) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.common.event.add_event') def test_ext_handler_download_failure_permanent(self, mock_add_event, mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.get_ext_handler_pkgs = Mock(side_effect=ProtocolError) mock_error_state.return_value = True exthandlers_handler.run() self.assertEquals(1, mock_add_event.call_count) args, kw = mock_add_event.call_args_list[0] self.assertEquals(False, kw['is_success']) self.assertTrue("Failed to get ext handler pkgs" in kw['message']) self.assertTrue("Failed to get artifact" in kw['message']) self.assertEquals("GetArtifactExtended", kw['op']) @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) mock_fileutil.write_file.return_value = IOError("Mock IO Error") exthandlers_handler.run() def test_handle_ext_handlers_on_hold_true(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) 
exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() protocol.get_artifacts_profile = MagicMock() exthandlers_handler.protocol = protocol # Disable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=False) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(1, patch_handle_ext_handler.call_count) # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(0, patch_handle_ext_handler.call_count) def test_handle_ext_handlers_on_hold_false(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() exthandlers_handler.protocol = protocol # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) #Test when is_on_hold returns False from azurelinuxagent.common.protocol.wire import InVMArtifactsProfile mock_in_vm_artifacts_profile = InVMArtifactsProfile(MagicMock()) mock_in_vm_artifacts_profile.is_on_hold = Mock(return_value=False) protocol.get_artifacts_profile = Mock(return_value=mock_in_vm_artifacts_profile) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(1, patch_handle_ext_handler.call_count) #Test when in_vm_artifacts_profile is not available protocol.get_artifacts_profile = Mock(return_value=None) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(1, patch_handle_ext_handler.call_count) def _assert_ext_status(self, report_ext_status, 
expected_status, expected_seq_no): self.assertTrue(report_ext_status.called) args, kw = report_ext_status.call_args ext_status = args[-1] self.assertEquals(expected_status, ext_status.status) self.assertEquals(expected_seq_no, ext_status.sequenceNumber) def test_ext_handler_no_reporting_status(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Remove status file and re-run collecting extension status status_file = os.path.join(self.tmp_dir, "OSTCExtensions.ExampleHandlerLinux-1.0.0", "status", "0.status") self.assertTrue(os.path.isfile(status_file)) os.remove(status_file) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "error", 0) def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): for internal in [False, True]: for autoupgrade in [False, True]: if internal: config_version = '1.3.0' decision_version = '1.3.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION else: datafile = DATA_FILE_EXT_INTERNALVERSION else: config_version = '1.0.0' decision_version = '1.0.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE else: datafile = DATA_FILE _, protocol = self._create_mock(WireProtocolData(datafile), *args) ext_handlers, _ = protocol.get_ext_handlers() self.assertEqual(1, len(ext_handlers.extHandlers)) ext_handler = ext_handlers.extHandlers[0] self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) self.assertEqual(config_version, ext_handler.properties.version, "config version.") ExtHandlerInstance(ext_handler, protocol).decide_version() self.assertEqual(decision_version, ext_handler.properties.version, "decision version.") def test_ext_handler_version_decide_between_minor_versions(self, *args): """ Using v2.x~v4.x for unit 
testing Available versions via manifest XML (I stands for internal): 2.0.0, 2.1.0, 2.1.1, 2.2.0, 2.3.0(I), 2.4.0(I), 3.0, 3.1, 4.0.0.0, 4.0.0.1, 4.1.0.0 See tests/data/wire/manifest.xml for possible versions """ # (installed_version, config_version, exptected_version, autoupgrade_expected_version) cases = [ (None, '2.0', '2.0.0'), (None, '2.0.0', '2.0.0'), ('1.0', '1.0.0', '1.0.0'), (None, '2.1.0', '2.1.0'), (None, '2.1.1', '2.1.1'), (None, '2.2.0', '2.2.0'), (None, '2.3.0', '2.3.0'), (None, '2.4.0', '2.4.0'), (None, '3.0', '3.0'), (None, '3.1', '3.1'), (None, '4.0', '4.0.0.1'), (None, '4.1', '4.1.0.0'), ] _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) version_uri = Mock() version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' for (installed_version, config_version, expected_version) in cases: ext_handler = Mock() ext_handler.properties = Mock() ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' ext_handler.versionUris = [version_uri] ext_handler.properties.version = config_version ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) ext_handler_instance.get_installed_version = Mock(return_value=installed_version) ext_handler_instance.decide_version() self.assertEqual(expected_version, ext_handler.properties.version) @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_extensions_disabled(self, _, *args): # test status is reported for no extensions test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # test status is reported, but extensions are not processed test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_extensions_deleted(self, *args): test_data = 
WireProtocolData(DATA_FILE_EXT_DELETION) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial enable is successful exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Update incarnation, simulate new extension version and old one deleted test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') # Ensure new extension can be enabled exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('subprocess.Popen.poll') def test_install_failure(self, patch_poll, *args): """ When extension install fails, the operation should not be retried. """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install is unsuccessful patch_poll.call_count = 0 patch_poll.return_value = 1 exthandlers_handler.run() # capture process output also calls poll self.assertEqual(2, patch_poll.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.0") # Ensure subsequent no further retries are made exthandlers_handler.run() self.assertEqual(2, patch_poll.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') def test_enable_failure(self, patch_get_enable_command, *args): """ When extension enable fails, the operation should not be retried. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install is successful, but enable fails patch_get_enable_command.call_count = 0 patch_get_enable_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(1, patch_get_enable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") exthandlers_handler.run() self.assertEqual(1, patch_get_enable_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_disable_failure(self, patch_get_disable_command, *args): """ When extension disable fails, the operation should not be retried. """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful, but disable fails patch_get_disable_command.call_count = 0 patch_get_disable_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(0, patch_get_disable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, disable extension test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") # Ensure there are no further retries exthandlers_handler.run() self.assertEqual(1, 
patch_get_disable_command.call_count) self.assertEqual(3, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') def test_uninstall_failure(self, patch_get_uninstall_command, *args): """ When extension uninstall fails, the operation should not be retried. """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful, but uninstall fails patch_get_uninstall_command.call_count = 0 patch_get_uninstall_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(0, patch_get_uninstall_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, disable extension test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) self._assert_no_handler_status(protocol.report_vm_status) # Ensure there are no further retries exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(3, protocol.report_vm_status.call_count) self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) self._assert_no_handler_status(protocol.report_vm_status) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') def test_upgrade(self, patch_get_update_command, *args): """ Extension upgrade failure should not be retried 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful exthandlers_handler.run() self.assertEqual(0, patch_get_update_command.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, update version test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') # Update command should fail patch_get_update_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(1, patch_get_update_command.call_count) # On the next iteration, update should not be retried exthandlers_handler.run() self.assertEqual(1, patch_get_update_command.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.1") if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/ga/test_exthandlers.py000066400000000000000000000126631335416306700216110ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. import json from azurelinuxagent.common.protocol.restapi import ExtensionStatus, Extension, ExtHandler, ExtHandlerProperties from azurelinuxagent.ga.exthandlers import parse_ext_status, ExtHandlerInstance from tests.tools import * class TestExtHandlers(AgentTestCase): def test_parse_extension_status00(self): """ Parse a status report for a successful execution of an extension. """ s = '''[{ "status": { "status": "success", "formattedMessage": { "lang": "en-US", "message": "Command is finished." 
}, "operation": "Daemon", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" }, "version": "1.0", "timestampUTC": "2018-04-20T21:20:24Z" } ]''' ext_status = ExtensionStatus(seq_no=0) parse_ext_status(ext_status, json.loads(s)) self.assertEqual('0', ext_status.code) self.assertEqual(None, ext_status.configurationAppliedTime) self.assertEqual('Command is finished.', ext_status.message) self.assertEqual('Daemon', ext_status.operation) self.assertEqual('success', ext_status.status) self.assertEqual(0, ext_status.sequenceNumber) self.assertEqual(0, len(ext_status.substatusList)) def test_parse_extension_status01(self): """ Parse a status report for a failed execution of an extension. The extension returned a bad status/status of failed. The agent should handle this gracefully, and convert all unknown status/status values into an error. """ s = '''[{ "status": { "status": "failed", "formattedMessage": { "lang": "en-US", "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..." 
}, "operation": "Enable", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" }, "version": "1.0", "timestampUTC": "2018-04-20T20:50:22Z" }]''' ext_status = ExtensionStatus(seq_no=0) parse_ext_status(ext_status, json.loads(s)) self.assertEqual('0', ext_status.code) self.assertEqual(None, ext_status.configurationAppliedTime) self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message) self.assertEqual('Enable', ext_status.operation) self.assertEqual('error', ext_status.status) self.assertEqual(0, ext_status.sequenceNumber) self.assertEqual(0, len(ext_status.substatusList)) @patch('azurelinuxagent.common.event.EventLogger.add_event') @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_largest_seq_no') def assert_extension_sequence_number(self, patch_get_largest_seq, patch_add_event, goal_state_sequence_number, disk_sequence_number, expected_sequence_number): ext = Extension() ext.sequenceNumber = goal_state_sequence_number patch_get_largest_seq.return_value = disk_sequence_number ext_handler_props = ExtHandlerProperties() ext_handler_props.version = "1.2.3" ext_handler = ExtHandler(name='foo') ext_handler.properties = ext_handler_props instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) seq, path = instance.get_status_file_path(ext) try: gs_seq_int = int(goal_state_sequence_number) gs_int = True except ValueError: gs_int = False if gs_int and gs_seq_int != disk_sequence_number: self.assertEqual(1, patch_add_event.call_count) args, kw_args = patch_add_event.call_args self.assertEqual('SequenceNumberMismatch', kw_args['op']) self.assertEqual(False, kw_args['is_success']) self.assertEqual('Goal state: {0}, disk: {1}' .format(gs_seq_int, disk_sequence_number), kw_args['message']) else: self.assertEqual(0, patch_add_event.call_count) self.assertEqual(expected_sequence_number, seq) if seq > -1: 
self.assertTrue(path.endswith('/foo-1.2.3/status/{0}.status'.format(expected_sequence_number))) else: self.assertIsNone(path) def test_extension_sequence_number(self): self.assert_extension_sequence_number(goal_state_sequence_number="12", disk_sequence_number=366, expected_sequence_number=12) self.assert_extension_sequence_number(goal_state_sequence_number=" 12 ", disk_sequence_number=366, expected_sequence_number=12) self.assert_extension_sequence_number(goal_state_sequence_number=" foo", disk_sequence_number=3, expected_sequence_number=3) self.assert_extension_sequence_number(goal_state_sequence_number="-1", disk_sequence_number=3, expected_sequence_number=-1) WALinuxAgent-2.2.32/tests/ga/test_monitor.py000066400000000000000000000270661335416306700207620ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
# Requires Python 2.6+ and Openssl 1.0+
#

from datetime import timedelta

from azurelinuxagent.common.protocol.wire import WireProtocol
from tests.tools import *
from azurelinuxagent.ga.monitor import *


# The stacked class-level patches are appended (bottom-up) to every test
# method's *args; tests index into args positionally when they need a mock.
@patch('azurelinuxagent.common.event.EventLogger.add_event')
@patch('azurelinuxagent.common.osutil.get_osutil')
@patch('azurelinuxagent.common.protocol.get_protocol_util')
@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol')
@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report")
@patch("azurelinuxagent.common.utils.restutil.http_get")
class TestMonitor(AgentTestCase):

    def test_parse_xml_event(self, *args):
        # A sample telemetry event must parse to a non-empty parameter list.
        data_str = load_data('ext/event.xml')
        event = parse_xml_event(data_str)
        self.assertNotEqual(None, event)
        self.assertNotEqual(0, event.parameters)
        self.assertNotEqual(None, event.parameters[0])

    def test_add_sysinfo(self, *args):
        # add_sysinfo must merge every configured sysinfo parameter into the
        # parsed event's parameter list.
        data_str = load_data('ext/event.xml')
        event = parse_xml_event(data_str)
        monitor_handler = get_monitor_handler()

        vm_name = 'dummy_vm'
        tenant_name = 'dummy_tenant'
        role_name = 'dummy_role'
        role_instance_name = 'dummy_role_instance'
        container_id = 'dummy_container_id'

        vm_name_param = "VMName"
        tenant_name_param = "TenantName"
        role_name_param = "RoleName"
        role_instance_name_param = "RoleInstanceName"
        container_id_param = "ContainerId"

        sysinfo = [TelemetryEventParam(vm_name_param, vm_name),
                   TelemetryEventParam(tenant_name_param, tenant_name),
                   TelemetryEventParam(role_name_param, role_name),
                   TelemetryEventParam(role_instance_name_param, role_instance_name),
                   TelemetryEventParam(container_id_param, container_id)]
        monitor_handler.sysinfo = sysinfo
        monitor_handler.add_sysinfo(event)

        self.assertNotEqual(None, event)
        self.assertNotEqual(0, event.parameters)
        self.assertNotEqual(None, event.parameters[0])
        # Count each expected sysinfo parameter exactly once.
        counter = 0
        for p in event.parameters:
            if p.name == vm_name_param:
                self.assertEqual(vm_name, p.value)
                counter += 1
            elif p.name == tenant_name_param:
                self.assertEqual(tenant_name, p.value)
                counter += 1
            elif p.name == role_name_param:
                self.assertEqual(role_name, p.value)
                counter += 1
            elif p.name == role_instance_name_param:
                self.assertEqual(role_instance_name, p.value)
                counter += 1
            elif p.name == container_id_param:
                self.assertEqual(container_id, p.value)
                counter += 1

        self.assertEqual(5, counter)

    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_heartbeat")
    @patch("azurelinuxagent.ga.monitor.MonitorHandler.collect_and_send_events")
    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_host_plugin_heartbeat")
    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry")
    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_imds_heartbeat")
    def test_heartbeats(self,
                        patch_imds_heartbeat,
                        patch_cgroup_telemetry,
                        patch_hostplugin_heartbeat,
                        patch_send_events,
                        patch_telemetry_heartbeat,
                        *args):
        # With 100ms periods, one second of running the monitor thread must
        # fire every periodic operation at least once.
        monitor_handler = get_monitor_handler()

        MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100)

        self.assertEqual(0, patch_hostplugin_heartbeat.call_count)
        self.assertEqual(0, patch_send_events.call_count)
        self.assertEqual(0, patch_telemetry_heartbeat.call_count)
        self.assertEqual(0, patch_imds_heartbeat.call_count)
        self.assertEqual(0, patch_cgroup_telemetry.call_count)

        monitor_handler.start()
        time.sleep(1)
        self.assertTrue(monitor_handler.is_alive())

        self.assertNotEqual(0, patch_hostplugin_heartbeat.call_count)
        self.assertNotEqual(0, patch_send_events.call_count)
        self.assertNotEqual(0, patch_telemetry_heartbeat.call_count)
        self.assertNotEqual(0, patch_imds_heartbeat.call_count)
        self.assertNotEqual(0, patch_cgroup_telemetry.call_count)

        monitor_handler.stop()

    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry")
    def test_heartbeat_timings_updates_after_window(self, *args):
        # The last_* timestamps must advance once the (100ms) period elapses.
        monitor_handler = get_monitor_handler()

        MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100)
        MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100)

        self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat)
        self.assertEqual(None, monitor_handler.last_event_collection)
        self.assertEqual(None, monitor_handler.last_telemetry_heartbeat)
        self.assertEqual(None, monitor_handler.last_imds_heartbeat)

        monitor_handler.start()
        time.sleep(0.2)
        self.assertTrue(monitor_handler.is_alive())

        self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat)
        self.assertNotEqual(None, monitor_handler.last_event_collection)
        self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat)
        self.assertNotEqual(None, monitor_handler.last_imds_heartbeat)

        heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat
        heartbeat_imds = monitor_handler.last_imds_heartbeat
        heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat
        events_collection = monitor_handler.last_event_collection

        time.sleep(0.5)

        self.assertNotEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat)
        self.assertNotEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat)
        self.assertNotEqual(events_collection, monitor_handler.last_event_collection)
        self.assertNotEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat)

        monitor_handler.stop()

    @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry")
    def test_heartbeat_timings_no_updates_within_window(self, *args):
        # With 1s periods, the last_* timestamps must NOT advance within 0.5s.
        monitor_handler = get_monitor_handler()

        MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(seconds=1)
        MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(seconds=1)
        MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(seconds=1)
        MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(seconds=1)

        self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat)
        self.assertEqual(None, monitor_handler.last_event_collection)
        self.assertEqual(None, monitor_handler.last_telemetry_heartbeat)
        self.assertEqual(None, monitor_handler.last_imds_heartbeat)

        monitor_handler.start()
        time.sleep(0.2)
        self.assertTrue(monitor_handler.is_alive())

        self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat)
        self.assertNotEqual(None, monitor_handler.last_event_collection)
        self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat)
        self.assertNotEqual(None, monitor_handler.last_imds_heartbeat)

        heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat
        heartbeat_imds = monitor_handler.last_imds_heartbeat
        heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat
        events_collection = monitor_handler.last_event_collection

        time.sleep(0.5)

        self.assertEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat)
        self.assertEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat)
        self.assertEqual(events_collection, monitor_handler.last_event_collection)
        self.assertEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat)

        monitor_handler.stop()

    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat")
    def test_heartbeat_creates_signal(self, patch_report_heartbeat, *args):
        # A due heartbeat reports health; args[5] is the class-level
        # add_event patch and must not have fired.
        monitor_handler = get_monitor_handler()
        monitor_handler.init_protocols()
        monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1)
        monitor_handler.send_host_plugin_heartbeat()
        self.assertEqual(1, patch_report_heartbeat.call_count)
        self.assertEqual(0, args[5].call_count)
        monitor_handler.stop()

    @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered', return_value=True)
    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat")
    def test_failed_heartbeat_creates_telemetry(self, patch_report_heartbeat, _, *args):
        # When the error state is triggered, a telemetry event
        # (HostPluginHeartbeatExtended) must be emitted via add_event (args[5]).
        monitor_handler = get_monitor_handler()
        monitor_handler.init_protocols()
        monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1)
        monitor_handler.send_host_plugin_heartbeat()
        self.assertEqual(1, patch_report_heartbeat.call_count)
        self.assertEqual(1, args[5].call_count)
        self.assertEqual('HostPluginHeartbeatExtended', args[5].call_args[1]['op'])
        self.assertEqual(False, args[5].call_args[1]['is_success'])
        monitor_handler.stop()


@patch('azurelinuxagent.common.event.EventLogger.add_event')
@patch("azurelinuxagent.common.utils.restutil.http_post")
@patch("azurelinuxagent.common.utils.restutil.http_get")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state')
@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol('endpoint'))
class TestMonitorFailure(AgentTestCase):

    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat")
    def test_error_heartbeat_creates_no_signal(self, patch_report_heartbeat, *args):
        # args indexing (bottom-up decorators): args[2] is http_get,
        # args[4] is add_event.
        patch_http_get = args[2]
        patch_add_event = args[4]

        monitor_handler = get_monitor_handler()
        monitor_handler.init_protocols()
        monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1)

        patch_http_get.side_effect = IOError('client error')
        monitor_handler.send_host_plugin_heartbeat()

        # health report should not be made
        self.assertEqual(0, patch_report_heartbeat.call_count)

        # telemetry with failure details is sent
        self.assertEqual(1, patch_add_event.call_count)
        self.assertEqual('HostPluginHeartbeat', patch_add_event.call_args[1]['op'])
        self.assertTrue('client error' in patch_add_event.call_args[1]['message'])
        self.assertEqual(False, patch_add_event.call_args[1]['is_success'])
        monitor_handler.stop()
WALinuxAgent-2.2.32/tests/ga/test_remoteaccess.py000066400000000000000000000133261335416306700217420ustar00rootroot00000000000000# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import xml from tests.tools import * from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.osutil import get_osutil class TestRemoteAccess(AgentTestCase): def test_parse_remote_access(self): data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) self.assertNotEquals(None, remote_access) self.assertEquals("1", remote_access.incarnation) self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.") self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', return_value=GoalState(load_data('wire/goal_state.xml'))) def test_update_remote_access_conf_no_remote_access(self, _): protocol = WireProtocol('12.34.56.78') goal_state = protocol.client.get_goal_state() protocol.client.update_remote_access_conf(goal_state) def test_parse_two_remote_access_accounts(self): data_str = load_data('wire/remote_access_two_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEquals(None, remote_access) self.assertEquals("1", remote_access.incarnation) self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") self.assertEquals("testAccount1", 
remote_access.user_list.users[0].name, "Account name does not match") self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") self.assertEquals("testAccount2", remote_access.user_list.users[1].name, "Account name does not match") self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") def test_parse_ten_remote_access_accounts(self): data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEquals(None, remote_access) self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.") def test_parse_duplicate_remote_access_accounts(self): data_str = load_data('wire/remote_access_duplicate_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEquals(None, remote_access) self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match") self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") def test_parse_zero_remote_access_accounts(self): data_str = 
load_data('wire/remote_access_no_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEquals(None, remote_access) self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.") @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', return_value=GoalState(load_data('wire/goal_state_remote_access.xml'))) @patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config', return_value=load_data('wire/remote_access_single_account.xml')) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert') def test_update_remote_access_conf_remote_access(self, _1, _2, _3): protocol = WireProtocol('12.34.56.78') goal_state = protocol.client.get_goal_state() protocol.client.update_remote_access_conf(goal_state) self.assertNotEquals(None, protocol.client.remote_access) self.assertEquals(1, len(protocol.client.remote_access.user_list.users)) self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name) self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password) def test_parse_bad_remote_access_data(self): data = "foobar" self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)WALinuxAgent-2.2.32/tests/ga/test_remoteaccess_handler.py000066400000000000000000000616471335416306700234500ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from datetime import timedelta from azurelinuxagent.common.exception import RemoteAccessError from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.ga.remoteaccess import RemoteAccessHandler from tests.common.osutil.mock_osutil import MockOSUtil from tests.tools import * info_messages = [] error_messages = [] def get_user_dictionary(users): user_dictionary = {} for user in users: user_dictionary[user[0]] = user return user_dictionary def log_info(msg_format, *args): info_messages.append(msg_format.format(args)) def log_error(msg_format, *args): error_messages.append(msg_format.format(args)) def mock_add_event(name, op, is_success, version, message): TestRemoteAccessHandler.eventing_data = (name, op, is_success, version, message) class TestRemoteAccessHandler(AgentTestCase): eventing_data = [()] def setUp(self): super(TestRemoteAccessHandler, self).setUp() del info_messages[:] del error_messages[:] for data in TestRemoteAccessHandler.eventing_data: del data # add_user tests @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) actual_user = users[tstuser] expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") self.assertEqual(actual_user[7], expected_expiration) self.assertEqual(actual_user[4], "JIT_Account") self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual(info_messages[0], "User '{0}' added successfully with expiration in {1}" .format(tstuser, expected_expiration)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user_bad_creation_data(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" tstuser = "" expiration = datetime.utcnow() + timedelta(days=1) pwd = tstpassword error = "Error adding user {0}. 
test exception for bad username".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) self.assertEqual(0, len(rah.os_util.get_users())) self.assertEqual(0, len(error_messages)) self.assertEqual(0, len(info_messages)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="") def test_add_user_bad_password_data(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "" tstuser = "foobar" expiration = datetime.utcnow() + timedelta(days=1) pwd = tstpassword error = "Error adding user {0} cleanup successful\nInner error: test exception for bad password".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) self.assertEqual(0, len(rah.os_util.get_users())) self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user_already_existing(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) self.assertEqual(1, len(users.keys())) actual_user = users[tstuser] self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) # add the new duplicate user, ensure it's not created and does not overwrite the existing user. 
# this does not test the user add function as that's mocked, it tests processing skips the remaining # calls after the initial failure new_user_expiration = datetime.utcnow() + timedelta(days=5) self.assertRaises(RemoteAccessError, rah.add_user, tstuser, pwd, new_user_expiration) # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users after dup user attempted".format(tstuser)) self.assertEqual(1, len(users.keys())) actual_user = users[tstuser] self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) # delete_user tests @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_delete_user(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) rah.delete_user(tstuser) # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertFalse(tstuser in users) self.assertEqual(0, len(error_messages)) self.assertEqual(2, len(info_messages)) self.assertEqual("User '{0}' added successfully with expiration in {1}".format(tstuser, expected_expiration), info_messages[0]) self.assertEqual("User deleted {0}".format(tstuser), info_messages[1]) def test_handle_failed_create_with_bad_data(self): mock_os_util = MockOSUtil() testusr = "foobar" mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) rah = RemoteAccessHandler() rah.os_util = mock_os_util self.assertRaises(RemoteAccessError, rah.handle_failed_create, "") users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(1, len(users.keys())) self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) def test_delete_user_does_not_exist(self, _1, _2): mock_os_util = MockOSUtil() testusr = "foobar" mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) rah = RemoteAccessHandler() rah.os_util = mock_os_util testuser = "Carl" error = "Failed to clean up after account creation for {0}.\n" \ "Inner error: test exception, user does not exist to delete".format(testuser) self.assertRaisesRegex(RemoteAccessError, error, rah.handle_failed_create, testuser) users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(1, len(users.keys())) self.assertTrue(testusr in users, 
"Expected user {0} missing".format(testusr)) self.assertEqual(0, len(error_messages)) self.assertEqual(0, len(info_messages)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_new_user(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) tstuser = remote_access.user_list.users[0].name expiration_date = datetime.utcnow() + timedelta(days=1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) actual_user = users[tstuser] expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") self.assertEqual(actual_user[7], expected_expiration) self.assertEqual(actual_user[4], "JIT_Account") def test_do_not_add_expired_user(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) expiration = (datetime.utcnow() - timedelta(days=2)).strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertFalse("testAccount" in users) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) def test_error_add_user(self, _1, _2): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstuser = "foobar" expiration = datetime.utcnow() + timedelta(days=1) pwd = "bad password" error = "Error adding user foobar cleanup successful\n" \ "Inner error: \[CryptError\] Error decoding 
secret\n" \ "Inner error: Incorrect padding".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(0, len(users)) self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) def test_handle_remote_access_no_users(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_no_accounts.xml') remote_access = RemoteAccess(data_str) rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(0, len(users.keys())) def test_handle_remote_access_validate_jit_user_valid(self): rah = RemoteAccessHandler() comment = "JIT_Account" result = rah.validate_jit_user(comment) self.assertTrue(result, "Did not identify '{0}' as a JIT_Account".format(comment)) def test_handle_remote_access_validate_jit_user_invalid(self): rah = RemoteAccessHandler() test_users = ["John Doe", None, "", " "] failed_results = "" for user in test_users: if rah.validate_jit_user(user): failed_results += "incorrectly identified '{0} as a JIT_Account'. 
".format(user) if len(failed_results) > 0: self.fail(failed_results) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_two_accounts.xml') remote_access = RemoteAccess(data_str) testusers = [] count = 0 while count < 2: user = remote_access.user_list.users[count].name expiration_date = datetime.utcnow() + timedelta(days=count + 1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[count].expiration = expiration testusers.append(user) count += 1 rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(testusers[0] in users, "{0} missing from users".format(testusers[0])) self.assertTrue(testusers[1] in users, "{0} missing from users".format(testusers[1])) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") # max fabric supports in the Goal State def test_handle_remote_access_ten_users(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(10, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_user_removed(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = 
load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(10, len(users.keys())) del rah.remote_access.user_list.users[:] self.assertEqual(10, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_bad_data_and_good_data(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) if count is 2: user.name = "" expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(9, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_deleted_user_readded(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) tstuser = remote_access.user_list.users[0].name expiration_date = datetime.utcnow() + timedelta(days=1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) 
self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) os_util = rah.os_util os_util.__class__ = MockOSUtil os_util.all_users.clear() # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser not in users) rah.handle_remote_access() # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") @patch('azurelinuxagent.common.osutil.get_osutil', return_value=MockOSUtil()) @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol("12.34.56.78")) @patch('azurelinuxagent.common.protocol.wire.WireProtocol.get_incarnation', return_value="1") @patch('azurelinuxagent.common.protocol.wire.WireClient.get_remote_access', return_value="asdf") def test_remote_access_handler_run_bad_data(self, _1, _2, _3, _4, _5): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) rah.run() self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_one_removed(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess deleted_user = rah.remote_access.user_list.users[3] del rah.remote_access.user_list.users[3] rah.handle_remote_access() users = rah.os_util.get_users() self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) self.assertEqual(9, len(users)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_null_remote_access(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = 
rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess rah.remote_access = None rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(0, len(users)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_error_with_null_remote_access(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess rah.remote_access = None rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(0, len(users)) def test_remove_user_error(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() error = "Failed to delete user {0}\nInner error: test exception, bad data".format("") self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, "") def test_remove_user_not_exists(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() user = "bob" error = "Failed to delete user {0}\n" \ "Inner error: test exception, user does not exist to delete".format(user) self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, user) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_remove_and_add(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 
user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess new_user = "tstuser11" deleted_user = rah.remote_access.user_list.users[3] rah.remote_access.user_list.users[3].name = new_user rah.handle_remote_access() users = rah.os_util.get_users() self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) self.assertTrue(new_user in [u[0] for u in users], "user {0} not in users".format(new_user)) self.assertEqual(10, len(users)) @patch('azurelinuxagent.ga.remoteaccess.add_event', side_effect=mock_add_event) @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', side_effect=RemoteAccessError("foobar!")) def test_remote_access_handler_run_error(self, _1, _2): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() rah.run() print(TestRemoteAccessHandler.eventing_data) check_message = "foobar!" self.assertTrue(check_message in TestRemoteAccessHandler.eventing_data[4], "expected message {0} not found in {1}" .format(check_message, TestRemoteAccessHandler.eventing_data[4])) self.assertEqual(False, TestRemoteAccessHandler.eventing_data[2], "is_success is true") WALinuxAgent-2.2.32/tests/ga/test_update.py000066400000000000000000002202071335416306700205450ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. 
from __future__ import print_function from azurelinuxagent.common.event import * from azurelinuxagent.common.protocol.hostplugin import * from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.utils.fileutil import * from azurelinuxagent.ga.update import * from tests.tools import * NO_ERROR = { "last_failure" : 0.0, "failure_count" : 0, "was_fatal" : False } FATAL_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : True } WITH_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : False } EMPTY_MANIFEST = { "name": "WALinuxAgent", "version": 1.0, "handlerManifest": { "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "", "disableCommand": "", "rebootAfterInstall": False, "reportHeartbeat": False } } def get_agent_pkgs(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_PKG_GLOB) return glob.glob(path) def get_agents(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_DIR_GLOB) return [a for a in glob.glob(path) if os.path.isdir(a)] def get_agent_file_path(): return get_agent_pkgs()[0] def get_agent_file_name(): return os.path.basename(get_agent_file_path()) def get_agent_path(): return fileutil.trim_ext(get_agent_file_path(), "zip") def get_agent_name(): return os.path.basename(get_agent_path()) def get_agent_version(): return FlexibleVersion(get_agent_name().split("-")[1]) def faux_logger(): print("STDOUT message") print("STDERR message", file=sys.stderr) return DEFAULT class UpdateTestCase(AgentTestCase): def agent_bin(self, version, suffix): return "bin/{0}-{1}{2}.egg".format(AGENT_NAME, version, suffix) def rename_agent_bin(self, path, src_v, dst_v): src_bin = glob.glob(os.path.join(path, self.agent_bin(src_v, '*')))[0] dst_bin = os.path.join(path, self.agent_bin(dst_v, '')) shutil.move(src_bin, dst_bin) def agents(self): return [GuestAgent(path=path) for path in self.agent_dirs()] 
def agent_count(self): return len(self.agent_dirs()) def agent_dirs(self): return get_agents(in_dir=self.tmp_dir) def agent_dir(self, version): return os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, version)) def agent_paths(self): paths = glob.glob(os.path.join(self.tmp_dir, "*")) paths.sort() return paths def agent_pkgs(self): return get_agent_pkgs(in_dir=self.tmp_dir) def agent_versions(self): v = [FlexibleVersion(AGENT_DIR_PATTERN.match(a).group(1)) for a in self.agent_dirs()] v.sort(reverse=True) return v def get_error_file(self, error_data=NO_ERROR): fp = tempfile.NamedTemporaryFile(mode="w") json.dump(error_data if error_data is not None else NO_ERROR, fp) fp.seek(0) return fp def create_error(self, error_data=NO_ERROR): with self.get_error_file(error_data) as path: err = GuestAgentError(path.name) err.load() return err def copy_agents(self, *agents): if len(agents) <= 0: agents = get_agent_pkgs() for agent in agents: shutil.copy(agent, self.tmp_dir) return def expand_agents(self): for agent in self.agent_pkgs(): path = os.path.join(self.tmp_dir, fileutil.trim_ext(agent, "zip")) zipfile.ZipFile(agent).extractall(path) def prepare_agent(self, version): """ Create a download for the current agent version, copied from test data """ self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(version)) to_path = self.agent_dir(dst_v) if from_path != to_path: shutil.move(from_path + ".zip", to_path + ".zip") shutil.move(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) return def prepare_agents(self, count=20, is_available=True): # Ensure the test data is copied over agent_count = self.agent_count() if agent_count <= 0: self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() count -= 1 # Determine the most recent agent version versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) # 
Create agent packages and directories return self.replicate_agents( src_v=src_v, count=count-agent_count, is_available=is_available) def remove_agents(self): for agent in self.agent_paths(): try: if os.path.isfile(agent): os.remove(agent) else: shutil.rmtree(agent) except: pass return def replicate_agents(self, count=5, src_v=AGENT_VERSION, is_available=True, increment=1): from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(src_v)) for i in range(0, count): dst_v += increment to_path = self.agent_dir(dst_v) shutil.copyfile(from_path + ".zip", to_path + ".zip") shutil.copytree(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) if not is_available: GuestAgent(to_path).mark_failure(is_fatal=True) return dst_v class TestGuestAgentError(UpdateTestCase): def test_creation(self): self.assertRaises(TypeError, GuestAgentError) self.assertRaises(UpdateError, GuestAgentError, None) with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) self.assertEqual(WITH_ERROR["last_failure"], err.last_failure) self.assertEqual(WITH_ERROR["failure_count"], err.failure_count) self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal) return def test_clear(self): with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) err.clear() self.assertEqual(NO_ERROR["last_failure"], err.last_failure) self.assertEqual(NO_ERROR["failure_count"], err.failure_count) self.assertEqual(NO_ERROR["was_fatal"], err.was_fatal) return def test_save(self): err1 = self.create_error() err1.mark_failure() err1.mark_failure(is_fatal=True) err2 = self.create_error(err1.to_json()) self.assertEqual(err1.last_failure, err2.last_failure) self.assertEqual(err1.failure_count, err2.failure_count) self.assertEqual(err1.was_fatal, err2.was_fatal) def test_mark_failure(self): err = 
self.create_error() self.assertFalse(err.is_blacklisted) for i in range(0, MAX_FAILURE): err.mark_failure() # Agent failed >= MAX_FAILURE, it should be blacklisted self.assertTrue(err.is_blacklisted) self.assertEqual(MAX_FAILURE, err.failure_count) return def test_mark_failure_permanent(self): err = self.create_error() self.assertFalse(err.is_blacklisted) # Fatal errors immediately blacklist err.mark_failure(is_fatal=True) self.assertTrue(err.is_blacklisted) self.assertTrue(err.failure_count < MAX_FAILURE) return def test_str(self): err = self.create_error(error_data=NO_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( NO_ERROR["last_failure"], NO_ERROR["failure_count"], NO_ERROR["was_fatal"]) self.assertEqual(s, str(err)) err = self.create_error(error_data=WITH_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( WITH_ERROR["last_failure"], WITH_ERROR["failure_count"], WITH_ERROR["was_fatal"]) self.assertEqual(s, str(err)) return class TestGuestAgent(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.copy_agents(get_agent_file_path()) self.agent_path = os.path.join(self.tmp_dir, get_agent_name()) def test_creation(self): self.assertRaises(UpdateError, GuestAgent, "A very bad file name") n = "{0}-a.bad.version".format(AGENT_NAME) self.assertRaises(UpdateError, GuestAgent, n) self.expand_agents() agent = GuestAgent(path=self.agent_path) self.assertNotEqual(None, agent) self.assertEqual(get_agent_name(), agent.name) self.assertEqual(get_agent_version(), agent.version) self.assertEqual(self.agent_path, agent.get_agent_dir()) path = os.path.join(self.agent_path, AGENT_MANIFEST_FILE) self.assertEqual(path, agent.get_agent_manifest_path()) self.assertEqual( os.path.join(self.agent_path, AGENT_ERROR_FILE), agent.get_agent_error_file()) path = ".".join((os.path.join(conf.get_lib_dir(), get_agent_name()), "zip")) self.assertEqual(path, agent.get_agent_pkg_path()) self.assertTrue(agent.is_downloaded) 
self.assertFalse(agent.is_blacklisted) self.assertTrue(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") def test_clear_error(self, mock_downloaded): self.expand_agents() agent = GuestAgent(path=self.agent_path) agent.mark_failure(is_fatal=True) self.assertTrue(agent.error.last_failure > 0.0) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.clear_error() self.assertEqual(0.0, agent.error.last_failure) self.assertEqual(0, agent.error.failure_count) self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_available(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_available) agent._unpack() self.assertTrue(agent.is_available) agent.mark_failure(is_fatal=True) self.assertFalse(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_blacklisted(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) agent._unpack() self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded): try: mock_downloaded.side_effect = ResourceGoneError() agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) except ResourceGoneError: pass 
except: self.fail("Exception was not expected!") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded): try: mock_downloaded.side_effect = IOError() agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) except IOError: pass except: self.fail("Exception was not expected!") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_downloaded(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_downloaded) agent._unpack() self.assertTrue(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_mark_failure(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent.mark_failure() self.assertEqual(1, agent.error.failure_count) agent.mark_failure(is_fatal=True) self.assertEqual(2, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isdir(agent.get_agent_dir())) self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack_fail(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) os.remove(agent.get_agent_pkg_path()) self.assertRaises(UpdateError, agent._unpack) 
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent._unpack() agent._load_manifest() self.assertEqual(agent.manifest.get_enable_command(), agent.get_agent_cmd()) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_missing(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() os.remove(agent.get_agent_manifest_path()) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_empty(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: json.dump(EMPTY_MANIFEST, file) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_malformed(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: file.write("This is not JSON data") self.assertRaises(UpdateError, agent._load_manifest) def test_load_error(self): agent = GuestAgent(path=self.agent_path) agent.error = None agent._load_error() self.assertTrue(agent.error is not None) 
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") def test_download(self, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) mock_http_get.return_value= ResponseMock(response=agent_pkg) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) agent._download() self.assertTrue(os.path.isfile(agent.get_agent_pkg_path())) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") def test_download_fail(self, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) mock_http_get.return_value= ResponseMock(status=restutil.httpclient.SERVICE_UNAVAILABLE) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertRaises(UpdateError, agent._download) self.assertFalse(os.path.isfile(agent.get_agent_pkg_path())) self.assertFalse(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") @patch("azurelinuxagent.ga.update.restutil.http_post") def test_download_fallback(self, mock_http_post, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) mock_http_get.return_value = ResponseMock( status=restutil.httpclient.SERVICE_UNAVAILABLE, response="") ext_uri = 'ext_uri' host_uri = 'host_uri' api_uri = URI_FORMAT_GET_API_VERSIONS.format(host_uri, HOST_PLUGIN_PORT) art_uri = 
URI_FORMAT_GET_EXTENSION_ARTIFACT.format(host_uri, HOST_PLUGIN_PORT) mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri(uri=ext_uri)) agent = GuestAgent(pkg=pkg) agent.host = mock_host # ensure fallback fails gracefully, no http self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 2) self.assertEqual(mock_http_get.call_args_list[0][0][0], ext_uri) self.assertEqual(mock_http_get.call_args_list[1][0][0], api_uri) # ensure fallback fails gracefully, artifact api failure with patch.object(HostPluginProtocol, "ensure_initialized", return_value=True): self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 4) self.assertEqual(mock_http_get.call_args_list[2][0][0], ext_uri) self.assertEqual(mock_http_get.call_args_list[3][0][0], art_uri) a, k = mock_http_get.call_args_list[3] self.assertEqual(False, k['use_proxy']) # ensure fallback works as expected with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[art_uri, {}]): self.assertRaises(UpdateError, agent._download) self.assertEqual(mock_http_get.call_count, 6) a, k = mock_http_get.call_args_list[3] self.assertEqual(False, k['use_proxy']) self.assertEqual(mock_http_get.call_args_list[4][0][0], ext_uri) a, k = mock_http_get.call_args_list[4] self.assertEqual(mock_http_get.call_args_list[5][0][0], art_uri) a, k = mock_http_get.call_args_list[5] self.assertEqual(False, k['use_proxy']) @patch("azurelinuxagent.ga.update.restutil.http_get") def test_ensure_downloaded(self, mock_http_get): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) mock_http_get.return_value= ResponseMock(response=agent_pkg) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) 
self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) self.assertTrue(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._download", side_effect=UpdateError) def test_ensure_downloaded_download_fails(self, mock_download): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertFalse(agent.error.was_fatal) self.assertFalse(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack", side_effect=UpdateError) def test_ensure_downloaded_unpack_fails(self, mock_unpack, mock_download): self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack") @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest", side_effect=UpdateError) def test_ensure_downloaded_load_manifest_fails(self, mock_manifest, mock_unpack, mock_download): self.assertFalse(os.path.isdir(self.agent_path)) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._download") @patch("azurelinuxagent.ga.update.GuestAgent._unpack") @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest") def test_ensure_download_skips_blacklisted(self, mock_manifest, mock_unpack, mock_download): agent = GuestAgent(path=self.agent_path) 
self.assertEqual(0, mock_download.call_count) agent.clear_error() agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) self.assertEqual(0, mock_download.call_count) self.assertEqual(0, mock_unpack.call_count) class TestUpdate(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.event_patch = patch('azurelinuxagent.common.event.add_event') self.update_handler = get_update_handler() self.update_handler.protocol_util = Mock() def test_creation(self): self.assertTrue(self.update_handler.running) self.assertEqual(None, self.update_handler.last_attempt_time) self.assertEqual(0, len(self.update_handler.agents)) self.assertEqual(None, self.update_handler.child_agent) self.assertEqual(None, self.update_handler.child_launch_time) self.assertEqual(0, self.update_handler.child_launch_attempts) self.assertEqual(None, self.update_handler.child_process) self.assertEqual(None, self.update_handler.signal_handler) def test_emit_restart_event_emits_event_if_not_clean_start(self): try: mock_event = self.event_patch.start() self.update_handler._set_sentinel() self.update_handler._emit_restart_event() self.assertEqual(1, mock_event.call_count) except Exception as e: pass self.event_patch.stop() def _create_protocol(self, count=20, versions=None): latest_version = self.prepare_agents(count=count) if versions is None or len(versions) <= 0: versions = [latest_version] return ProtocolMock(versions=versions) def _test_ensure_no_orphans(self, invocations=3, interval=ORPHAN_WAIT_INTERVAL, pid_count=0): with patch.object(self.update_handler, 'osutil') as mock_util: # Note: # - Python only allows mutations of objects to which a function has # a reference. Incrementing an integer directly changes the # reference. 
Incrementing an item of a list changes an item to # which the code has a reference. # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 return iterations[0] < invocations mock_util.check_pid_alive = Mock(side_effect=iterator) pid_files = self.update_handler._get_pid_files() self.assertEqual(pid_count, len(pid_files)) with patch('os.getpid', return_value=42): with patch('time.sleep', return_value=None) as mock_sleep: self.update_handler._ensure_no_orphans(orphan_wait_interval=interval) for pid_file in pid_files: self.assertFalse(os.path.exists(pid_file)) return mock_util.check_pid_alive.call_count, mock_sleep.call_count def test_ensure_no_orphans(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) calls, sleeps = self._test_ensure_no_orphans(invocations=3, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) def test_ensure_no_orphans_skips_if_no_orphans(self): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.read_file', side_effect=Exception): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_kills_after_interval(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) with patch('os.kill') as mock_kill: calls, sleeps = self._test_ensure_no_orphans( invocations=4, interval=3*GOAL_STATE_INTERVAL, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) self.assertEqual(1, mock_kill.call_count) @patch('azurelinuxagent.ga.update.datetime') def test_ensure_partition_assigned(self, mock_time): path = os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) mock_time.utcnow = Mock() self.assertFalse(os.path.exists(path)) for n in 
range(0,99): mock_time.utcnow.return_value = Mock(microsecond=n* 10000) self.update_handler._ensure_partition_assigned() self.assertTrue(os.path.exists(path)) s = fileutil.read_file(path) self.assertEqual(n, int(s)) os.remove(path) def test_ensure_readonly_sets_readonly(self): test_files = [ os.path.join(conf.get_lib_dir(), "faux_certificate.crt"), os.path.join(conf.get_lib_dir(), "faux_certificate.p7m"), os.path.join(conf.get_lib_dir(), "faux_certificate.pem"), os.path.join(conf.get_lib_dir(), "faux_certificate.prv"), os.path.join(conf.get_lib_dir(), "ovf-env.xml") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual(0, mode ^ stat.S_IRUSR) def test_ensure_readonly_leaves_unmodified(self): test_files = [ os.path.join(conf.get_lib_dir(), "faux.xml"), os.path.join(conf.get_lib_dir(), "faux.json"), os.path.join(conf.get_lib_dir(), "faux.txt"), os.path.join(conf.get_lib_dir(), "faux") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, mode) def _test_evaluate_agent_health(self, child_agent_index=0): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertFalse(latest_agent.is_blacklisted) self.assertTrue(len(self.update_handler.agents) > 1) child_agent = self.update_handler.agents[child_agent_index] self.assertTrue(child_agent.is_available) self.assertFalse(child_agent.is_blacklisted) self.update_handler.child_agent = child_agent 
self.update_handler._evaluate_agent_health(latest_agent) def test_evaluate_agent_health_ignores_installed_agent(self): self.update_handler._evaluate_agent_health(None) def test_evaluate_agent_health_raises_exception_for_restarting_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self.assertRaises(Exception, self._test_evaluate_agent_health) def test_evaluate_agent_health_will_not_raise_exception_for_long_restarts(self): self.update_handler.child_launch_time = time.time() - 24 * 60 self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX self._test_evaluate_agent_health() def test_evaluate_agent_health_will_not_raise_exception_too_few_restarts(self): self.update_handler.child_launch_time = time.time() self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 2 self._test_evaluate_agent_health() def test_evaluate_agent_health_resets_with_new_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self._test_evaluate_agent_health(child_agent_index=1) self.assertEqual(1, self.update_handler.child_launch_attempts) def test_filter_blacklisted_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) kept_agents = self.update_handler.agents[::2] blacklisted_agents = self.update_handler.agents[1::2] for agent in blacklisted_agents: agent.mark_failure(is_fatal=True) self.update_handler._filter_blacklisted_agents() self.assertEqual(kept_agents, self.update_handler.agents) def test_find_agents(self): self.prepare_agents() self.assertTrue(0 <= len(self.update_handler.agents)) self.update_handler._find_agents() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) def 
test_find_agents_does_reload(self): self.prepare_agents() self.update_handler._find_agents() agents = self.update_handler.agents self.update_handler._find_agents() self.assertNotEqual(agents, self.update_handler.agents) def test_find_agents_sorts(self): self.prepare_agents() self.update_handler._find_agents() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_host_for_wireserver(self, mock_get_host): protocol = WireProtocol('12.34.56.78') mock_get_host.return_value = "faux host" host = self.update_handler._get_host_plugin(protocol=protocol) print("mock_get_host call cound={0}".format(mock_get_host.call_count)) self.assertEqual(1, mock_get_host.call_count) self.assertEqual("faux host", host) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_none_otherwise(self, mock_get_host): protocol = MetadataProtocol() host = self.update_handler._get_host_plugin(protocol=protocol) mock_get_host.assert_not_called() self.assertEqual(None, host) def test_get_latest_agent(self): latest_version = self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) self.assertEqual(latest_version, latest_agent.version) def test_get_latest_agent_excluded(self): self.prepare_agent(AGENT_VERSION) self.assertFalse(self._test_upgrade_available( versions=self.agent_versions(), count=1)) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_no_updates(self): self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skip_updates(self): conf.get_autoupdate_enabled = Mock(return_value=False) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skips_unavailable(self): 
self.prepare_agents() prior_agent = self.update_handler.get_latest_agent() latest_version = self.prepare_agents(count=self.agent_count()+1, is_available=False) latest_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, latest_version)) self.assertFalse(GuestAgent(latest_path).is_available) latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.version < latest_version) self.assertEqual(latest_agent.version, prior_agent.version) def test_get_pid_files(self): pid_files = self.update_handler._get_pid_files() self.assertEqual(0, len(pid_files)) def test_get_pid_files_returns_previous(self): for n in range(1250): fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1)) pid_files = self.update_handler._get_pid_files() self.assertEqual(1250, len(pid_files)) pid_dir, pid_name, pid_re = self.update_handler._get_pid_parts() for p in pid_files: self.assertTrue(pid_re.match(os.path.basename(p))) def test_is_clean_start_returns_true_when_no_sentinel(self): self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.assertTrue(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_when_sentinel_exists(self): self.update_handler._set_sentinel(agent=CURRENT_AGENT) self.assertFalse(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_for_exceptions(self): self.update_handler._set_sentinel() with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception): self.assertFalse(self.update_handler._is_clean_start) def test_is_orphaned_returns_false_if_parent_exists(self): fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('os.getppid', return_value=42): self.assertFalse(self.update_handler._is_orphaned) def test_is_orphaned_returns_true_if_parent_is_init(self): with patch('os.getppid', return_value=1): self.assertTrue(self.update_handler._is_orphaned) def test_is_orphaned_returns_true_if_parent_does_not_exist(self): 
fileutil.write_file(conf.get_agent_pid_file_path(), ustr(24)) with patch('os.getppid', return_value=42): self.assertTrue(self.update_handler._is_orphaned) def test_is_version_available(self): self.prepare_agents(is_available=True) self.update_handler.agents = self.agents() for agent in self.agents(): self.assertTrue(self.update_handler._is_version_eligible(agent.version)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False) def test_is_version_available_rejects(self, mock_current): self.prepare_agents(is_available=True) self.update_handler.agents = self.agents() self.update_handler.agents[0].mark_failure(is_fatal=True) self.assertFalse(self.update_handler._is_version_eligible(self.agents()[0].version)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=True) def test_is_version_available_accepts_current(self, mock_current): self.update_handler.agents = [] self.assertTrue(self.update_handler._is_version_eligible(CURRENT_VERSION)) @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False) def test_is_version_available_rejects_by_default(self, mock_current): self.prepare_agents() self.update_handler.agents = [] v = self.agents()[0].version self.assertFalse(self.update_handler._is_version_eligible(v)) def test_purge_agents(self): self.prepare_agents() self.update_handler._find_agents() # Ensure at least three agents initially exist self.assertTrue(2 < len(self.update_handler.agents)) # Purge every other agent. 
Don't add the current version to agents_to_keep explicitly; # the current version is never purged agents_to_keep = [] kept_agents = [] purged_agents = [] for i in range(0, len(self.update_handler.agents)): if self.update_handler.agents[i].version == CURRENT_VERSION: kept_agents.append(self.update_handler.agents[i]) else: if i % 2 == 0: agents_to_keep.append(self.update_handler.agents[i]) kept_agents.append(self.update_handler.agents[i]) else: purged_agents.append(self.update_handler.agents[i]) # Reload and assert only the kept agents remain on disk self.update_handler.agents = agents_to_keep self.update_handler._purge_agents() self.update_handler._find_agents() self.assertEqual( [agent.version for agent in kept_agents], [agent.version for agent in self.update_handler.agents]) # Ensure both directories and packages are removed for agent in purged_agents: agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) self.assertFalse(os.path.exists(agent_path)) self.assertFalse(os.path.exists(agent_path + ".zip")) # Ensure kept agent directories and packages remain for agent in kept_agents: agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) self.assertTrue(os.path.exists(agent_path)) self.assertTrue(os.path.exists(agent_path + ".zip")) def _test_run_latest(self, mock_child=None, mock_time=None, child_args=None): if mock_child is None: mock_child = ChildMock() if mock_time is None: mock_time = TimeMock() with patch('subprocess.Popen', return_value=mock_child) as mock_popen: with patch('time.time', side_effect=mock_time.time): with patch('time.sleep', side_effect=mock_time.sleep): self.update_handler.run_latest(child_args=child_args) self.assertEqual(1, mock_popen.call_count) return mock_popen.call_args def test_run_latest(self): self.prepare_agents() agent = self.update_handler.get_latest_agent() args, kwargs = self._test_run_latest() args = args[0] cmds = textutil.safe_shlex_split(agent.get_agent_cmd()) if 
cmds[0].lower() == "python": cmds[0] = get_python_cmd() self.assertEqual(args, cmds) self.assertTrue(len(args) > 1) self.assertTrue(args[0].startswith("python")) self.assertEqual("-run-exthandlers", args[len(args)-1]) self.assertEqual(True, 'cwd' in kwargs) self.assertEqual(agent.get_agent_dir(), kwargs['cwd']) self.assertEqual(False, '\x00' in cmds[0]) def test_run_latest_passes_child_args(self): self.prepare_agents() agent = self.update_handler.get_latest_agent() args, kwargs = self._test_run_latest(child_args="AnArgument") args = args[0] self.assertTrue(len(args) > 1) self.assertTrue(args[0].startswith("python")) self.assertEqual("AnArgument", args[len(args)-1]) def test_run_latest_polls_and_waits_for_success(self): mock_child = ChildMock(return_value=None) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(2, mock_child.poll.call_count) self.assertEqual(1, mock_child.wait.call_count) def test_run_latest_polling_stops_at_success(self): mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(1, mock_child.poll.call_count) self.assertEqual(0, mock_child.wait.call_count) def test_run_latest_polling_stops_at_failure(self): mock_child = ChildMock(return_value=42) mock_time = TimeMock() self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(1, mock_child.poll.call_count) self.assertEqual(0, mock_child.wait.call_count) def test_run_latest_polls_frequently_if_installed_is_latest(self): mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) self._test_run_latest(mock_time=mock_time) self.assertEqual(1, mock_time.sleep_interval) def test_run_latest_polls_moderately_if_installed_not_latest(self): self.prepare_agents() mock_child = ChildMock(return_value=0) mock_time = 
TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) self._test_run_latest(mock_time=mock_time) self.assertNotEqual(1, mock_time.sleep_interval) def test_run_latest_defaults_to_current(self): self.assertEqual(None, self.update_handler.get_latest_agent()) args, kwargs = self._test_run_latest() self.assertEqual(args[0], [get_python_cmd(), "-u", sys.argv[0], "-run-exthandlers"]) self.assertEqual(True, 'cwd' in kwargs) self.assertEqual(os.getcwd(), kwargs['cwd']) def test_run_latest_forwards_output(self): try: tempdir = tempfile.mkdtemp() stdout_path = os.path.join(tempdir, "stdout") stderr_path = os.path.join(tempdir, "stderr") with open(stdout_path, "w") as stdout: with open(stderr_path, "w") as stderr: saved_stdout, sys.stdout = sys.stdout, stdout saved_stderr, sys.stderr = sys.stderr, stderr try: self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger)) finally: sys.stdout = saved_stdout sys.stderr = saved_stderr with open(stdout_path, "r") as stdout: self.assertEqual(1, len(stdout.readlines())) with open(stderr_path, "r") as stderr: self.assertEqual(1, len(stderr.readlines())) finally: shutil.rmtree(tempdir, True) def test_run_latest_nonzero_code_marks_failures(self): # logger.add_logger_appender(logger.AppenderType.STDOUT) self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent): self._test_run_latest(mock_child=ChildMock(return_value=1)) self.assertTrue(latest_agent.is_blacklisted) self.assertFalse(latest_agent.is_available) self.assertNotEqual(0.0, latest_agent.error.last_failure) self.assertEqual(1, latest_agent.error.failure_count) def test_run_latest_exception_blacklists(self): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() 
self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent): self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting"))) self.assertFalse(latest_agent.is_available) self.assertTrue(latest_agent.error.is_blacklisted) self.assertNotEqual(0.0, latest_agent.error.last_failure) self.assertEqual(1, latest_agent.error.failure_count) def test_run_latest_exception_does_not_blacklist_if_terminating(self): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent): self.update_handler.running = False self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Attempt blacklisting"))) self.assertTrue(latest_agent.is_available) self.assertFalse(latest_agent.error.is_blacklisted) self.assertEqual(0.0, latest_agent.error.last_failure) self.assertEqual(0, latest_agent.error.failure_count) @patch('signal.signal') def test_run_latest_captures_signals(self, mock_signal): self._test_run_latest() self.assertEqual(1, mock_signal.call_count) @patch('signal.signal') def test_run_latest_creates_only_one_signal_handler(self, mock_signal): self.update_handler.signal_handler = "Not None" self._test_run_latest() self.assertEqual(0, mock_signal.call_count) def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False, sleep_interval=(3,)): conf.get_autoupdate_enabled = Mock(return_value=enable_updates) # Note: # - Python only allows mutations of objects to which a function has # a reference. Incrementing an integer directly changes the # reference. 
Incrementing an item of a list changes an item to # which the code has a reference. # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 if iterations[0] >= invocations: self.update_handler.running = False return fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: with patch('time.sleep', side_effect=iterator) as mock_sleep: with patch('sys.exit') as mock_exit: if isinstance(os.getppid, MagicMock): self.update_handler.run() else: with patch('os.getppid', return_value=42): self.update_handler.run() self.assertEqual(1, mock_handler.call_count) self.assertEqual(mock_handler.return_value.method_calls, calls) self.assertEqual(1, mock_ra_handler.call_count) self.assertEqual(mock_ra_handler.return_value.method_calls, calls) self.assertEqual(invocations, mock_sleep.call_count) if invocations > 0: self.assertEqual(sleep_interval, mock_sleep.call_args[0]) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_exit.call_count) def test_run(self): self._test_run() def test_run_keeps_running(self): self._test_run(invocations=15, calls=[call.run()]*15) def test_run_stops_if_update_available(self): self.update_handler._upgrade_available = Mock(return_value=True) self._test_run(invocations=0, calls=[], enable_updates=True) def test_run_stops_if_orphaned(self): with patch('os.getppid', return_value=1): self._test_run(invocations=0, calls=[], enable_updates=True) def test_run_clears_sentinel_on_successful_exit(self): self._test_run() 
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) def test_run_leaves_sentinel_on_unsuccessful_exit(self): self.update_handler._upgrade_available = Mock(side_effect=Exception) self._test_run(invocations=0, calls=[], enable_updates=True) self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) def test_run_emits_restart_event(self): self.update_handler._emit_restart_event = Mock() self._test_run() self.assertEqual(1, self.update_handler._emit_restart_event.call_count) def test_set_agents_sets_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertTrue(len(self.update_handler.agents) > 0) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) def test_set_agents_sorts_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version def test_set_sentinel(self): self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.update_handler._set_sentinel() self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) def test_set_sentinel_writes_current_agent(self): self.update_handler._set_sentinel() self.assertTrue( fileutil.read_file(self.update_handler._sentinel_file_path()), CURRENT_AGENT) def test_shutdown(self): self.update_handler._set_sentinel() self.update_handler._shutdown() self.assertFalse(self.update_handler.running) self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) def test_shutdown_ignores_missing_sentinel_file(self): self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.update_handler._shutdown() self.assertFalse(self.update_handler.running) self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) def test_shutdown_ignores_exceptions(self): 
self.update_handler._set_sentinel() try: with patch("os.remove", side_effect=Exception): self.update_handler._shutdown() except Exception as e: self.assertTrue(False, "Unexpected exception") def _test_upgrade_available( self, base_version=FlexibleVersion(AGENT_VERSION), protocol=None, versions=None, count=20): if protocol is None: protocol = self._create_protocol(count=count, versions=versions) self.update_handler.protocol_util = protocol conf.get_autoupdate_gafamily = Mock(return_value=protocol.family) return self.update_handler._upgrade_available(base_version=base_version) def test_upgrade_available_returns_true_on_first_use(self): self.assertTrue(self._test_upgrade_available()) def test_upgrade_available_will_refresh_goal_state(self): protocol = self._create_protocol() protocol.emulate_stale_goal_state() self.assertTrue(self._test_upgrade_available(protocol=protocol)) self.assertEqual(2, protocol.call_counts["get_vmagent_manifests"]) self.assertEqual(1, protocol.call_counts["get_vmagent_pkgs"]) self.assertEqual(1, protocol.call_counts["update_goal_state"]) self.assertTrue(protocol.goal_state_forced) def test_upgrade_available_handles_missing_family(self): extensions_config = ExtensionsConfig(load_data("wire/ext_conf_missing_family.xml")) protocol = ProtocolMock() protocol.family = "Prod" protocol.agent_manifests = extensions_config.vmagent_manifests self.update_handler.protocol_util = protocol with patch('azurelinuxagent.common.logger.warn') as mock_logger: with patch('tests.ga.test_update.ProtocolMock.get_vmagent_pkgs', side_effect=ProtocolError): self.assertFalse(self.update_handler._upgrade_available(base_version=CURRENT_VERSION)) self.assertEqual(0, mock_logger.call_count) def test_upgrade_available_includes_old_agents(self): self.prepare_agents() old_version = self.agent_versions()[-1] old_count = old_version.version[-1] self.replicate_agents(src_v=old_version, count=old_count, increment=-1) all_count = len(self.agent_versions()) 
self.assertTrue(self._test_upgrade_available(versions=self.agent_versions())) self.assertEqual(all_count, len(self.update_handler.agents)) def test_upgrade_available_purges_old_agents(self): self.prepare_agents() agent_count = self.agent_count() self.assertEqual(20, agent_count) agent_versions = self.agent_versions()[:3] self.assertTrue(self._test_upgrade_available(versions=agent_versions)) self.assertEqual(len(agent_versions), len(self.update_handler.agents)) # Purging always keeps the running agent if CURRENT_VERSION not in agent_versions: agent_versions.append(CURRENT_VERSION) self.assertEqual(agent_versions, self.agent_versions()) def test_update_available_returns_true_if_current_gets_blacklisted(self): self.update_handler._is_version_eligible = Mock(return_value=False) self.assertTrue(self._test_upgrade_available()) def test_upgrade_available_skips_if_too_frequent(self): conf.get_autoupdate_frequency = Mock(return_value=10000) self.update_handler.last_attempt_time = time.time() self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_skips_if_when_no_new_versions(self): self.prepare_agents() base_version = self.agent_versions()[0] + 1 self.update_handler._is_version_eligible = lambda x: x == base_version self.assertFalse(self._test_upgrade_available(base_version=base_version)) def test_upgrade_available_skips_when_no_versions(self): self.assertFalse(self._test_upgrade_available(protocol=ProtocolMock())) def test_upgrade_available_skips_when_updates_are_disabled(self): conf.get_autoupdate_enabled = Mock(return_value=False) self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_sorts(self): self.prepare_agents() self._test_upgrade_available() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version def test_write_pid_file(self): for n in range(1112): fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1)) with patch('os.getpid', 
return_value=1112): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(1112, len(pid_files)) self.assertEqual("1111_waagent.pid", os.path.basename(pid_files[-1])) self.assertEqual("1112_waagent.pid", os.path.basename(pid_file)) self.assertEqual(fileutil.read_file(pid_file), ustr(1112)) def test_write_pid_file_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.write_file', side_effect=Exception): with patch('os.getpid', return_value=42): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(0, len(pid_files)) self.assertEqual(None, pid_file) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', return_value=GoalState(load_data('wire/goal_state.xml'))) def test_package_filter_for_agent_manifest(self, _): protocol = WireProtocol('12.34.56.78') extension_config = ExtensionsConfig(load_data('wire/ext_conf.xml')) agent_manifest = extension_config.vmagent_manifests.vmAgentManifests[0] # has agent versions 13, 14 ga_manifest_1 = ExtensionManifest(load_data('wire/ga_manifest_1.xml')) # has agent versions 13, 14, 15 ga_manifest_2 = ExtensionManifest(load_data('wire/ga_manifest_2.xml')) goal_state = protocol.client.get_goal_state() disk_cache = os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( agent_manifest.family, goal_state.incarnation)) self.assertFalse(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_1.allowed_versions is None) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_1): pkg_list_1 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_1 is not None) self.assertTrue(len(pkg_list_1.versions) == 2) self.assertTrue(pkg_list_1.versions[0].version == '2.2.13') self.assertTrue(pkg_list_1.versions[0].uris[0].uri == 'url1_13') self.assertTrue(pkg_list_1.versions[1].version == '2.2.14') self.assertTrue(pkg_list_1.versions[1].uris[0].uri == 'url1_14') 
self.assertTrue(os.path.exists(disk_cache)) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_2): pkg_list_2 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_2 is not None) self.assertTrue(len(pkg_list_2.versions) == 2) self.assertTrue(pkg_list_2.versions[0].version == '2.2.13') self.assertTrue(pkg_list_2.versions[0].uris[0].uri == 'url2_13') self.assertTrue(pkg_list_2.versions[1].version == '2.2.14') self.assertTrue(pkg_list_2.versions[1].uris[0].uri == 'url2_14') # does not contain 2.2.15 self.assertTrue(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_2.allowed_versions is not None) self.assertTrue(len(ga_manifest_2.allowed_versions) == 2) self.assertTrue(ga_manifest_2.allowed_versions[0] == '2.2.13') self.assertTrue(ga_manifest_2.allowed_versions[1] == '2.2.14') @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_update_happens_when_extensions_disabled(self, _): """ Although the extension enabled config will not get checked before an update is found, this test attempts to ensure that behavior never changes. """ self.update_handler._upgrade_available = Mock(return_value=True) self._test_run(invocations=0, calls=[], enable_updates=True, sleep_interval=(300,)) @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_interval_changes_when_extensions_disabled(self, _): """ When extension processing is disabled, the goal state interval should be larger. 
""" self.update_handler._upgrade_available = Mock(return_value=False) self._test_run(invocations=15, calls=[call.run()] * 15, sleep_interval=(300,)) class MonitorThreadTest(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.event_patch = patch('azurelinuxagent.common.event.add_event') self.update_handler = get_update_handler() self.update_handler.protocol_util = Mock() def _test_run(self, invocations=1): iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 if iterations[0] >= invocations: self.update_handler.running = False return with patch('os.getpid', return_value=42): with patch.object(UpdateHandler, '_is_orphaned') as mock_is_orphaned: mock_is_orphaned.__get__ = Mock(return_value=False) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: with patch('time.sleep', side_effect=iterator) as mock_sleep: with patch('sys.exit') as mock_exit: self.update_handler.run() @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_start_threads(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor.return_value = mock_monitor_thread mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=0) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_check_if_monitor_thread_is_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() 
mock_monitor_thread.is_alive = MagicMock(return_value=True) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=0) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_monitor_thread.is_alive.call_count) self.assertEqual(0, mock_monitor_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_check_if_env_thread_is_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=True) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=1) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) self.assertEqual(1, mock_env_thread.is_alive.call_count) self.assertEqual(0, mock_env_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_monitor_thread_if_not_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor_thread.is_alive = MagicMock(return_value=False) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=1) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_monitor_thread.is_alive.call_count) self.assertEqual(1, mock_monitor_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_env_thread_if_not_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() 
mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=False) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=1) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) self.assertEqual(1, mock_env_thread.is_alive.call_count) self.assertEqual(1, mock_env_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_monitor_thread(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor_thread.is_alive = MagicMock(return_value=False) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=0) self.assertEqual(True, mock_monitor.called) self.assertEqual(True, mock_monitor_thread.run.called) self.assertEqual(True, mock_monitor_thread.is_alive.called) self.assertEqual(True, mock_monitor_thread.start.called) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_env_thread(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=False) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=0) self.assertEqual(True, mock_env.called) self.assertEqual(True, mock_env_thread.run.called) self.assertEqual(True, mock_env_thread.is_alive.called) self.assertEqual(True, mock_env_thread.start.called) class ChildMock(Mock): def __init__(self, return_value=0, side_effect=None): Mock.__init__(self, return_value=return_value, side_effect=side_effect) self.poll = Mock(return_value=return_value, side_effect=side_effect) self.wait = Mock(return_value=return_value, 
side_effect=side_effect) class ProtocolMock(object): def __init__(self, family="TestAgent", etag=42, versions=None, client=None): self.family = family self.client = client self.call_counts = { "get_vmagent_manifests" : 0, "get_vmagent_pkgs" : 0, "update_goal_state" : 0 } self.goal_state_is_stale = False self.goal_state_forced = False self.etag = etag self.versions = versions if versions is not None else [] self.create_manifests() self.create_packages() def emulate_stale_goal_state(self): self.goal_state_is_stale = True def create_manifests(self): self.agent_manifests = VMAgentManifestList() if len(self.versions) <= 0: return if self.family is not None: manifest = VMAgentManifest(family=self.family) for i in range(0,10): manifest_uri = "https://nowhere.msft/agent/{0}".format(i) manifest.versionsManifestUris.append(VMAgentManifestUri(uri=manifest_uri)) self.agent_manifests.vmAgentManifests.append(manifest) def create_packages(self): self.agent_packages = ExtHandlerPackageList() if len(self.versions) <= 0: return for version in self.versions: package = ExtHandlerPackage(str(version)) for i in range(0,5): package_uri = "https://nowhere.msft/agent_pkg/{0}".format(i) package.uris.append(ExtHandlerPackageUri(uri=package_uri)) self.agent_packages.versions.append(package) def get_protocol(self): return self def get_vmagent_manifests(self): self.call_counts["get_vmagent_manifests"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_manifests, self.etag def get_vmagent_pkgs(self, manifest): self.call_counts["get_vmagent_pkgs"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_packages def update_goal_state(self, forced=False, max_retry=3): self.call_counts["update_goal_state"] += 1 self.goal_state_forced = self.goal_state_forced or forced class ResponseMock(Mock): def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): 
Mock.__init__(self) self.status = status self.reason = reason self.response = response def read(self): return self.response class TimeMock(Mock): def __init__(self, time_increment=1): Mock.__init__(self) self.next_time = time.time() self.time_call_count = 0 self.time_increment = time_increment self.sleep_interval = None def sleep(self, n): self.sleep_interval = n def time(self): self.time_call_count += 1 current_time = self.next_time self.next_time += self.time_increment return current_time if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/pa/000077500000000000000000000000001335416306700156605ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/pa/__init__.py000066400000000000000000000011651335416306700177740ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/pa/test_deprovision.py000066400000000000000000000125021335416306700216320ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import signal import tempfile import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision import get_deprovision_handler from azurelinuxagent.pa.deprovision.default import DeprovisionHandler from tests.tools import * class TestDeprovision(AgentTestCase): @patch('signal.signal') @patch('azurelinuxagent.common.osutil.get_osutil') @patch('azurelinuxagent.common.protocol.get_protocol_util') @patch('azurelinuxagent.pa.deprovision.default.read_input') def test_confirmation(self, mock_read, mock_protocol, mock_util, mock_signal): dh = DeprovisionHandler() dh.setup = Mock() dh.setup.return_value = ([], []) dh.do_actions = Mock() # Do actions if confirmed mock_read.return_value = "y" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Skip actions if not confirmed mock_read.return_value = "n" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Do actions if forced mock_read.return_value = "n" dh.run(force=True) self.assertEqual(2, dh.do_actions.call_count) @distros("ubuntu") @patch('azurelinuxagent.common.conf.get_lib_dir') def test_del_lib_dir_files(self, distro_name, distro_version, distro_full_name, mock_conf): dirs = [ 'WALinuxAgent-2.2.26/config', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status' ] files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint', 'Extensions.1.xml', 'ExtensionsConfig.1.xml', 'GoalState.1.xml', 'Extensions.2.xml', 'ExtensionsConfig.2.xml', 'GoalState.2.xml', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus', 
'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq', 'WALinuxAgent-2.2.26/config/0.settings' ] tmp = tempfile.mkdtemp() mock_conf.return_value = tmp for d in dirs: fileutil.mkdir(os.path.join(tmp, d)) for f in files: fileutil.write_file(os.path.join(tmp, f), "Value") deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings = [] actions = [] deprovision_handler.del_lib_dir_files(warnings, actions) deprovision_handler.del_ext_handler_files(warnings, actions) self.assertTrue(len(warnings) == 0) self.assertTrue(len(actions) == 2) self.assertEqual(fileutil.rm_files, actions[0].func) self.assertEqual(fileutil.rm_files, actions[1].func) self.assertEqual(11, len(actions[0].args)) self.assertEqual(3, len(actions[1].args)) for f in actions[0].args: self.assertTrue(os.path.basename(f) in files) for f in actions[1].args: self.assertTrue(f[len(tmp)+1:] in files) @distros("redhat") def test_deprovision(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolv.conf" in w for w in warnings) @distros("ubuntu") def test_deprovision_ubuntu(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) with patch("os.path.realpath", return_value="/run/resolvconf/resolv.conf"): warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolvconf/resolv.conf.d/tail" in w for w in warnings) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/pa/test_provision.py000066400000000000000000000315051335416306700213250ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import ProvisionError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * class TestProvision(AgentTestCase): @distros("redhat") @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') def test_provision(self, mock_util, distro_name, distro_version, distro_full_name): provision_handler = get_provision_handler(distro_name, distro_version, distro_full_name) mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") provision_handler.osutil = mock_osutil provision_handler.protocol_util.osutil = mock_osutil provision_handler.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) provision_handler.run() def test_customdata(self): base64data = 'Q3VzdG9tRGF0YQ==' data = DefaultOSUtil().decode_customdata(base64data) fileutil.write_file(tempfile.mktemp(), data) @patch('azurelinuxagent.common.conf.get_provision_enabled', return_value=False) def test_provisioning_is_skipped_when_not_enabled(self, mock_conf): ph = ProvisionHandler() ph.osutil = DefaultOSUtil() ph.osutil.get_instance_id = Mock( 
return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') ph.is_provisioned = Mock() ph.report_ready = Mock() ph.write_provisioned = Mock() ph.run() self.assertEqual(0, ph.is_provisioned.call_count) self.assertEqual(1, ph.report_ready.call_count) self.assertEqual(1, ph.write_provisioned.call_count) @patch('os.path.isfile', return_value=False) def test_is_provisioned_not_provisioned(self, mock_isfile): ph = ProvisionHandler() self.assertFalse(ph.is_provisioned()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_is_provisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=True) ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) self.assertEqual(0, deprovision_handler.run_changed_unique_id.call_count) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_not_deprovisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=False) ph.report_ready = Mock() ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) self.assertEqual(1, deprovision_handler.run_changed_unique_id.call_count) @distros() def test_provision_telemetry_pga_false(self, distro_name, distro_version, distro_full_name): """ 
ProvisionGuestAgent flag is 'false' """ self._provision_test(distro_name, distro_version, distro_full_name, OVF_FILE_NAME, 'false', True) @distros() def test_provision_telemetry_pga_true(self, distro_name, distro_version, distro_full_name): """ ProvisionGuestAgent flag is 'true' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-2.xml', 'true', True) @distros() def test_provision_telemetry_pga_empty(self, distro_name, distro_version, distro_full_name): """ ProvisionGuestAgent flag is '' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-3.xml', 'true', False) @distros() def test_provision_telemetry_pga_bad(self, distro_name, distro_version, distro_full_name): """ ProvisionGuestAgent flag is 'bad data' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-4.xml', 'bad data', True) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') def _provision_test(self, distro_name, distro_version, distro_full_name, ovf_file, provisionMessage, expect_success, patch_write_agent_disabled, patch_get_instance_id): """ Assert that the agent issues two telemetry messages as part of a successful provisioning. 1. Provision 2. 
GuestState """ ph = get_provision_handler(distro_name, distro_version, distro_full_name) ph.report_event = MagicMock() ph.reg_ssh_host_key = MagicMock(return_value='--thumprint--') mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") ph.osutil = mock_osutil ph.protocol_util.osutil = mock_osutil ph.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data(ovf_file) fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() if expect_success: self.assertEqual(2, ph.report_event.call_count) positional_args, kw_args = ph.report_event.call_args_list[0] # [call('Provisioning succeeded (146473.68s)', duration=65, is_success=True)] self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+s\)', positional_args[0]) is not None) self.assertTrue(isinstance(kw_args['duration'], int)) self.assertTrue(kw_args['is_success']) positional_args, kw_args = ph.report_event.call_args_list[1] self.assertTrue(kw_args['operation'] == 'ProvisionGuestAgent') self.assertTrue(kw_args['message'] == provisionMessage) self.assertTrue(kw_args['is_success']) expected_disabled = True if provisionMessage == 'false' else False self.assertTrue(patch_write_agent_disabled.call_count == expected_disabled) else: self.assertEqual(1, ph.report_event.call_count) positional_args, kw_args = ph.report_event.call_args_list[0] # [call(u'[ProtocolError] Failed to validate OVF: ProvisionGuestAgent not found')] self.assertTrue('Failed to validate OVF: ProvisionGuestAgent not found' in positional_args[0]) self.assertFalse(kw_args['is_success']) @distros() @patch( 'azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') def test_provision_telemetry_fail(self, mock_util, distro_name, distro_version, distro_full_name): """ Assert that the agent issues one telemetry message as part of a failed provisioning. 1. 
Provision """ ph = get_provision_handler(distro_name, distro_version, distro_full_name) ph.report_event = MagicMock() ph.reg_ssh_host_key = MagicMock(side_effect=ProvisionError( "--unit-test--")) mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") ph.osutil = mock_osutil ph.protocol_util.osutil = mock_osutil ph.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() positional_args, kw_args = ph.report_event.call_args_list[0] self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') @distros() def test_handle_provision_guest_agent(self, patch_write_agent_disabled, distro_name, distro_version, distro_full_name): ph = get_provision_handler(distro_name, distro_version, distro_full_name) patch_write_agent_disabled.call_count = 0 ph.handle_provision_guest_agent(provision_guest_agent='false') self.assertEqual(1, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='False') self.assertEqual(2, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='FALSE') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent=' ') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent=None) self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='true') self.assertEqual(3, patch_write_agent_disabled.call_count) 
ph.handle_provision_guest_agent(provision_guest_agent='True') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='TRUE') self.assertEqual(3, patch_write_agent_disabled.call_count) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/000077500000000000000000000000001335416306700171215ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/protocol/__init__.py000066400000000000000000000011651335416306700212350ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/protocol/mockmetadata.py000066400000000000000000000051441335416306700221310ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "identity": "metadata/identity.json", "certificates": "metadata/certificates.json", "certificates_data": "metadata/certificates_data.json", "ext_handlers": "metadata/ext_handlers.json", "ext_handler_pkgs": "metadata/ext_handler_pkgs.json", "trans_prv": "metadata/trans_prv", "trans_cert": "metadata/trans_cert", } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["ext_handlers"] = "metadata/ext_handlers_no_ext.json" class MetadataProtocolData(object): def __init__(self, data_files): self.identity = load_data(data_files.get("identity")) self.certificates = load_data(data_files.get("certificates")) self.certificates_data = load_data(data_files.get("certificates_data")) self.ext_handlers = load_data(data_files.get("ext_handlers")) self.ext_handler_pkgs = load_data(data_files.get("ext_handler_pkgs")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) def mock_http_get(self, url, *args, **kwargs): content = None if url.count(u"identity?") > 0: content = self.identity elif url.count(u"certificates") > 0: content = self.certificates elif url.count(u"certificates_data") > 0: content = self.certificates_data elif url.count(u"extensionHandlers") > 0: content = self.ext_handlers elif url.count(u"versionUri") > 0: content = self.ext_handler_pkgs else: raise Exception("Bad url {0}".format(url)) resp = MagicMock() resp.status = httpclient.OK if content is None: resp.read = Mock(return_value=None) else: resp.read = Mock(return_value=content.encode("utf-8")) return resp WALinuxAgent-2.2.32/tests/protocol/mockwiredata.py000066400000000000000000000156561335416306700221620ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "version_info": "wire/version_info.xml", "goal_state": "wire/goal_state.xml", "hosting_env": "wire/hosting_env.xml", "shared_config": "wire/shared_config.xml", "certs": "wire/certs.xml", "ext_conf": "wire/ext_conf.xml", "manifest": "wire/manifest.xml", "ga_manifest" : "wire/ga_manifest.xml", "trans_prv": "wire/trans_prv", "trans_cert": "wire/trans_cert", "test_ext": "ext/sample_ext-1.3.0.zip" } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml" DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy() DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml" DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy() DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml" DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml" DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml" DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml" DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml" 
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy() DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml" DATA_FILE_EXT_DELETION = DATA_FILE.copy() DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml" DATA_FILE_EXT_SINGLE = DATA_FILE.copy() DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml" class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.emulate_stale_goal_state = False self.call_counts = { "comp=versions" : 0, "/versions" : 0, "goalstate" : 0, "hostingenvuri" : 0, "sharedconfiguri" : 0, "certificatesuri" : 0, "extensionsconfiguri" : 0, "extensionArtifact" : 0, "manifest.xml" : 0, "manifest_of_ga.xml" : 0, "ExampleHandlerLinux" : 0 } self.version_info = load_data(data_files.get("version_info")) self.goal_state = load_data(data_files.get("goal_state")) self.hosting_env = load_data(data_files.get("hosting_env")) self.shared_config = load_data(data_files.get("shared_config")) self.certs = load_data(data_files.get("certs")) self.ext_conf = load_data(data_files.get("ext_conf")) self.manifest = load_data(data_files.get("manifest")) self.ga_manifest = load_data(data_files.get("ga_manifest")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) self.ext = load_bin_data(data_files.get("test_ext")) def mock_http_get(self, url, *args, **kwargs): content = None resp = MagicMock() resp.status = httpclient.OK # wire server versions if "comp=versions" in url: content = self.version_info self.call_counts["comp=versions"] += 1 # HostPlugin versions elif "/versions" in url: content = '["2015-09-01"]' self.call_counts["/versions"] += 1 elif "goalstate" in url: content = self.goal_state self.call_counts["goalstate"] += 1 elif "hostingenvuri" in url: content = self.hosting_env self.call_counts["hostingenvuri"] += 1 elif "sharedconfiguri" in url: content = self.shared_config self.call_counts["sharedconfiguri"] += 1 elif "certificatesuri" in url: content = 
self.certs self.call_counts["certificatesuri"] += 1 elif "extensionsconfiguri" in url: content = self.ext_conf self.call_counts["extensionsconfiguri"] += 1 else: # A stale GoalState results in a 400 from the HostPlugin # for which the HTTP handler in restutil raises ResourceGoneError if self.emulate_stale_goal_state: if "extensionArtifact" in url: self.emulate_stale_goal_state = False self.call_counts["extensionArtifact"] += 1 raise ResourceGoneError() else: raise HttpError() # For HostPlugin requests, replace the URL with that passed # via the x-ms-artifact-location header if "extensionArtifact" in url: self.call_counts["extensionArtifact"] += 1 if "headers" not in kwargs or \ "x-ms-artifact-location" not in kwargs["headers"]: raise Exception("Bad HEADERS passed to HostPlugin: {0}", kwargs) url = kwargs["headers"]["x-ms-artifact-location"] if "manifest.xml" in url: content = self.manifest self.call_counts["manifest.xml"] += 1 elif "manifest_of_ga.xml" in url: content = self.ga_manifest self.call_counts["manifest_of_ga.xml"] += 1 elif "ExampleHandlerLinux" in url: content = self.ext self.call_counts["ExampleHandlerLinux"] += 1 resp.read = Mock(return_value=content) return resp else: raise Exception("Bad url {0}".format(url)) resp.read = Mock(return_value=content.encode("utf-8")) return resp def mock_crypt_util(self, *args, **kw): #Partially patch instance method of class CryptUtil cryptutil = CryptUtil(*args, **kw) cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert) return cryptutil def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file): with open(trans_prv_file, 'w+') as prv_file: prv_file.write(self.trans_prv) with open(trans_cert_file, 'w+') as cert_file: cert_file.write(self.trans_cert) WALinuxAgent-2.2.32/tests/protocol/test_healthservice.py000066400000000000000000000260231335416306700233630ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
import json

from azurelinuxagent.common.exception import HttpError
from azurelinuxagent.common.protocol.healthservice import Observation, HealthService
from azurelinuxagent.common.utils import restutil
from tests.protocol.test_hostplugin import MockResponse
from tests.tools import *


class TestHealthService(AgentTestCase):
    """Tests for HealthService reporting and the Observation payloads it POSTs."""

    def assert_status_code(self, status_code, expected_healthy):
        """Assert that *status_code* maps to the expected health verdict.

        A response is considered healthy iff restutil does not classify it
        as a host-plugin failure.
        """
        response = MockResponse('response', status_code)
        is_healthy = not restutil.request_failed_at_hostplugin(response)
        self.assertEqual(expected_healthy, is_healthy)

    def assert_observation(self, call_args, name, is_healthy, value, description):
        """Validate the JSON observation document POSTed to the health endpoint.

        *call_args* is the mock http_post call; positional arg 0 is the
        endpoint URL and arg 1 the JSON body.
        """
        endpoint = call_args[0][0]
        content = call_args[0][1]

        jo = json.loads(content)
        api = jo['Api']
        source = jo['Source']
        version = jo['Version']
        obs = jo['Observations']
        fo = obs[0]
        obs_name = fo['ObservationName']
        obs_healthy = fo['IsHealthy']
        obs_value = fo['Value']
        obs_description = fo['Description']

        # Envelope: content type, endpoint, API name, source, version.
        self.assertEqual('application/json', call_args[1]['headers']['Content-Type'])
        self.assertEqual('http://endpoint:80/HealthService', endpoint)
        self.assertEqual('reporttargethealth', api)
        self.assertEqual('WALinuxAgent', source)
        self.assertEqual('1.0', version)

        # The single observation carried in the payload.
        self.assertEqual(name, obs_name)
        self.assertEqual(value, obs_value)
        self.assertEqual(is_healthy, obs_healthy)
        self.assertEqual(description, obs_description)

    def assert_telemetry(self, call_args, response=''):
        """Validate the telemetry event emitted for an unhealthy observation."""
        args, kw_args = call_args
        self.assertFalse(kw_args['is_success'])
        self.assertEqual('HealthObservation', kw_args['op'])
        obs = json.loads(kw_args['message'])
        self.assertEqual(obs['Value'], response)

    def test_observation_validity(self):
        # Name and is_healthy are mandatory.
        try:
            Observation(name=None, is_healthy=True)
            self.fail('Empty observation name should raise ValueError')
        except ValueError:
            pass

        try:
            Observation(name='Name', is_healthy=None)
            self.fail('Empty measurement should raise ValueError')
        except ValueError:
            pass

        # Missing value/description default to the empty string.
        o = Observation(name='Name', is_healthy=True, value=None, description=None)
        self.assertEqual('', o.value)
        self.assertEqual('', o.description)

        # Overlong fields are kept in full on the object but truncated in the
        # serialized form: 64 chars for the name, 128 for value/description.
        long_str = 's' * 200
        o = Observation(name=long_str, is_healthy=True, value=long_str, description=long_str)
        self.assertEqual(200, len(o.name))
        self.assertEqual(200, len(o.value))
        self.assertEqual(200, len(o.description))

        self.assertEqual(64, len(o.as_obj['ObservationName']))
        self.assertEqual(128, len(o.as_obj['Value']))
        self.assertEqual(128, len(o.as_obj['Description']))

    def test_observation_json(self):
        # Serialized document must match field-for-field (order-insensitive).
        health_service = HealthService('endpoint')
        health_service.observations.append(
            Observation(name='name', is_healthy=True, value='value', description='description'))
        expected_json = '{"Source": "WALinuxAgent", ' \
                        '"Api": "reporttargethealth", ' \
                        '"Version": "1.0", ' \
                        '"Observations": [{' \
                        '"Value": "value", ' \
                        '"ObservationName": "name", ' \
                        '"Description": "description", ' \
                        '"IsHealthy": true' \
                        '}]}'
        expected = sorted(json.loads(expected_json).items())
        actual = sorted(json.loads(health_service.as_json).items())
        self.assertEqual(expected, actual)

    @patch('azurelinuxagent.common.event.add_event')
    @patch("azurelinuxagent.common.utils.restutil.http_post")
    def test_reporting(self, patch_post, patch_add_event):
        """Each report_* helper POSTs one observation, emits telemetry only for
        unhealthy reports, and always drains the observation queue."""
        health_service = HealthService('endpoint')
        health_service.report_host_plugin_status(is_healthy=True, response='response')
        self.assertEqual(1, patch_post.call_count)
        self.assertEqual(0, patch_add_event.call_count)
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME,
                                is_healthy=True,
                                value='response',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_status(is_healthy=False, response='error')
        self.assertEqual(2, patch_post.call_count)
        self.assertEqual(1, patch_add_event.call_count)
        self.assert_telemetry(call_args=patch_add_event.call_args, response='error')
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME,
                                is_healthy=False,
                                value='error',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_extension_artifact(is_healthy=True, source='source', response='response')
        self.assertEqual(3, patch_post.call_count)
        self.assertEqual(1, patch_add_event.call_count)
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME,
                                is_healthy=True,
                                value='response',
                                description='source')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_extension_artifact(is_healthy=False, source='source', response='response')
        self.assertEqual(4, patch_post.call_count)
        self.assertEqual(2, patch_add_event.call_count)
        self.assert_telemetry(call_args=patch_add_event.call_args, response='response')
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME,
                                is_healthy=False,
                                value='response',
                                description='source')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_heartbeat(is_healthy=True)
        self.assertEqual(5, patch_post.call_count)
        self.assertEqual(2, patch_add_event.call_count)
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME,
                                is_healthy=True,
                                value='',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_heartbeat(is_healthy=False)
        self.assertEqual(3, patch_add_event.call_count)
        self.assert_telemetry(call_args=patch_add_event.call_args)
        self.assertEqual(6, patch_post.call_count)
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME,
                                is_healthy=False,
                                value='',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_versions(is_healthy=True, response='response')
        self.assertEqual(7, patch_post.call_count)
        self.assertEqual(3, patch_add_event.call_count)
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME,
                                is_healthy=True,
                                value='response',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        health_service.report_host_plugin_versions(is_healthy=False, response='response')
        self.assertEqual(8, patch_post.call_count)
        self.assertEqual(4, patch_add_event.call_count)
        self.assert_telemetry(call_args=patch_add_event.call_args, response='response')
        self.assert_observation(call_args=patch_post.call_args,
                                name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME,
                                is_healthy=False,
                                value='response',
                                description='')
        self.assertEqual(0, len(health_service.observations))

        # An HttpError while POSTing is swallowed: the attempt counts, but no
        # telemetry is emitted and the queue is still drained.
        patch_post.side_effect = HttpError()
        health_service.report_host_plugin_versions(is_healthy=True, response='')

        self.assertEqual(9, patch_post.call_count)
        self.assertEqual(4, patch_add_event.call_count)
        self.assertEqual(0, len(health_service.observations))

    def test_observation_length(self):
        health_service = HealthService('endpoint')

        # make 100 observations
        for i in range(0, 100):
            health_service._observe(is_healthy=True, name='{0}'.format(i))

        # ensure we keep only 10
        self.assertEqual(10, len(health_service.observations))

        # ensure we keep the most recent 10
        self.assertEqual('90', health_service.observations[0].name)
        self.assertEqual('99', health_service.observations[9].name)

    def test_status_codes(self):
        # healthy
        self.assert_status_code(status_code=200, expected_healthy=True)
        self.assert_status_code(status_code=201, expected_healthy=True)
        self.assert_status_code(status_code=302, expected_healthy=True)
        self.assert_status_code(status_code=400, expected_healthy=True)
        self.assert_status_code(status_code=416, expected_healthy=True)
        self.assert_status_code(status_code=419, expected_healthy=True)
        self.assert_status_code(status_code=429, expected_healthy=True)
        self.assert_status_code(status_code=502, expected_healthy=True)

        # unhealthy
        self.assert_status_code(status_code=500, expected_healthy=False)
        self.assert_status_code(status_code=501, expected_healthy=False)
        self.assert_status_code(status_code=503, expected_healthy=False)
        self.assert_status_code(status_code=504, expected_healthy=False)
#
# Requires Python 2.6+ and Openssl 1.0+
#

import base64
import json
import sys
import datetime

import azurelinuxagent.common.protocol.restapi as restapi
import azurelinuxagent.common.protocol.wire as wire
import azurelinuxagent.common.protocol.hostplugin as hostplugin

from azurelinuxagent.common.errorstate import ErrorState
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.protocol.hostplugin import API_VERSION
from azurelinuxagent.common.utils import restutil
from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE
from tests.protocol.test_wire import MockResponse
from tests.tools import *

# Python 2/3 compatibility: pick the http client module and a zero-copy
# buffer type for the running interpreter.
if sys.version_info[0] == 3:
    import http.client as httpclient
    bytebuffer = memoryview
elif sys.version_info[0] == 2:
    import httplib as httpclient
    bytebuffer = buffer

# Well-known HostGAPlugin / WireServer endpoints used throughout the tests.
hostplugin_status_url = "http://168.63.129.16:32526/status"
hostplugin_versions_url = "http://168.63.129.16:32526/versions"
health_service_url = 'http://168.63.129.16:80/HealthService'
sas_url = "http://sas_url"
wireserver_url = "168.63.129.16"

block_blob_type = 'BlockBlob'
page_blob_type = 'PageBlob'

api_versions = '["2015-09-01"]'
storage_version = "2014-02-14"

# Canned VM status payload and its base64 form (str on py3, bytes on py2).
faux_status = "{ 'dummy' : 'data' }"
faux_status_b64 = base64.b64encode(bytes(bytearray(faux_status, encoding='utf-8')))
if PY_VERSION_MAJOR > 2:
    faux_status_b64 = faux_status_b64.decode('utf-8')


class TestHostPlugin(AgentTestCase):
    """Tests for HostPluginProtocol: initialization, status upload channels,
    and the health signals emitted along the way."""

    def _init_host(self):
        """Build a HostPluginProtocol from the canned goal state."""
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)
        host_plugin = wire.HostPluginProtocol(wireserver_url,
                                              test_goal_state.container_id,
                                              test_goal_state.role_config_name)
        self.assertTrue(host_plugin.health_service is not None)
        return host_plugin

    def _init_status_blob(self):
        """Build a StatusBlob pre-loaded with the canned faux status."""
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        status_blob = wire_protocol_client.status_blob
        status_blob.data = faux_status
        status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready")
        return status_blob

    def _compare_data(self, actual, expected):
        """Return True when *actual* matches *expected* on content/requestUri
        and contains at least the expected headers; print the mismatch and
        return False otherwise."""
        for k in iter(expected.keys()):
            if k == 'content' or k == 'requestUri':
                if actual[k] != expected[k]:
                    print("Mismatch: Actual '{0}'='{1}', "
                          "Expected '{0}'='{2}'".format(k, actual[k], expected[k]))
                    return False
            elif k == 'headers':
                for h in expected['headers']:
                    if not (h in actual['headers']):
                        print("Missing Header: '{0}'".format(h))
                        return False
            else:
                print("Unexpected Key: '{0}'".format(k))
                return False
        return True

    def _hostplugin_data(self, blob_headers, content=None):
        """Build the expected HostGAPlugin PUT body: requestUri, the blob
        headers as name/value pairs, and (optionally) base64-encoded content."""
        headers = []
        for name in iter(blob_headers.keys()):
            headers.append({
                'headerName': name,
                'headerValue': blob_headers[name]
            })
        data = {
            'requestUri': sas_url,
            'headers': headers
        }
        if not content is None:
            s = base64.b64encode(bytes(content))
            if PY_VERSION_MAJOR > 2:
                s = s.decode('utf-8')
            data['content'] = s
        return data

    def _hostplugin_headers(self, goal_state):
        """Expected HTTP headers for a HostGAPlugin status request."""
        return {
            'x-ms-version': '2015-09-01',
            'Content-type': 'application/json',
            'x-ms-containerid': goal_state.container_id,
            'x-ms-host-config-name': goal_state.role_config_name
        }

    def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data):
        """Assert a mocked http_request call used the expected method, URL,
        JSON body, and goal-state routing headers."""
        args, kwargs = args
        self.assertEqual(exp_method, args[0])
        self.assertEqual(exp_url, args[1])
        self.assertTrue(self._compare_data(json.loads(args[2]), exp_data))

        headers = kwargs['headers']
        self.assertEqual(headers['x-ms-containerid'], goal_state.container_id)
        self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name)

    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions")
    @patch("azurelinuxagent.ga.update.restutil.http_get")
    @patch("azurelinuxagent.common.event.report_event")
    def assert_ensure_initialized(self, patch_event, patch_http_get, patch_report_health,
                                  response_body,
                                  response_status_code,
                                  should_initialize,
                                  should_report_healthy):
        """Drive ensure_initialized() against a canned /versions response and
        assert the initialization outcome, the telemetry event, and the
        health report it produces."""
        host = hostplugin.HostPluginProtocol(endpoint='ws',
                                             container_id='cid',
                                             role_config_name='rcf')
        host.is_initialized = False
        patch_http_get.return_value = MockResponse(body=response_body,
                                                   reason='reason',
                                                   status_code=response_status_code)

        return_value = host.ensure_initialized()

        self.assertEqual(return_value, host.is_available)
        self.assertEqual(should_initialize, host.is_initialized)

        self.assertEqual(1, patch_event.call_count)
        self.assertEqual('InitializeHostPlugin', patch_event.call_args[0][0])
        self.assertEqual(should_initialize, patch_event.call_args[1]['is_success'])

        self.assertEqual(1, patch_report_health.call_count)
        self.assertEqual(should_report_healthy, patch_report_health.call_args[1]['is_healthy'])

        actual_response = patch_report_health.call_args[1]['response']
        if should_initialize:
            self.assertEqual('', actual_response)
        else:
            # Failure responses embed the HTTP error detail.
            self.assertTrue('HTTP Failed' in actual_response)
            self.assertTrue(response_body in actual_response)
            self.assertTrue(ustr(response_status_code) in actual_response)

    def test_ensure_initialized(self):
        """
        Test calls to ensure_initialized
        """
        # 2xx with valid versions: initializes, healthy.
        self.assert_ensure_initialized(response_body=api_versions,
                                       response_status_code=200,
                                       should_initialize=True,
                                       should_report_healthy=True)

        # Client-side errors (4xx) do not initialize but are not the host
        # plugin's fault, so health stays True.
        self.assert_ensure_initialized(response_body='invalid ip',
                                       response_status_code=400,
                                       should_initialize=False,
                                       should_report_healthy=True)

        self.assert_ensure_initialized(response_body='generic bad request',
                                       response_status_code=400,
                                       should_initialize=False,
                                       should_report_healthy=True)

        self.assert_ensure_initialized(response_body='resource gone',
                                       response_status_code=410,
                                       should_initialize=False,
                                       should_report_healthy=True)

        # 500 indicates an unhealthy host plugin; 502 is an upstream issue.
        self.assert_ensure_initialized(response_body='generic error',
                                       response_status_code=500,
                                       should_initialize=False,
                                       should_report_healthy=False)

        self.assert_ensure_initialized(response_body='upstream error',
                                       response_status_code=502,
                                       should_initialize=False,
                                       should_report_healthy=True)
    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload",
           return_value=False)
    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status")
    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    def test_default_channel(self, patch_update, patch_put, patch_upload, _):
        """
        Status now defaults to HostPlugin. Validate that any errors on the public
        channel are ignored.  Validate that the default channel is never changed
        as part of status upload.
        """
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        status = restapi.VMStatus(status="Ready", message="Guest Agent is running")
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
        wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
        wire_protocol_client.ext_conf.status_upload_blob = sas_url
        wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type
        wire_protocol_client.status_blob.set_vm_status(status)

        # act
        wire_protocol_client.upload_status_blob()

        # assert direct route is not called
        self.assertEqual(0, patch_upload.call_count, "Direct channel was used")
        # assert host plugin route is called
        self.assertEqual(1, patch_put.call_count, "Host plugin was not used")
        # assert update goal state is only called once, non-forced
        self.assertEqual(1, patch_update.call_count, "Unexpected call count")
        self.assertEqual(0, len(patch_update.call_args[1]), "Unexpected parameters")
        # ensure the correct url is used
        self.assertEqual(sas_url, patch_put.call_args[0][0])
        # ensure host plugin is not set as default
        self.assertFalse(wire.HostPluginProtocol.is_default_channel())

    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status",
           side_effect=HttpError("503"))
    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    def test_fallback_channel_503(self, patch_update, patch_put, patch_upload, _):
        """
        When host plugin returns a 503, we should fall back to the direct channel
        """
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        status = restapi.VMStatus(status="Ready", message="Guest Agent is running")
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
        wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
        wire_protocol_client.ext_conf.status_upload_blob = sas_url
        wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type
        wire_protocol_client.status_blob.set_vm_status(status)

        # act
        wire_protocol_client.upload_status_blob()

        # assert direct route is called
        self.assertEqual(1, patch_upload.call_count, "Direct channel was not used")
        # assert host plugin route is called
        self.assertEqual(1, patch_put.call_count, "Host plugin was not used")
        # assert update goal state is only called once, non-forced
        self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count")
        self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count")
        # ensure the correct url is used
        self.assertEqual(sas_url, patch_put.call_args[0][0])
        # ensure host plugin is not set as default
        self.assertFalse(wire.HostPluginProtocol.is_default_channel())

    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status",
           side_effect=ResourceGoneError("410"))
    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    def test_fallback_channel_410(self, patch_update, patch_put, patch_upload, _):
        """
        When host plugin returns a 410, we should force the goal state update and return
        """
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        status = restapi.VMStatus(status="Ready", message="Guest Agent is running")
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
        wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
        wire_protocol_client.ext_conf.status_upload_blob = sas_url
        wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type
        wire_protocol_client.status_blob.set_vm_status(status)

        # act
        wire_protocol_client.upload_status_blob()

        # assert direct route is not called
        self.assertEqual(0, patch_upload.call_count, "Direct channel was used")
        # assert host plugin route is called
        self.assertEqual(1, patch_put.call_count, "Host plugin was not used")
        # assert update goal state is called twice, forced=True on the second
        self.assertEqual(2, patch_update.call_count, "Update goal state unexpected call count")
        self.assertEqual(1, len(patch_update.call_args[1]), "Update goal state unexpected call count")
        self.assertTrue(patch_update.call_args[1]['forced'], "Update goal state unexpected call count")
        # ensure the correct url is used
        self.assertEqual(sas_url, patch_put.call_args[0][0])
        # ensure host plugin is not set as default
        self.assertFalse(wire.HostPluginProtocol.is_default_channel())

    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized",
           return_value=True)
    @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload",
           return_value=False)
    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status",
           side_effect=HttpError("500"))
    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    def test_fallback_channel_failure(self, patch_update, patch_put, patch_upload, _):
        """
        When host plugin returns a 500, and direct fails, we should raise a ProtocolError
        """
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        status = restapi.VMStatus(status="Ready", message="Guest Agent is running")
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
        wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
        wire_protocol_client.ext_conf.status_upload_blob = sas_url
        wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type
        wire_protocol_client.status_blob.set_vm_status(status)

        # act
        self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob)

        # assert direct route is called as the fallback
        self.assertEqual(1, patch_upload.call_count, "Direct channel was not used")
        # assert host plugin route is called
        self.assertEqual(1, patch_put.call_count, "Host plugin was not used")
        # assert update goal state is only called once, non-forced
        self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count")
        self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count")
        # ensure the correct url is used
        self.assertEqual(sas_url, patch_put.call_args[0][0])
        # ensure host plugin is not set as default
        self.assertFalse(wire.HostPluginProtocol.is_default_channel())

    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    @patch("azurelinuxagent.common.event.add_event")
    def test_put_status_error_reporting(self, patch_add_event, _):
        """
        Validate the telemetry when uploading status fails
        """
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)
        status = restapi.VMStatus(status="Ready",
                                  message="Guest Agent is running")
        wire.HostPluginProtocol.set_default_channel(False)
        with patch.object(wire.StatusBlob, "upload", return_value=False):
            wire_protocol_client = wire.WireProtocol(wireserver_url).client
            wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
            wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
            wire_protocol_client.ext_conf.status_upload_blob = sas_url
            wire_protocol_client.status_blob.set_vm_status(status)
            put_error = wire.HttpError("put status http error")
            with patch.object(restutil, "http_put", side_effect=put_error) as patch_http_put:
                with patch.object(wire.HostPluginProtocol, "ensure_initialized", return_value=True):
                    self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob)

                    # The agent tries to upload via HostPlugin and that fails due to
                    # http_put having a side effect of "put_error".
                    #
                    # It then falls back to a direct-connection upload, which is
                    # attempted exactly once and also fails (upload is patched to
                    # return False), hence the ProtocolError above.
                    self.assertEqual(1, wire_protocol_client.status_blob.upload.call_count)

                    # The agent never touches the default protocol in this code path,
                    # so no change.
                    self.assertFalse(wire.HostPluginProtocol.is_default_channel())

                    # Exactly one telemetry event is logged, recording the fallback
                    # to the direct channel (reported as a successful operation).
                    self.assertEqual(1, patch_add_event.call_count)
                    self.assertEqual('ReportStatus', patch_add_event.call_args[1]['op'])
                    self.assertTrue('Falling back to direct' in patch_add_event.call_args[1]['message'])
                    self.assertEqual(True, patch_add_event.call_args[1]['is_success'])

    def test_validate_http_request(self):
        """Validate correct set of data is sent to HostGAPlugin when reporting VM status"""
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        status_blob = wire_protocol_client.status_blob
        status_blob.data = faux_status
        status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready")

        exp_method = 'PUT'
        exp_url = hostplugin_status_url
        exp_data = self._hostplugin_data(
            status_blob.get_block_blob_headers(len(faux_status)),
            bytearray(faux_status, encoding='utf-8'))

        with patch.object(restutil, "http_request") as patch_http:
            patch_http.return_value = Mock(status=httpclient.OK)
            wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
            plugin = wire_protocol_client.get_host_plugin()

            with patch.object(plugin, 'get_api_versions') as patch_api:
                patch_api.return_value = API_VERSION
                plugin.put_vm_status(status_blob, sas_url, block_blob_type)

                self.assertTrue(patch_http.call_count == 2)

                # first call is to host plugin
                self._validate_hostplugin_args(
                    patch_http.call_args_list[0],
                    test_goal_state,
                    exp_method, exp_url, exp_data)

                # second call is to health service
                self.assertEqual('POST', patch_http.call_args_list[1][0][0])
                self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1])
    @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
    def test_no_fallback(self, _):
        """
        Validate fallback to upload status using HostGAPlugin is not happening
        when status reporting via default method is successful
        """
        vmstatus = restapi.VMStatus(message="Ready", status="Ready")
        with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put:
            with patch.object(wire.StatusBlob, "upload") as patch_upload:
                patch_upload.return_value = True
                wire_protocol_client = wire.WireProtocol(wireserver_url).client
                wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
                wire_protocol_client.ext_conf.status_upload_blob = sas_url
                wire_protocol_client.status_blob.vm_status = vmstatus
                wire_protocol_client.upload_status_blob()
                # The host-plugin channel must not be engaged at all.
                self.assertTrue(patch_put.call_count == 0, "Fallback was engaged")

    def test_validate_block_blob(self):
        """Validate correct set of data is sent to HostGAPlugin when reporting VM status"""
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        host_client = wire.HostPluginProtocol(wireserver_url,
                                              test_goal_state.container_id,
                                              test_goal_state.role_config_name)
        self.assertFalse(host_client.is_initialized)
        self.assertTrue(host_client.api_versions is None)
        self.assertTrue(host_client.health_service is not None)

        status_blob = wire_protocol_client.status_blob
        status_blob.data = faux_status
        status_blob.type = block_blob_type
        status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready")

        exp_method = 'PUT'
        exp_url = hostplugin_status_url
        exp_data = self._hostplugin_data(
            status_blob.get_block_blob_headers(len(faux_status)),
            bytearray(faux_status, encoding='utf-8'))

        with patch.object(restutil, "http_request") as patch_http:
            patch_http.return_value = Mock(status=httpclient.OK)
            with patch.object(wire.HostPluginProtocol,
                              "get_api_versions") as patch_get:
                patch_get.return_value = api_versions
                host_client.put_vm_status(status_blob, sas_url)

                self.assertTrue(patch_http.call_count == 2)

                # first call is to host plugin
                self._validate_hostplugin_args(
                    patch_http.call_args_list[0],
                    test_goal_state,
                    exp_method, exp_url, exp_data)

                # second call is to health service
                self.assertEqual('POST', patch_http.call_args_list[1][0][0])
                self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1])

    def test_validate_page_blobs(self):
        """Validate correct set of data is sent for page blobs"""
        wire_protocol_client = wire.WireProtocol(wireserver_url).client
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        host_client = wire.HostPluginProtocol(wireserver_url,
                                              test_goal_state.container_id,
                                              test_goal_state.role_config_name)

        self.assertFalse(host_client.is_initialized)
        self.assertTrue(host_client.api_versions is None)

        status_blob = wire_protocol_client.status_blob
        status_blob.data = faux_status
        status_blob.type = page_blob_type
        status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready")

        exp_method = 'PUT'
        exp_url = hostplugin_status_url

        # Page blobs are written in 512-byte-aligned pages; pad the status up
        # to the next page boundary.
        page_status = bytearray(status_blob.data, encoding='utf-8')
        page_size = int((len(page_status) + 511) / 512) * 512
        page_status = bytearray(status_blob.data.ljust(page_size), encoding='utf-8')
        page = bytearray(page_size)
        page[0: page_size] = page_status[0: len(page_status)]
        mock_response = MockResponse('', httpclient.OK)

        with patch.object(restutil, "http_request",
                          return_value=mock_response) as patch_http:
            with patch.object(wire.HostPluginProtocol,
                              "get_api_versions") as patch_get:
                patch_get.return_value = api_versions
                host_client.put_vm_status(status_blob, sas_url)

                self.assertTrue(patch_http.call_count == 3)

                # first call is to host plugin
                exp_data = self._hostplugin_data(
                    status_blob.get_page_blob_create_headers(
                        page_size))
                self._validate_hostplugin_args(
                    patch_http.call_args_list[0],
                    test_goal_state,
                    exp_method, exp_url, exp_data)

                # second call is to health service
                self.assertEqual('POST', patch_http.call_args_list[1][0][0])
                self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1])

                # last call is to host plugin
                exp_data = self._hostplugin_data(
                    status_blob.get_page_blob_page_headers(
                        0, page_size),
                    page)
                exp_data['requestUri'] += "?comp=page"
                self._validate_hostplugin_args(
                    patch_http.call_args_list[2],
                    test_goal_state,
                    exp_method, exp_url, exp_data)

    def test_validate_get_extension_artifacts(self):
        """The artifact request must target the HostGAPlugin port and carry the
        goal-state routing headers plus the artifact location."""
        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)

        expected_url = hostplugin.URI_FORMAT_GET_EXTENSION_ARTIFACT.format(wireserver_url, hostplugin.HOST_PLUGIN_PORT)
        expected_headers = {'x-ms-version': '2015-09-01',
                            "x-ms-containerid": test_goal_state.container_id,
                            "x-ms-host-config-name": test_goal_state.role_config_name,
                            "x-ms-artifact-location": sas_url}

        host_client = wire.HostPluginProtocol(wireserver_url,
                                              test_goal_state.container_id,
                                              test_goal_state.role_config_name)
        self.assertFalse(host_client.is_initialized)
        self.assertTrue(host_client.api_versions is None)
        self.assertTrue(host_client.health_service is not None)

        with patch.object(wire.HostPluginProtocol, "get_api_versions", return_value=api_versions) as patch_get:
            actual_url, actual_headers = host_client.get_artifact_request(sas_url)
            self.assertTrue(host_client.is_initialized)
            self.assertFalse(host_client.api_versions is None)
            self.assertEqual(expected_url, actual_url)
            for k in expected_headers:
                self.assertTrue(k in actual_headers)
                self.assertEqual(expected_headers[k], actual_headers[k])
    @patch("azurelinuxagent.common.utils.restutil.http_get")
    def test_health(self, patch_http_get):
        """get_health() is True on 200, False on 500, and propagates IOError."""
        host_plugin = self._init_host()

        patch_http_get.return_value = MockResponse('', 200)
        result = host_plugin.get_health()
        self.assertEqual(1, patch_http_get.call_count)
        self.assertTrue(result)

        patch_http_get.return_value = MockResponse('', 500)
        result = host_plugin.get_health()
        self.assertFalse(result)

        patch_http_get.side_effect = IOError('client IO error')
        try:
            host_plugin.get_health()
            self.fail('IO error expected to be raised')
        except IOError:
            # expected
            pass

    # NOTE(review): @patch decorators are applied bottom-up, so the first
    # injected argument corresponds to the *bottom* decorator. Here the
    # parameter names are swapped: patch_http_get actually receives the
    # report_host_plugin_versions mock and vice versa. The assertions still
    # pass because both call counts are 1 — confirm and consider renaming.
    @patch("azurelinuxagent.common.utils.restutil.http_get",
           return_value=MockResponse(status_code=200, body=b''))
    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions")
    def test_ensure_health_service_called(self, patch_http_get, patch_report_versions):
        """Fetching API versions must report a versions health signal."""
        host_plugin = self._init_host()

        host_plugin.get_api_versions()
        self.assertEqual(1, patch_http_get.call_count)
        self.assertEqual(1, patch_report_versions.call_count)

    @patch("azurelinuxagent.common.utils.restutil.http_get")
    @patch("azurelinuxagent.common.utils.restutil.http_post")
    @patch("azurelinuxagent.common.utils.restutil.http_put")
    def test_put_status_healthy_signal(self, patch_http_put, patch_http_post, patch_http_get):
        """A successful status upload emits healthy signals for both the
        /versions and /status observations."""
        host_plugin = self._init_host()
        status_blob = self._init_status_blob()
        # get_api_versions
        patch_http_get.return_value = MockResponse(api_versions, 200)
        # put status blob
        patch_http_put.return_value = MockResponse(None, 201)

        host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url)

        self.assertEqual(1, patch_http_get.call_count)
        self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0])

        # Page-blob upload: one PUT to create the blob, one to write the page.
        self.assertEqual(2, patch_http_put.call_count)
        self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[0][0][0])
        self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[1][0][0])

        self.assertEqual(2, patch_http_post.call_count)

        # signal for /versions
        self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0])
        jstr = patch_http_post.call_args_list[0][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertTrue(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName'])

        # signal for /status
        self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0])
        jstr = patch_http_post.call_args_list[1][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertTrue(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName'])

    @patch("azurelinuxagent.common.utils.restutil.http_get")
    @patch("azurelinuxagent.common.utils.restutil.http_post")
    @patch("azurelinuxagent.common.utils.restutil.http_put")
    def test_put_status_unhealthy_signal_transient(self, patch_http_put, patch_http_post, patch_http_get):
        """A transient PUT failure (error state not yet triggered) still reports
        the /status observation as healthy."""
        host_plugin = self._init_host()
        status_blob = self._init_status_blob()
        # get_api_versions
        patch_http_get.return_value = MockResponse(api_versions, 200)
        # put status blob
        patch_http_put.return_value = MockResponse(None, 500)

        if sys.version_info < (2, 7):
            self.assertRaises(HttpError, host_plugin.put_vm_status, status_blob, sas_url)
        else:
            with self.assertRaises(HttpError):
                host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url)

        self.assertEqual(1, patch_http_get.call_count)
        self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0])

        self.assertEqual(1, patch_http_put.call_count)
        self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0])

        self.assertEqual(2, patch_http_post.call_count)

        # signal for /versions
        self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0])
        jstr = patch_http_post.call_args_list[0][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertTrue(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName'])

        # signal for /status
        self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0])
        jstr = patch_http_post.call_args_list[1][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertTrue(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName'])

    @patch("azurelinuxagent.common.utils.restutil.http_get")
    @patch("azurelinuxagent.common.utils.restutil.http_post")
    @patch("azurelinuxagent.common.utils.restutil.http_put")
    def test_put_status_unhealthy_signal_permanent(self, patch_http_put, patch_http_post, patch_http_get):
        """Once the status error state is triggered, the /status observation is
        reported as unhealthy."""
        host_plugin = self._init_host()
        status_blob = self._init_status_blob()
        # get_api_versions
        patch_http_get.return_value = MockResponse(api_versions, 200)
        # put status blob
        patch_http_put.return_value = MockResponse(None, 500)

        host_plugin.status_error_state.is_triggered = Mock(return_value=True)

        if sys.version_info < (2, 7):
            self.assertRaises(HttpError, host_plugin.put_vm_status, status_blob, sas_url)
        else:
            with self.assertRaises(HttpError):
                host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url)

        self.assertEqual(1, patch_http_get.call_count)
        self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0])

        self.assertEqual(1, patch_http_put.call_count)
        self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0])

        self.assertEqual(2, patch_http_post.call_count)

        # signal for /versions
        self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0])
        jstr = patch_http_post.call_args_list[0][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertTrue(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName'])

        # signal for /status
        self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0])
        jstr = patch_http_post.call_args_list[1][0][1]
        obj = json.loads(jstr)
        self.assertEqual(1, len(obj['Observations']))
        self.assertFalse(obj['Observations'][0]['IsHealthy'])
        self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName'])

    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True)
    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact")
    def test_report_fetch_health(self, patch_report_artifact, patch_should_report):
        """Fetch health is reported only for the HostGAPlugin extensionArtifact
        URI; other URIs are ignored."""
        host_plugin = self._init_host()
        host_plugin.report_fetch_health(uri='', is_healthy=True)
        self.assertEqual(0, patch_should_report.call_count)

        host_plugin.report_fetch_health(uri='http://169.254.169.254/extensionArtifact', is_healthy=True)
        self.assertEqual(0, patch_should_report.call_count)

        host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/status', is_healthy=True)
        self.assertEqual(0, patch_should_report.call_count)

        self.assertEqual(None, host_plugin.fetch_last_timestamp)
        host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/extensionArtifact', is_healthy=True)
        self.assertNotEqual(None, host_plugin.fetch_last_timestamp)
        self.assertEqual(1, patch_should_report.call_count)
        self.assertEqual(1, patch_report_artifact.call_count)

    @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True)
    @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_status")
    def test_report_status_health(self, patch_report_status, patch_should_report):
        """Reporting status health records a timestamp and forwards one
        observation to the health service."""
        host_plugin = self._init_host()
        self.assertEqual(None, host_plugin.status_last_timestamp)
        host_plugin.report_status_health(is_healthy=True)
        self.assertNotEqual(None, host_plugin.status_last_timestamp)
        self.assertEqual(1, patch_should_report.call_count)
        self.assertEqual(1, patch_report_status.call_count)
should report is_healthy = True actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(True, actual) # second measurement at 30s, should not report last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(False, actual) # third measurement at 60s, should report last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=60) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(True, actual) # fourth measurement unhealthy, should report and increment counter is_healthy = False self.assertEqual(0, error_state.count) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(1, error_state.count) self.assertEqual(True, actual) # fifth measurement, should not report and reset counter is_healthy = True last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) self.assertEqual(1, error_state.count) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(0, error_state.count) self.assertEqual(False, actual) class MockResponse: def __init__(self, body, status_code, reason=''): self.body = body self.status = status_code self.reason = reason def read(self): return self.body if sys.version_info[0] == 2 else bytes(self.body, encoding='utf-8') if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/test_image_info_matcher.py000066400000000000000000000111551335416306700243350ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the Apache License, Version 2.0 (the "License"); import json from azurelinuxagent.common.exception import HttpError from azurelinuxagent.common.protocol.imds import ComputeInfo, ImdsClient, IMDS_IMAGE_ORIGIN_CUSTOM, \ IMDS_IMAGE_ORIGIN_ENDORSED, IMDS_IMAGE_ORIGIN_PLATFORM, ImageInfoMatcher from azurelinuxagent.common.protocol.restapi import set_properties from azurelinuxagent.common.utils import restutil from tests.ga.test_update import ResponseMock from tests.tools import * class TestImageInfoMatcher(unittest.TestCase): def test_image_does_not_exist(self): doc = '{}' test_subject = ImageInfoMatcher(doc) self.assertFalse(test_subject.is_match("Red Hat", "RHEL", "6.3", "")) def test_image_exists_by_sku(self): doc = '''{ "CANONICAL": { "UBUNTUSERVER": { "16.04-LTS": { "Match": ".*" } } } }''' test_subject = ImageInfoMatcher(doc) self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "")) self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "16.04.201805090")) self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "16.04.201805090")) def test_image_exists_by_version(self): doc = '''{ "REDHAT": { "RHEL": { "Minimum": "6.3" } } }''' test_subject = ImageInfoMatcher(doc) self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.1", "")) self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.2", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.4", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.5", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.0", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.1", "")) def test_image_exists_by_version01(self): """ Test case to ensure the matcher exhaustively searches all cases. REDHAT/RHEL have a SKU >= 6.3 is less precise than REDHAT/RHEL/7-LVM have a any version. Both should return a successful match. 
""" doc = '''{ "REDHAT": { "RHEL": { "Minimum": "6.3", "7-LVM": { "Match": ".*" } } } }''' test_subject = ImageInfoMatcher(doc) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7-LVM", "")) def test_ignores_case(self): doc = '''{ "CANONICAL": { "UBUNTUSERVER": { "16.04-LTS": { "Match": ".*" } } } }''' test_subject = ImageInfoMatcher(doc) self.assertTrue(test_subject.is_match("canonical", "ubuntuserver", "16.04-lts", "")) self.assertFalse(test_subject.is_match("canonical", "ubuntuserver", "14.04.0-lts", "16.04.201805090")) def test_list_operator(self): doc = '''{ "CANONICAL": { "UBUNTUSERVER": { "List": [ "14.04.0-LTS", "14.04.1-LTS" ] } } }''' test_subject = ImageInfoMatcher(doc) self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "")) self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.1-LTS", "")) self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "22.04-LTS", "")) def test_invalid_version(self): doc = '''{ "REDHAT": { "RHEL": { "Minimum": "6.3" } } }''' test_subject = ImageInfoMatcher(doc) self.assertFalse(test_subject.is_match("RedHat", "RHEL", "16.04-LTS", "")) # This is *expected* behavior as opposed to desirable. The specification is # controlled by the agent, so there is no reason to use these values, but if # one does this is expected behavior. # # FlexibleVersion chops off all leading zeros. self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.04", "")) # FlexibleVersion coerces everything to a string self.assertTrue(test_subject.is_match("RedHat", "RHEL", 6.04, "")) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/test_imds.py000066400000000000000000000520071335416306700214720ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the Apache License, Version 2.0 (the "License"); import json import azurelinuxagent.common.protocol.imds as imds from azurelinuxagent.common.exception import HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import set_properties from azurelinuxagent.common.utils import restutil from tests.ga.test_update import ResponseMock from tests.tools import * class TestImds(AgentTestCase): @patch("azurelinuxagent.ga.update.restutil.http_get") def test_get(self, mock_http_get): mock_http_get.return_value = ResponseMock(response='''{ "location": "westcentralus", "name": "unit_test", "offer": "UnitOffer", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "UnitPublisher", "resourceGroupName": "UnitResourceGroupName", "sku": "UnitSku", "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", "tags": "Key1:Value1;Key2:Value2", "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", "version": "UnitVersion", "vmSize": "Standard_D1_v2" }'''.encode('utf-8')) test_subject = imds.ImdsClient() test_subject.get_compute() self.assertEqual(1, mock_http_get.call_count) positional_args, kw_args = mock_http_get.call_args self.assertEqual('http://169.254.169.254/metadata/instance/compute?api-version=2018-02-01', positional_args[0]) self.assertTrue('User-Agent' in kw_args['headers']) self.assertTrue('Metadata' in kw_args['headers']) self.assertEqual(True, kw_args['headers']['Metadata']) @patch("azurelinuxagent.ga.update.restutil.http_get") def test_get_bad_request(self, mock_http_get): mock_http_get.return_value = ResponseMock(status=restutil.httpclient.BAD_REQUEST) test_subject = imds.ImdsClient() self.assertRaises(HttpError, test_subject.get_compute) @patch("azurelinuxagent.ga.update.restutil.http_get") def test_get_empty_response(self, mock_http_get): mock_http_get.return_value = ResponseMock(response=''.encode('utf-8')) test_subject = imds.ImdsClient() 
self.assertRaises(ValueError, test_subject.get_compute) def test_deserialize_ComputeInfo(self): s = '''{ "location": "westcentralus", "name": "unit_test", "offer": "UnitOffer", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "UnitPublisher", "resourceGroupName": "UnitResourceGroupName", "sku": "UnitSku", "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", "tags": "Key1:Value1;Key2:Value2", "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", "version": "UnitVersion", "vmSize": "Standard_D1_v2", "vmScaleSetName": "MyScaleSet", "zone": "In" }''' data = json.loads(s, encoding='utf-8') compute_info = imds.ComputeInfo() set_properties("compute", compute_info, data) self.assertEqual('westcentralus', compute_info.location) self.assertEqual('unit_test', compute_info.name) self.assertEqual('UnitOffer', compute_info.offer) self.assertEqual('Linux', compute_info.osType) self.assertEqual('', compute_info.placementGroupId) self.assertEqual('0', compute_info.platformFaultDomain) self.assertEqual('0', compute_info.platformUpdateDomain) self.assertEqual('UnitPublisher', compute_info.publisher) self.assertEqual('UnitResourceGroupName', compute_info.resourceGroupName) self.assertEqual('UnitSku', compute_info.sku) self.assertEqual('e4402c6c-2804-4a0a-9dee-d61918fc4d28', compute_info.subscriptionId) self.assertEqual('Key1:Value1;Key2:Value2', compute_info.tags) self.assertEqual('f62f23fb-69e2-4df0-a20b-cb5c201a3e7a', compute_info.vmId) self.assertEqual('UnitVersion', compute_info.version) self.assertEqual('Standard_D1_v2', compute_info.vmSize) self.assertEqual('MyScaleSet', compute_info.vmScaleSetName) self.assertEqual('In', compute_info.zone) self.assertEqual('UnitPublisher:UnitOffer:UnitSku:UnitVersion', compute_info.image_info) def test_is_custom_image(self): image_origin = self._setup_image_origin_assert("", "", "", "") self.assertEqual(imds.IMDS_IMAGE_ORIGIN_CUSTOM, image_origin) def test_is_endorsed_CentOS(self): 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.5", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.6", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.7", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.8", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.9", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.0", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.1", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7-LVM", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7-RAW", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.5", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.8", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.1", 
"")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.1", "")) def test_is_endorsed_CoreOS(self): self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.4.0")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "899.17.0")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "1688.5.3")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.3.0")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "alpha", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "beta", "")) def test_is_endorsed_Debian(self): self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "7", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "8", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "9", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "9-DAILY", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "10-DAILY", "")) def test_is_endorsed_Rhel(self): self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", 
"6.7", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.8", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.9", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.0", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.1", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-LVM", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-RAW", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.3", "")) 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("RedHat", "RHEL", "6.6", "")) def test_is_endorsed_SuSE(self): self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "11-SP4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "11-SP4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP1", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP5", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP1", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP3", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP5", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP1", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP2", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP3", 
"")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP4", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP5", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("SuSE", "SLES", "11-SP3", "")) def test_is_endorsed_UbuntuServer(self): self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.0-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.1-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.2-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.3-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.4-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.5-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.6-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.7-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.8-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "16.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "20.04-LTS", "")) 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "22.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "12.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "17.10", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-DAILY-LTS", "")) @staticmethod def _setup_image_origin_assert(publisher, offer, sku, version): s = '''{{ "publisher": "{0}", "offer": "{1}", "sku": "{2}", "version": "{3}" }}'''.format(publisher, offer, sku, version) data = json.loads(s, encoding='utf-8') compute_info = imds.ComputeInfo() set_properties("compute", compute_info, data) return compute_info.image_origin def test_response_validation(self): # invalid json or empty response self._assert_validation(http_status_code=200, http_response='', expected_valid=False, expected_response='JSON parsing failed') self._assert_validation(http_status_code=200, http_response=None, expected_valid=False, expected_response='JSON parsing failed') self._assert_validation(http_status_code=200, http_response='{ bad json ', expected_valid=False, expected_response='JSON parsing failed') # 500 response self._assert_validation(http_status_code=500, http_response='error response', expected_valid=False, expected_response='[HTTP Failed] [500: reason] error response') # 429 response self._assert_validation(http_status_code=429, http_response='server busy', expected_valid=False, expected_response='[HTTP Failed] [429: reason] server busy') # valid json self._assert_validation(http_status_code=200, http_response=self._imds_response('valid'), expected_valid=True, expected_response='') # unicode self._assert_validation(http_status_code=200, http_response=self._imds_response('unicode'), expected_valid=True, expected_response='') def 
test_field_validation(self): # TODO: compute fields (#1249) self._assert_field('network', 'interface', 'ipv4', 'ipAddress', 'privateIpAddress') self._assert_field('network', 'interface', 'ipv4', 'ipAddress') self._assert_field('network', 'interface', 'ipv4') self._assert_field('network', 'interface', 'macAddress') self._assert_field('network') def _assert_field(self, *fields): response = self._imds_response('valid') response_obj = json.loads(ustr(response, encoding="utf-8")) # assert empty value self._update_field(response_obj, fields, '') altered_response = json.dumps(response_obj).encode() self._assert_validation(http_status_code=200, http_response=altered_response, expected_valid=False, expected_response='Empty field: [{0}]'.format(fields[-1])) # assert missing value self._update_field(response_obj, fields, None) altered_response = json.dumps(response_obj).encode() self._assert_validation(http_status_code=200, http_response=altered_response, expected_valid=False, expected_response='Missing field: [{0}]'.format(fields[-1])) def _update_field(self, obj, fields, val): if isinstance(obj, list): self._update_field(obj[0], fields, val) else: f = fields[0] if len(fields) == 1: if val is None: del obj[f] else: obj[f] = val else: self._update_field(obj[f], fields[1:], val) @staticmethod def _imds_response(f): path = os.path.join(data_dir, "imds", "{0}.json".format(f)) with open(path, "rb") as fh: return fh.read() def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response): test_subject = imds.ImdsClient() with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get: mock_http_get.return_value = ResponseMock(status=http_status_code, reason='reason', response=http_response) validate_response = test_subject.validate() self.assertEqual(1, mock_http_get.call_count) positional_args, kw_args = mock_http_get.call_args self.assertTrue('User-Agent' in kw_args['headers']) self.assertEqual(restutil.HTTP_USER_AGENT_HEALTH, 
kw_args['headers']['User-Agent']) self.assertTrue('Metadata' in kw_args['headers']) self.assertEqual(True, kw_args['headers']['Metadata']) self.assertEqual('http://169.254.169.254/metadata/instance/?api-version=2018-02-01', positional_args[0]) self.assertEqual(expected_valid, validate_response[0]) self.assertTrue(expected_response in validate_response[1], "Expected: '{0}', Actual: '{1}'" .format(expected_response, validate_response[1])) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/test_metadata.py000066400000000000000000000136441335416306700223220ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import json from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.restutil import httpclient from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.restapi import * from tests.protocol.mockmetadata import * from tests.tools import * class TestMetadataProtocolGetters(AgentTestCase): def load_json(self, path): return json.loads(ustr(load_data(path)), encoding="utf-8") @patch("time.sleep") def _test_getters(self, test_data ,_): with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = MetadataProtocol() protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) def test_getters(self, *args): test_data = MetadataProtocolData(DATA_FILE) self._test_getters(test_data, *args) def test_getters_no(self, *args): test_data = MetadataProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(mock_update.call_count, 1) self.assertEqual(mock_get.call_count, 1) manifests_uri = BASE_URI.format( METADATA_ENDPOINT, "vmAgentVersions", APIVERSION) self.assertEqual(mock_get.call_args[0][0], manifests_uri) self.assertEqual(etag, 42) self.assertNotEqual(None, manifests) self.assertEqual(len(manifests.vmAgentManifests), 1) manifest = manifests.vmAgentManifests[0] self.assertEqual(manifest.family, conf.get_autoupdate_gafamily()) self.assertEqual(len(manifest.versionsManifestUris), 2) # Same etag returns the same 
data data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 next_manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(etag, 42) self.assertEqual(manifests, next_manifests) # New etag returns new data mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests_raises(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) data = self.load_json("metadata/vmagent_manifests_invalid2.json") mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagent_pkgs(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() manifest = manifests.vmAgentManifests[0] data = self.load_json("metadata/vmagent_manifest1.json") mock_get.return_value = data, 42 pkgs = protocol.get_vmagent_pkgs(manifest) self.assertNotEqual(None, pkgs) self.assertEqual(len(pkgs.versions), 2) for pkg in pkgs.versions: self.assertNotEqual(None, pkg.version) self.assertTrue(len(pkg.uris) > 0) for uri in pkg.uris: self.assertTrue(uri.uri.endswith("zip")) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._post_data") def test_report_event(self, mock_post): events = TelemetryEventList() data = self.load_json("events/1478123456789000.tld") event = TelemetryEvent() set_properties("event", 
event, data) events.events.append(event) data = self.load_json("events/1478123456789001.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) data = self.load_json("events/1479766858966718.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) protocol = MetadataProtocol() protocol.report_event(events) events_uri = BASE_URI.format( METADATA_ENDPOINT, "status/telemetry", APIVERSION) self.assertEqual(mock_post.call_count, 1) self.assertEqual(mock_post.call_args[0][0], events_uri) self.assertEqual(mock_post.call_args[0][1], get_properties(events)) WALinuxAgent-2.2.32/tests/protocol/test_protocol_util.py000066400000000000000000000057471335416306700234450ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import * from azurelinuxagent.common.protocol import get_protocol_util, \ TAG_FILE_NAME @patch("time.sleep") class TestProtocolUtil(AgentTestCase): @patch("azurelinuxagent.common.protocol.util.MetadataProtocol") @patch("azurelinuxagent.common.protocol.util.WireProtocol") def test_detect_protocol(self, WireProtocol, MetadataProtocol, _): WireProtocol.return_value = MagicMock() MetadataProtocol.return_value = MagicMock() protocol_util = get_protocol_util() protocol_util.dhcp_handler = MagicMock() protocol_util.dhcp_handler.endpoint = "foo.bar" #Test wire protocol is available protocol = protocol_util.get_protocol() self.assertEquals(WireProtocol.return_value, protocol) #Test wire protocol is not available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() protocol = protocol_util.get_protocol() self.assertEquals(MetadataProtocol.return_value, protocol) #Test no protocol is available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() MetadataProtocol.return_value.detect.side_effect = ProtocolError() self.assertRaises(ProtocolError, protocol_util.get_protocol) def test_detect_protocol_by_file(self, _): protocol_util = get_protocol_util() protocol_util._detect_wire_protocol = Mock() protocol_util._detect_metadata_protocol = Mock() tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist protocol_util.get_protocol(by_file=True) protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() #Test tag file exists protocol_util.clear_protocol() protocol_util._detect_wire_protocol.reset_mock() protocol_util._detect_metadata_protocol.reset_mock() with open(tag_file, "w+") as tag_fd: tag_fd.write("") protocol_util.get_protocol(by_file=True) protocol_util._detect_metadata_protocol.assert_any_call() 
protocol_util._detect_wire_protocol.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/test_restapi.py000066400000000000000000000027231335416306700222050ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * import uuid import unittest import os import shutil import time from azurelinuxagent.common.protocol.restapi import * class SampleDataContract(DataContract): def __init__(self): self.foo = None self.bar = DataContractList(int) class TestDataContract(unittest.TestCase): def test_get_properties(self): obj = SampleDataContract() obj.foo = "foo" obj.bar.append(1) data = get_properties(obj) self.assertEquals("foo", data["foo"]) self.assertEquals(list, type(data["bar"])) def test_set_properties(self): obj = SampleDataContract() data = { 'foo' : 1, 'baz': 'a' } set_properties('sample', obj, data) self.assertFalse(hasattr(obj, 'baz')) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/protocol/test_wire.py000066400000000000000000000534561335416306700215150ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import stat import zipfile from azurelinuxagent.common import event from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.utils.shellutil import run_get_output from tests.common.osutil.test_default import running_under_travis from tests.protocol.mockwiredata import * data_with_bom = b'\xef\xbb\xbfhehe' testurl = 'http://foo' testtype = 'BlockBlob' wireserver_url = '168.63.129.16' @patch("time.sleep") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") class TestWireProtocol(AgentTestCase): def setUp(self): super(TestWireProtocol, self).setUp() HostPluginProtocol.set_default_channel(False) def _test_getters(self, test_data, __, MockCryptUtil, _): MockCryptUtil.side_effect = test_data.mock_crypt_util with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = WireProtocol(wireserver_url) protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) crt1 = os.path.join(self.tmp_dir, '33B0ABCE4673538650971C10F7D7397E71561F35.crt') crt2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.crt') prv2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.prv') self.assertTrue(os.path.isfile(crt1)) self.assertTrue(os.path.isfile(crt2)) self.assertTrue(os.path.isfile(prv2)) self.assertEqual("1", protocol.get_incarnation()) def 
test_getters(self, *args): """Normal case""" test_data = WireProtocolData(DATA_FILE) self._test_getters(test_data, *args) def test_getters_no_ext(self, *args): """Provision with agent is not checked""" test_data = WireProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) def test_getters_ext_no_settings(self, *args): """Extensions without any settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) self._test_getters(test_data, *args) def test_getters_ext_no_public(self, *args): """Extensions without any public settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) self._test_getters(test_data, *args) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") def test_getters_with_stale_goal_state(self, patch_report, *args): test_data = WireProtocolData(DATA_FILE) test_data.emulate_stale_goal_state = True self._test_getters(test_data, *args) # Ensure HostPlugin was invoked self.assertEqual(1, test_data.call_counts["/versions"]) self.assertEqual(2, test_data.call_counts["extensionArtifact"]) # Ensure the expected number of HTTP calls were made # -- Tracking calls to retrieve GoalState is problematic since it is # fetched often; however, the dependent documents, such as the # HostingEnvironmentConfig, will be retrieved the expected number self.assertEqual(2, test_data.call_counts["hostingenvuri"]) self.assertEqual(1, patch_report.call_count) def test_call_storage_kwargs(self, *args): from azurelinuxagent.common.utils import restutil with patch.object(restutil, 'http_get') as http_patch: http_req = restutil.http_get url = testurl headers = {} # no kwargs -- Default to True WireClient.call_storage_service(http_req) # kwargs, no use_proxy -- Default to True WireClient.call_storage_service(http_req, url, headers) # kwargs, use_proxy None -- Default to True WireClient.call_storage_service(http_req, url, headers, use_proxy=None) # kwargs, use_proxy False -- Keep False 
WireClient.call_storage_service(http_req, url, headers, use_proxy=False) # kwargs, use_proxy True -- Keep True WireClient.call_storage_service(http_req, url, headers, use_proxy=True) # assert self.assertTrue(http_patch.call_count == 5) for i in range(0,5): c = http_patch.call_args_list[i][-1]['use_proxy'] self.assertTrue(c == (True if i != 3 else False)) def test_status_blob_parsing(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(WireProtocolData(DATA_FILE).ext_conf) self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob, u'https://yuezhatest.blob.core.windows.net/vhds/test' u'-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se' u'=9999-01-01&sk=key1&sv=2014-02-14&sig' u'=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D') self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob_type, u'BlockBlob') pass def test_get_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(WireClient, "get_goal_state", return_value = goal_state) as patch_get_goal_state: host_plugin = wire_protocol_client.get_host_plugin() self.assertEqual(goal_state.container_id, host_plugin.container_id) self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name) self.assertEqual(1, patch_get_goal_state.call_count) @patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=IOError) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_fallback(self, patch_request, patch_get_host, patch_http, *args): ext_uri = 'extension_uri' host_uri = 'host_uri' destination = 'destination' patch_get_host.return_value = HostPluginProtocol(host_uri, 'container_id', 'role_config') patch_request.return_value = [host_uri, {}] 
WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) self.assertEqual(patch_http.call_count, 2) self.assertEqual(patch_request.call_count, 1) self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri) self.assertEqual(patch_http.call_args_list[1][0][1], host_uri) @skip_if_predicate_true(running_under_travis, "Travis unit tests should not have external dependencies") def test_download_ext_handler_pkg_stream(self, *args): ext_uri = 'https://dcrdata.blob.core.windows.net/files/packer.zip' tmp = tempfile.mkdtemp() destination = os.path.join(tmp, 'test_download_ext_handler_pkg_stream.zip') success = WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) self.assertTrue(success) self.assertTrue(os.path.exists(destination)) # verify size self.assertEqual(18380915, os.stat(destination).st_size) # verify unzip zipfile.ZipFile(destination).extractall(tmp) packer = os.path.join(tmp, 'packer') self.assertTrue(os.path.exists(packer)) fileutil.chmod(packer, os.stat(packer).st_mode | stat.S_IXUSR) # verify unpacked size self.assertEqual(87393596, os.stat(packer).st_size) # execute, verify result packer_version = '{0} --version'.format(packer) rc, stdout = run_get_output(packer_version) self.assertEqual(0, rc) self.assertEqual('1.2.5\n', stdout) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_default(self, *args): """ Default status blob method is HostPlugin. 
""" vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() # do not call the direct method unless host plugin fails patch_default_upload.assert_not_called() # host plugin always fetches a goal state patch_get_goal_state.assert_called_once_with() # host plugin uploads the status blob patch_host_ga_plugin_upload.assert_called_once_with(ANY, testurl, 'BlockBlob') @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_host_ga_plugin(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(HostPluginProtocol, "ensure_initialized", return_value=True): with patch.object(StatusBlob, "upload", return_value=False) as patch_default_upload: with patch.object(HostPluginProtocol, "_put_block_blob_status") as patch_http: HostPluginProtocol.set_default_channel(False) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.upload_status_blob() patch_default_upload.assert_not_called() self.assertEqual(1, 
wire_protocol_client.get_goal_state.call_count) patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob) self.assertFalse(HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized") def test_upload_status_blob_unknown_type_assumes_block(self, _, __, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = "NotALegalType" wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(StatusBlob, "prepare") as patch_prepare: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() patch_prepare.assert_called_once_with("BlockBlob") patch_default_upload.assert_called_once_with(testurl) patch_get_goal_state.assert_called_once_with() @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_reports_prepare_error(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare: self.assertRaises(ProtocolError, wire_protocol_client.upload_status_blob) self.assertEqual(1, mock_prepare.call_count) def 
test_get_in_vm_artifacts_profile_blob_not_available(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) # Test when artifacts_profile_blob is null/None self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) #Test when artifacts_profile_blob is whitespace wire_protocol_client.ext_conf.artifacts_profile_blob = " " self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) def test_get_in_vm_artifacts_profile_response_body_not_valid(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) with patch.object(HostPluginProtocol, "get_artifact_request", return_value = ['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers: #Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(None, 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) #Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(' '.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) #Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{ }'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(), in_vm_artifacts_profile.__dict__, 'If artifacts_profile_blob has empty json dictionary, in_vm_artifacts_profile ' 'should contain nothing') host_plugin_get_artifact_url_and_headers.assert_called_with(testurl) @patch("azurelinuxagent.common.event.add_event") def 
test_artifacts_profile_json_parsing(self, patch_event, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) # response is invalid json wire_protocol_client.call_storage_service = Mock(return_value=MockResponse("invalid json".encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() # ensure response is empty self.assertEqual(None, in_vm_artifacts_profile) # ensure event is logged self.assertEqual(1, patch_event.call_count) self.assertFalse(patch_event.call_args[1]['is_success']) self.assertTrue('invalid json' in patch_event.call_args[1]['message']) self.assertEqual('ArtifactsProfileBlob', patch_event.call_args[1]['op']) def test_get_in_vm_artifacts_profile_default(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) def test_fetch_manifest_fallback(self, *args): uri1 = ExtHandlerVersionUri() uri1.uri = 'ext_uri' uris = DataContractList(ExtHandlerVersionUri) uris.append(uri1) host_uri = 'host_uri' mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') client = WireProtocol(wireserver_url).client with patch.object(WireClient, "fetch", return_value=None) as patch_fetch: with 
patch.object(WireClient, "get_host_plugin", return_value=mock_host): with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[host_uri, {}]): HostPluginProtocol.set_default_channel(False) self.assertRaises(ProtocolError, client.fetch_manifest, uris) self.assertEqual(patch_fetch.call_count, 2) self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri) self.assertEqual(patch_fetch.call_args_list[1][0][0], host_uri) def test_get_in_vm_artifacts_profile_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}']) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as artifact_request: in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is not None) self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) artifact_request.assert_called_once_with(testurl) @patch("socket.gethostname", return_value="hostname") @patch("time.gmtime", return_value=time.localtime(1485543256)) def test_report_vm_status(self, *args): status = 'status' message = 'message' client = WireProtocol(wireserver_url).client actual = StatusBlob(client=client) actual.set_vm_status(VMStatus(status=status, message=message)) timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) formatted_msg = { 'lang': 'en-US', 'message': message } v1_ga_status = { 'version': str(CURRENT_VERSION), 'status': status, 'formattedMessage': formatted_msg } v1_ga_guest_info = { 'computerName': socket.gethostname(), 'osName': DISTRO_NAME, 'osVersion': DISTRO_VERSION, 'version': 
str(CURRENT_VERSION), } v1_agg_status = { 'guestAgentStatus': v1_ga_status, 'handlerAggregateStatus': [] } v1_vm_status = { 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, 'guestOSInfo' : v1_ga_guest_info } self.assertEqual(json.dumps(v1_vm_status), actual.to_json()) class MockResponse: def __init__(self, body, status_code): self.body = body self.status = status_code def read(self): return self.body if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/test_agent.py000066400000000000000000000156541335416306700200020ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os.path from azurelinuxagent.agent import * from azurelinuxagent.common.conf import * from tests.tools import * EXPECTED_CONFIGURATION = \ """AutoUpdate.Enabled = True AutoUpdate.GAFamily = Prod Autoupdate.Frequency = 3600 CGroups.EnforceLimits = False CGroups.Excluded = customscript,runcommand DVD.MountPoint = /mnt/cdrom/secure DetectScvmmEnv = False EnableOverProvisioning = True Extension.LogDir = /var/log/azure Extensions.Enabled = True HttpProxy.Host = None HttpProxy.Port = None Lib.Dir = /var/lib/waagent Logs.Verbose = False OS.AllowHTTP = False OS.CheckRdmaDriver = False OS.EnableFIPS = True OS.EnableFirewall = False OS.EnableRDMA = False OS.HomeDir = /home OS.OpensslPath = /usr/bin/openssl OS.PasswordPath = /etc/shadow OS.RootDeviceScsiTimeout = 300 OS.SshClientAliveInterval = 42 OS.SshDir = /notareal/path OS.SudoersDir = /etc/sudoers.d OS.UpdateRdmaDriver = False Pid.File = /var/run/waagent.pid Provisioning.AllowResetSysUser = False Provisioning.DecodeCustomData = False Provisioning.DeleteRootPassword = True Provisioning.Enabled = True Provisioning.ExecuteCustomData = False Provisioning.MonitorHostName = True Provisioning.PasswordCryptId = 6 Provisioning.PasswordCryptSaltLength = 10 Provisioning.RegenerateSshHostKeyPair = True Provisioning.SshHostKeyPairType = rsa Provisioning.UseCloudInit = True ResourceDisk.EnableSwap = False ResourceDisk.Filesystem = ext4 ResourceDisk.Format = True ResourceDisk.MountOptions = None ResourceDisk.MountPoint = /mnt/resource ResourceDisk.SwapSizeMB = 0""".split('\n') class TestAgent(AgentTestCase): def test_accepts_configuration_path(self): conf_path = os.path.join(data_dir, "test_waagent.conf") c, f, v, cfp = parse_args(["-configuration-path:" + conf_path]) self.assertEqual(cfp, conf_path) @patch("os.path.exists", return_value=True) def test_checks_configuration_path(self, mock_exists): conf_path = "/foo/bar-baz/something.conf" c, f, v, cfp = 
parse_args(["-configuration-path:"+conf_path]) self.assertEqual(cfp, conf_path) self.assertEqual(mock_exists.call_count, 1) @patch("sys.stderr") @patch("os.path.exists", return_value=False) @patch("sys.exit", side_effect=Exception) def test_rejects_missing_configuration_path(self, mock_exit, mock_exists, mock_stderr): try: c, f, v, cfp = parse_args(["-configuration-path:/foo/bar.conf"]) self.assertTrue(False) except Exception: self.assertEqual(mock_exit.call_count, 1) def test_configuration_path_defaults_to_none(self): c, f, v, cfp = parse_args([]) self.assertEqual(cfp, None) def test_agent_accepts_configuration_path(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(conf.get_fips_enabled()) @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_uses_default_configuration_path(self, mock_load): Agent(False) mock_load.assert_called_once_with("/etc/waagent.conf") @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_does_not_pass_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() mock_handler.return_value = mock_daemon agent = Agent(False) agent.daemon() mock_daemon.run.assert_called_once_with(child_args=None) self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_passes_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() mock_handler.return_value = mock_daemon agent = Agent(False, conf_file_path="/foo/bar.conf") agent.daemon() mock_daemon.run.assert_called_once_with(child_args="-configuration-path:/foo/bar.conf") self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_ensures_extension_log_directory(self, mock_dir): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") 
mock_dir.return_value = ext_log_dir self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isdir(ext_log_dir)) @patch("azurelinuxagent.common.logger.error") @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_logs_if_extension_log_directory_is_a_file(self, mock_dir, mock_log): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") mock_dir.return_value = ext_log_dir fileutil.write_file(ext_log_dir, "Foo") self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) self.assertEqual(1, mock_log.call_count) def test_agent_get_configuration(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) actual_configuration = [] configuration = conf.get_configuration() for k in sorted(configuration.keys()): actual_configuration.append("{0} = {1}".format(k, configuration[k])) self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration) def test_agent_usage_message(self): message = usage() # Python 2.6 does not have assertIn() self.assertTrue("-verbose" in message) self.assertTrue("-force" in message) self.assertTrue("-help" in message) self.assertTrue("-configuration-path" in message) self.assertTrue("-deprovision" in message) self.assertTrue("-register-service" in message) self.assertTrue("-version" in message) self.assertTrue("-daemon" in message) self.assertTrue("-start" in message) self.assertTrue("-run-exthandlers" in message) self.assertTrue("-show-configuration" in message) # sanity check self.assertFalse("-not-a-valid-option" in message) WALinuxAgent-2.2.32/tests/test_import.py000066400000000000000000000022031335416306700202000ustar00rootroot00000000000000from tests.tools import * import azurelinuxagent.common.osutil as osutil import 
azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.protocol as protocol import azurelinuxagent.pa.provision as provision import azurelinuxagent.pa.deprovision as deprovision import azurelinuxagent.daemon as daemon import azurelinuxagent.daemon.resourcedisk as resourcedisk import azurelinuxagent.daemon.scvmm as scvmm import azurelinuxagent.ga.exthandlers as exthandlers import azurelinuxagent.ga.monitor as monitor import azurelinuxagent.ga.remoteaccess as remoteaccess import azurelinuxagent.ga.update as update class TestImportHandler(AgentTestCase): def test_get_handler(self): osutil.get_osutil() protocol.get_protocol_util() dhcp.get_dhcp_handler() provision.get_provision_handler() deprovision.get_deprovision_handler() daemon.get_daemon_handler() resourcedisk.get_resourcedisk_handler() scvmm.get_scvmm_handler() monitor.get_monitor_handler() update.get_update_handler() exthandlers.get_exthandlers_handler() remoteaccess.get_remote_access_handler() WALinuxAgent-2.2.32/tests/tools.py000066400000000000000000000320471335416306700170000ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # """ Define util functions for unit test """ import difflib import os import pprint import re import shutil import tempfile import unittest from functools import wraps import time import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import PY_VERSION_MAJOR # Import mock module for Python2 and Python3 try: from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call except ImportError: from mock import Mock, patch, MagicMock, ANY, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") debug = False if os.environ.get('DEBUG') == '1': debug = True # Enable verbose logger to stdout if debug: logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) def _do_nothing(): pass _MAX_LENGTH = 120 _MAX_LENGTH_SAFE_REPR = 80 def safe_repr(obj, short=False): try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH_SAFE_REPR] + ' [truncated]...' def skip_if_predicate_false(predicate, message): if not predicate(): if hasattr(unittest, "skip"): return unittest.skip(message) return lambda func: None return lambda func: func def skip_if_predicate_true(predicate, message): if predicate(): if hasattr(unittest, "skip"): return unittest.skip(message) return lambda func: None return lambda func: func def _safe_repr(obj, short=False): """ Copied from Python 3.x """ try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' 
class AgentTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # Setup newer unittest assertions missing in prior versions of Python if not hasattr(cls, "assertRegex"): cls.assertRegex = cls.assertRegexpMatches if hasattr(cls, "assertRegexpMatches") else _do_nothing if not hasattr(cls, "assertNotRegex"): cls.assertNotRegex = cls.assertNotRegexpMatches if hasattr(cls, "assertNotRegexpMatches") else _do_nothing if not hasattr(cls, "assertIn"): cls.assertIn = cls.emulate_assertIn if not hasattr(cls, "assertNotIn"): cls.assertNotIn = cls.emulate_assertNotIn if not hasattr(cls, "assertGreater"): cls.assertGreater = cls.emulate_assertGreater if not hasattr(cls, "assertLess"): cls.assertLess = cls.emulate_assertLess if not hasattr(cls, "assertIsNone"): cls.assertIsNone = cls.emulate_assertIsNone if not hasattr(cls, "assertIsNotNone"): cls.assertIsNotNone = cls.emulate_assertIsNotNone if hasattr(cls, "assertRaisesRegexp"): cls.assertRaisesRegex = cls.assertRaisesRegexp if not hasattr(cls, "assertRaisesRegex"): cls.assertRaisesRegex = cls.emulate_raises_regex if not hasattr(cls, "assertListEqual"): cls.assertListEqual = cls.emulate_assertListEqual def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) self.test_file = 'test_file' conf.get_autoupdate_enabled = Mock(return_value=True) conf.get_lib_dir = Mock(return_value=self.tmp_dir) ext_log_dir = os.path.join(self.tmp_dir, "azure") conf.get_ext_log_dir = Mock(return_value=ext_log_dir) conf.get_agent_pid_file_path = Mock(return_value=os.path.join(self.tmp_dir, "waagent.pid")) event.init_event_status(self.tmp_dir) event.init_event_logger(self.tmp_dir) def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) def emulate_assertIn(self, a, b, msg=None): if a not in b: msg = msg if msg is not None else "{0} not found in {1}".format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertNotIn(self, a, b, msg=None): if a in 
b: msg = msg if msg is not None else "{0} unexpectedly found in {1}".format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertGreater(self, a, b, msg=None): if not a > b: msg = msg if msg is not None else '{0} not greater than {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertLess(self, a, b, msg=None): if not a < b: msg = msg if msg is not None else '{0} not less than {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertIsNone(self, x, msg=None): if x is not None: msg = msg if msg is not None else '{0} is not None'.format(_safe_repr(x)) self.fail(msg) def emulate_assertIsNotNone(self, x, msg=None): if x is None: msg = msg if msg is not None else '{0} is None'.format(_safe_repr(x)) self.fail(msg) def emulate_raises_regex(self, exception_type, regex, function, *args, **kwargs): try: function(*args, **kwargs) except Exception as e: if re.search(regex, str(e), flags=1) is not None: return else: self.fail("Expected exception {0} matching {1}. Actual: {2}".format( exception_type, regex, str(e))) self.fail("No exception was thrown. Expected exception {0} matching {1}".format(exception_type, regex)) def emulate_assertListEqual(self, seq1, seq2, msg=None, seq_type=None): """An equality assertion for ordered sequences (like lists and tuples). For the purposes of this function, a valid ordered sequence type is one which can be indexed, has a length, and has an equality operator. Args: seq1: The first sequence to compare. seq2: The second sequence to compare. seq_type: The expected datatype of the sequences, or None if no datatype should be enforced. msg: Optional message to use on failure instead of a list of differences. 
""" if seq_type is not None: seq_type_name = seq_type.__name__ if not isinstance(seq1, seq_type): raise self.failureException('First sequence is not a %s: %s' % (seq_type_name, safe_repr(seq1))) if not isinstance(seq2, seq_type): raise self.failureException('Second sequence is not a %s: %s' % (seq_type_name, safe_repr(seq2))) else: seq_type_name = "sequence" differing = None try: len1 = len(seq1) except (TypeError, NotImplementedError): differing = 'First %s has no length. Non-sequence?' % ( seq_type_name) if differing is None: try: len2 = len(seq2) except (TypeError, NotImplementedError): differing = 'Second %s has no length. Non-sequence?' % ( seq_type_name) if differing is None: if seq1 == seq2: return seq1_repr = safe_repr(seq1) seq2_repr = safe_repr(seq2) if len(seq1_repr) > 30: seq1_repr = seq1_repr[:30] + '...' if len(seq2_repr) > 30: seq2_repr = seq2_repr[:30] + '...' elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr) differing = '%ss differ: %s != %s\n' % elements for i in xrange(min(len1, len2)): try: item1 = seq1[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of first %s\n' % (i, seq_type_name)) break try: item2 = seq2[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of second %s\n' % (i, seq_type_name)) break if item1 != item2: differing += ('\nFirst differing element %d:\n%s\n%s\n' % (i, safe_repr(item1), safe_repr(item2))) break else: if (len1 == len2 and seq_type is None and type(seq1) != type(seq2)): # The sequences are the same, but have differing types. 
return if len1 > len2: differing += ('\nFirst %s contains %d additional ' 'elements.\n' % (seq_type_name, len1 - len2)) try: differing += ('First extra element %d:\n%s\n' % (len2, safe_repr(seq1[len2]))) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of first %s\n' % (len2, seq_type_name)) elif len1 < len2: differing += ('\nSecond %s contains %d additional ' 'elements.\n' % (seq_type_name, len2 - len1)) try: differing += ('First extra element %d:\n%s\n' % (len1, safe_repr(seq2[len1]))) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of second %s\n' % (len1, seq_type_name)) standardMsg = differing diffMsg = '\n' + '\n'.join( difflib.ndiff(pprint.pformat(seq1).splitlines(), pprint.pformat(seq2).splitlines())) standardMsg = self._truncateMessage(standardMsg, diffMsg) msg = self._formatMessage(msg, standardMsg) self.fail(msg) @staticmethod def _create_files(tmp_dir, prefix, suffix, count, with_sleep=0): for i in range(count): f = os.path.join(tmp_dir, '.'.join((prefix, str(i), suffix))) fileutil.write_file(f, "faux content") time.sleep(with_sleep) def load_data(name): """Load test data""" path = os.path.join(data_dir, name) with open(path, "r") as data_file: return data_file.read() def load_bin_data(name): """Load test bin data""" path = os.path.join(data_dir, name) with open(path, "rb") as data_file: return data_file.read() supported_distro = [ ["ubuntu", "12.04", ""], ["ubuntu", "14.04", ""], ["ubuntu", "14.10", ""], ["ubuntu", "15.10", ""], ["ubuntu", "15.10", "Snappy Ubuntu Core"], ["coreos", "", ""], ["suse", "12", "SUSE Linux Enterprise Server"], ["suse", "13.2", "openSUSE"], ["suse", "11", "SUSE Linux Enterprise Server"], ["suse", "13.1", "openSUSE"], ["debian", "6.0", ""], ["redhat", "6.5", ""], ["redhat", "7.0", ""], ] def open_patch(): open_name = '__builtin__.open' if PY_VERSION_MAJOR == 3: open_name = 'builtins.open' return open_name def 
distros(distro_name=".*", distro_version=".*", distro_full_name=".*"): """Run test on multiple distros""" def decorator(test_method): @wraps(test_method) def wrapper(self, *args, **kwargs): for distro in supported_distro: if re.match(distro_name, distro[0]) and \ re.match(distro_version, distro[1]) and \ re.match(distro_full_name, distro[2]): if debug: logger.info("Run {0} on {1}", test_method.__name__, distro) new_args = [] new_args.extend(args) new_args.extend(distro) test_method(self, *new_args, **kwargs) # Call tearDown and setUp to create separated environment # for distro testing self.tearDown() self.setUp() return wrapper return decorator WALinuxAgent-2.2.32/tests/utils/000077500000000000000000000000001335416306700164205ustar00rootroot00000000000000WALinuxAgent-2.2.32/tests/utils/__init__.py000066400000000000000000000011651335416306700205340ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.32/tests/utils/process_target.sh000077500000000000000000000006621335416306700220070ustar00rootroot00000000000000#!/bin/sh count=0 exitcode=0 while [ $# -gt 0 ]; do case "$1" in -e) shift echo $1 1>&2 shift ;; -o) shift echo $1 shift ;; -t) shift count=$1 shift ;; -x) shift exitcode=$1 shift ;; *) break ;; esac done if [ $count -gt 0 ]; then for iter in $(seq 1 $count); do sleep 1 echo "Iteration $iter" done fi exit $exitcode WALinuxAgent-2.2.32/tests/utils/test_archive.py000066400000000000000000000211031335416306700214470ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. from datetime import datetime, timedelta import zipfile from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, MAX_ARCHIVED_STATES from tests.tools import * debug = False if os.environ.get('DEBUG') == '1': debug = True # Enable verbose logger to stdout if debug: logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) class TestArchive(AgentTestCase): def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) def _write_file(self, fn, contents=None): full_name = os.path.join(self.tmp_dir, fn) fileutil.mkdir(os.path.dirname(full_name)) with open(full_name, 'w') as fh: data = contents if contents is not None else fn fh.write(data) return full_name @property def history_dir(self): return os.path.join(self.tmp_dir, 'history') def test_archive00(self): """ StateFlusher should move all 'goal state' files to a new directory under the history folder that is timestamped. 
""" temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] for f in temp_files: self._write_file(f) test_subject = StateFlusher(self.tmp_dir) test_subject.flush(datetime.utcnow()) self.assertTrue(os.path.exists(self.history_dir)) self.assertTrue(os.path.isdir(self.history_dir)) timestamp_dirs = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_dirs)) self.assertIsIso8601(timestamp_dirs[0]) ts = self.parse_isoformat(timestamp_dirs[0]) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) for f in temp_files: history_path = os.path.join(self.history_dir, timestamp_dirs[0], f) msg = "expected the temp file {0} to exist".format(history_path) self.assertTrue(os.path.exists(history_path), msg) def test_archive01(self): """ StateArchiver should archive all history directories by 1. Creating a .zip of a timestamped directory's files 2. Saving the .zip to /var/lib/waagent/history/ 2. Deleting the timestamped directory """ temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] for f in temp_files: self._write_file(f) flusher = StateFlusher(self.tmp_dir) flusher.flush(datetime.utcnow()) test_subject = StateArchiver(self.tmp_dir) test_subject.archive() timestamp_zips = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_zips)) zip_fn = timestamp_zips[0] # 2000-01-01T00:00:00.000000.zip ts_s = os.path.splitext(zip_fn)[0] # 2000-01-01T00:00:00.000000 self.assertIsIso8601(ts_s) ts = self.parse_isoformat(ts_s) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) zip_full = os.path.join(self.history_dir, zip_fn) self.assertZipContains(zip_full, temp_files) def test_archive02(self): """ StateArchiver should purge the MAX_ARCHIVED_STATES oldest files or directories. The oldest timestamps are purged first. This test case creates a mixture of archive files and directories. 
It creates 5 more values than MAX_ARCHIVED_STATES to ensure that 5 archives are cleaned up. It asserts that the files and directories are properly deleted from the disk. """ count = 6 total = MAX_ARCHIVED_STATES + count start = datetime.now() timestamps = [] for i in range(0, total): ts = start + timedelta(seconds=i) timestamps.append(ts) if i % 2 == 0: fn = os.path.join('history', ts.isoformat(), 'Prod.0.manifest.xml') else: fn = os.path.join('history', "{0}.zip".format(ts.isoformat())) self._write_file(fn) self.assertEqual(total, len(os.listdir(self.history_dir))) test_subject = StateArchiver(self.tmp_dir) test_subject.purge() archived_entries = os.listdir(self.history_dir) self.assertEqual(MAX_ARCHIVED_STATES, len(archived_entries)) archived_entries.sort() for i in range(0, MAX_ARCHIVED_STATES): ts = timestamps[i + count].isoformat() if i % 2 == 0: fn = ts else: fn = "{0}.zip".format(ts) self.assertTrue(fn in archived_entries, "'{0}' is not in the list of unpurged entires".format(fn)) def test_archive03(self): """ If the StateFlusher has to flush the same file, it should overwrite the existing one. 
""" temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] def _write_goal_state_files(temp_files, content=None): for f in temp_files: self._write_file(f, content) def _check_history_files(timestamp_dir, files, content=None): for f in files: history_path = os.path.join(self.history_dir, timestamp_dir, f) msg = "expected the temp file {0} to exist".format(history_path) self.assertTrue(os.path.exists(history_path), msg) expected_content = f if content is None else content actual_content = fileutil.read_file(history_path) self.assertEqual(expected_content, actual_content) timestamp = datetime.utcnow() _write_goal_state_files(temp_files) test_subject = StateFlusher(self.tmp_dir) test_subject.flush(timestamp) # Ensure history directory exists, has proper timestamped-based name, self.assertTrue(os.path.exists(self.history_dir)) self.assertTrue(os.path.isdir(self.history_dir)) timestamp_dirs = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_dirs)) self.assertIsIso8601(timestamp_dirs[0]) ts = self.parse_isoformat(timestamp_dirs[0]) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) # Ensure saved files contain the right content _check_history_files(timestamp_dirs[0], temp_files) # re-write all of the same files with different content, and flush again. # .flush() should overwrite the existing ones _write_goal_state_files(temp_files, "--this-has-been-changed--") test_subject.flush(timestamp) # The contents of the saved files were overwritten as a result of the flush. _check_history_files(timestamp_dirs[0], temp_files, "--this-has-been-changed--") def test_archive04(self): """ The archive directory is created if it does not exist. This failure was caught when .purge() was called before .archive(). 
""" test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist')) test_subject.purge() def parse_isoformat(self, s): return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f') def assertIsIso8601(self, s): try: self.parse_isoformat(s) except: raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(s)) def _total_seconds(self, td): """ Compute the total_seconds for a timedelta because 2.6 does not have total_seconds. """ return (0.0 + td.microseconds + (td.seconds + td.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6 def assertDateTimeCloseTo(self, t1, t2, within): if t1 <= t2: diff = t2 -t1 else: diff = t1 - t2 secs = self._total_seconds(within - diff) if secs < 0: self.fail("the timestamps are outside of the tolerance of by {0} seconds".format(secs)) def assertZipContains(self, zip_fn, files): ziph = zipfile.ZipFile(zip_fn, 'r') zip_files = [x.filename for x in ziph.filelist] for f in files: self.assertTrue(f in zip_files, "'{0}' was not found in {1}".format(f, zip_fn)) ziph.close() WALinuxAgent-2.2.32/tests/utils/test_crypt_util.py000066400000000000000000000060611335416306700222320ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import binascii import errno as errno import glob import random import string import subprocess import sys import tempfile import uuid import unittest import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.exception import CryptError from azurelinuxagent.common.version import PY_VERSION_MAJOR from tests.tools import * from subprocess import CalledProcessError def is_python_version_26(): return sys.version_info[0] == 2 and sys.version_info[1] == 6 class TestCryptoUtilOperations(AgentTestCase): def test_decrypt_encrypted_text(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) secret = ']aPPEv}uNg1FPnl?' crypto = CryptUtil(conf.get_openssl_cmd()) decrypted_string = crypto.decrypt_secret(encrypted_string, prv_key) self.assertEquals(secret, decrypted_string, "decrypted string does not match expected") def test_decrypt_encrypted_text_missing_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, "abc" + prv_key) @skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6") def test_decrypt_encrypted_text_wrong_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "wrong.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/trans_prv")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) def test_decrypt_encrypted_text_text_not_encrypted(self): encrypted_string = "abc@123" 
prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_file_util.py000066400000000000000000000252551335416306700220160ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import errno as errno import glob import random import string import tempfile import uuid import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from tests.tools import * class TestFileOperations(AgentTestCase): def test_read_write_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.write_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_write_file_content_is_None(self): """ write_file throws when content is None. No file is created. 
""" try: test_file=os.path.join(self.tmp_dir, self.test_file) fileutil.write_file(test_file, None) self.fail("expected write_file to throw an exception") except: self.assertEquals(False, os.path.exists(test_file)) def test_rw_utf8_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = u"\u6211" fileutil.write_file(test_file, content, encoding="utf-8") content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_remove_bom(self): test_file=os.path.join(self.tmp_dir, self.test_file) data = b'\xef\xbb\xbfhehe' fileutil.write_file(test_file, data, asbin=True) data = fileutil.read_file(test_file, remove_bom=True) self.assertNotEquals(0xbb, ord(data[0])) def test_append_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.append_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_findre_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*rst line$")) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*ond line$")) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*with more.*")) self.assertNotEquals( None, fileutil.findre_in_file(fp, "^Third.*")) self.assertEquals( None, fileutil.findre_in_file(fp, "^Do not match.*")) def test_findstr_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertTrue(fileutil.findstr_in_file(fp, "First line")) self.assertTrue(fileutil.findstr_in_file(fp, "Second line")) self.assertTrue( fileutil.findstr_in_file(fp, "Third line with more words")) self.assertFalse(fileutil.findstr_in_file(fp, "Not a line")) def test_get_last_path_element(self): filepath = '/tmp/abc.def' filename = 
fileutil.base_name(filepath) self.assertEquals('abc.def', filename) filepath = '/tmp/abc' filename = fileutil.base_name(filepath) self.assertEquals('abc', filename) def test_remove_files(self): random_word = lambda : ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) #Create 10 test files test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') test_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] for file in test_files: open(file, 'a').close() #Remove files using fileutil.rm_files test_file_pattern = test_file + '*' test_file_pattern2 = test_file2 + '*' fileutil.rm_files(test_file_pattern, test_file_pattern2) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern)))) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern2)))) def test_remove_dirs(self): dirs = [] for n in range(0,5): dirs.append(tempfile.mkdtemp()) for d in dirs: for n in range(0, random.choice(range(0,10))): fileutil.write_file(os.path.join(d, "test"+str(n)), "content") for n in range(0, random.choice(range(0,10))): dd = os.path.join(d, "testd"+str(n)) os.mkdir(dd) for nn in range(0, random.choice(range(0,10))): os.symlink(dd, os.path.join(dd, "sym"+str(nn))) for n in range(0, random.choice(range(0,10))): os.symlink(d, os.path.join(d, "sym"+str(n))) fileutil.rm_dirs(*dirs) for d in dirs: self.assertEqual(len(os.listdir(d)), 0) def test_get_all_files(self): random_word = lambda: ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) # Create 10 test files at the root dir and 10 other in the sub dir test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') expected_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] test_subdir = os.path.join(self.tmp_dir, 'test_dir') 
os.mkdir(test_subdir) test_file_in_subdir = os.path.join(test_subdir, self.test_file) test_file_in_subdir2 = os.path.join(test_subdir, 'another_file') expected_files.extend([test_file_in_subdir + random_word() for _ in range(5)] + \ [test_file_in_subdir2 + random_word() for _ in range(5)]) for file in expected_files: open(file, 'a').close() # Get All files using fileutil.get_all_files actual_files = fileutil.get_all_files(self.tmp_dir) self.assertEqual(set(expected_files), set(actual_files)) @patch('os.path.isfile') def test_update_conf_file(self, _): new_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" existing_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ DHCP_HOSTNAME=existing\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" bad_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=no_new_line" updated_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=test\n" path = 'path' with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=existing_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=bad_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) def 
test_clean_ioerror_ignores_missing(self): e = IOError() e.errno = errno.ENOSPC # Send no paths fileutil.clean_ioerror(e) # Send missing file(s) / directories fileutil.clean_ioerror(e, paths=['/foo/not/here', None, '/bar/not/there']) def test_clean_ioerror_ignores_unless_ioerror(self): try: d = tempfile.mkdtemp() fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') # Send non-IOError exception e = Exception() fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) # Send unrecognized IOError e = IOError() e.errno = errno.EFAULT self.assertFalse(e.errno in fileutil.KNOWN_IOERRORS) fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) finally: shutil.rmtree(d) os.remove(f) def test_clean_ioerror_removes_files(self): fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[f]) self.assertFalse(os.path.isdir(f)) self.assertFalse(os.path.isfile(f)) def test_clean_ioerror_removes_directories(self): d1 = tempfile.mkdtemp() d2 = tempfile.mkdtemp() for n in ['foo', 'bar']: fileutil.write_file(os.path.join(d2, n), 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[d1, d2]) self.assertFalse(os.path.isdir(d1)) self.assertFalse(os.path.isfile(d1)) self.assertFalse(os.path.isdir(d2)) self.assertFalse(os.path.isfile(d2)) def test_clean_ioerror_handles_a_range_of_errors(self): for err in fileutil.KNOWN_IOERRORS: e = IOError() e.errno = err d = tempfile.mkdtemp() fileutil.clean_ioerror(e, paths=[d]) self.assertFalse(os.path.isdir(d)) self.assertFalse(os.path.isfile(d)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_flexible_version.py000066400000000000000000000356501335416306700234010ustar00rootroot00000000000000import random import re import unittest from azurelinuxagent.common.utils.flexible_version 
import FlexibleVersion class TestFlexibleVersion(unittest.TestCase): def setUp(self): self.v = FlexibleVersion() def test_compile_separator(self): tests = [ '.', '', '-' ] for t in tests: t_escaped = re.escape(t) t_re = re.compile(t_escaped) self.assertEqual((t_escaped, t_re), self.v._compile_separator(t)) self.assertEqual(('', re.compile('')), self.v._compile_separator(None)) return def test_compile_pattern(self): self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1alpha': True, '1.alpha': True, '1-alpha': True, '1alpha0': True, '1.alpha0': True, '1-alpha0': True, '1.2alpha': True, '1.2.alpha': True, '1.2-alpha': True, '1.2alpha0': True, '1.2.alpha0': True, '1.2-alpha0': True, '1beta': True, '1.beta': True, '1-beta': True, '1beta0': True, '1.beta0': True, '1-beta0': True, '1.2beta': True, '1.2.beta': True, '1.2-beta': True, '1.2beta0': True, '1.2.beta0': True, '1.2-beta0': True, '1rc': True, '1.rc': True, '1-rc': True, '1rc0': True, '1.rc0': True, '1-rc0': True, '1.2rc': True, '1.2.rc': True, '1.2-rc': True, '1.2rc0': True, '1.2.rc0': True, '1.2-rc0': True, '1.2.3.4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_sep(self): self.v.sep = '-' self.v._compile_pattern() tests = { '1': True, '1-2': True, '1-2-3': True, '1-2-3-4': True, '1-2-3-4-5': True, '1alpha': True, '1-alpha': True, '1-alpha': True, '1alpha0': True, '1-alpha0': True, '1-alpha0': True, '1-2alpha': True, '1-2.alpha': True, '1-2-alpha': True, '1-2alpha0': True, '1-2.alpha0': True, '1-2-alpha0': True, '1beta': True, '1-beta': True, '1-beta': True, '1beta0': True, '1-beta0': True, '1-beta0': True, '1-2beta': True, '1-2.beta': True, '1-2-beta': True, '1-2beta0': True, '1-2.beta0': True, '1-2-beta0': True, '1rc': 
True, '1-rc': True, '1-rc': True, '1rc0': True, '1-rc0': True, '1-rc0': True, '1-2rc': True, '1-2.rc': True, '1-2-rc': True, '1-2rc0': True, '1-2.rc0': True, '1-2-rc0': True, '1-2-3-4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_prerel(self): self.v.prerel_tags = ('a', 'b', 'c') self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1a': True, '1.a': True, '1-a': True, '1a0': True, '1.a0': True, '1-a0': True, '1.2a': True, '1.2.a': True, '1.2-a': True, '1.2a0': True, '1.2.a0': True, '1.2-a0': True, '1b': True, '1.b': True, '1-b': True, '1b0': True, '1.b0': True, '1-b0': True, '1.2b': True, '1.2.b': True, '1.2-b': True, '1.2b0': True, '1.2.b0': True, '1.2-b0': True, '1c': True, '1.c': True, '1-c': True, '1c0': True, '1.c0': True, '1-c0': True, '1.2c': True, '1.2.c': True, '1.2-c': True, '1.2c0': True, '1.2.c0': True, '1.2-c0': True, '1.2.3.4a5': True, ' 1': False, '1.2.3.4alpha5': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_ensure_compatible_separators(self): v1 = FlexibleVersion('1.2.3') v2 = FlexibleVersion('1-2-3', sep='-') try: v1 == v2 self.assertTrue(False, "Incompatible separators failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible separators raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('alpha', 'beta', 'rc')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) try: v1 == v2 
self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_length(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_order(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) v2 = FlexibleVersion('1.2.3', prerel_tags=('b', 'a')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_major(self): tests = { '1' : 1, '1.2' : 1, '1.2.3' : 1, '1.2.3.4' : 1 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).major) return def test_minor(self): tests = { '1' : 0, '1.2' : 2, '1.2.3' : 2, '1.2.3.4' : 2 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).minor) return def test_patch(self): tests = { '1' : 0, '1.2' : 0, '1.2.3' : 3, '1.2.3.4' : 3 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).patch) return def test_parse(self): tests = { "1.2.3.4": ((1, 2, 3, 4), None), "1.2.3.4alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4-alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4.alpha5": ((1, 2, 3, 4), ('alpha', 5)) } for test in iter(tests): expectation = tests[test] self.v._parse(test) 
self.assertEqual(expectation, (self.v.version, self.v.prerelease)) return def test_decrement(self): src_v = FlexibleVersion('1.0.0.0.10') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v -= 1 self.assertEqual(i, src_v.version[-1] - dst_v.version[-1]) return def test_decrement_disallows_below_zero(self): try: FlexibleVersion('1.0') - 1 self.assertTrue(False, "Decrement failed to raise an exception") except ArithmeticError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Decrement raised an unexpected exception: {0}".format(t)) return def test_increment(self): src_v = FlexibleVersion('1.0.0.0.0') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v += 1 self.assertEqual(i, dst_v.version[-1] - src_v.version[-1]) return def test_str(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: self.assertEqual(test, str(FlexibleVersion(test))) return def test_creation_from_flexible_version(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: v = FlexibleVersion(test) self.assertEqual(test, str(FlexibleVersion(v))) return def test_repr(self): v = 
FlexibleVersion('1,2,3rc4', ',', ['lol', 'rc']) expected = "FlexibleVersion ('1,2,3rc4', ',', ('lol', 'rc'))" self.assertEqual(expected, repr(v)) def test_order(self): test0 = ["1.7.0", "1.7.0rc0", "1.11.0"] expected0 = ['1.7.0rc0', '1.7.0', '1.11.0'] self.assertEqual(expected0, list(map(str, sorted([FlexibleVersion(v) for v in test0])))) test1 = [ '2.0.2rc2', '2.2.0beta3', '2.0.10', '2.1.0alpha42', '2.0.2beta4', '2.1.1', '2.0.1', '2.0.2rc3', '2.2.0', '2.0.0', '3.0.1', '2.1.0rc1' ] expected1 = [ '2.0.0', '2.0.1', '2.0.2beta4', '2.0.2rc2', '2.0.2rc3', '2.0.10', '2.1.0alpha42', '2.1.0rc1', '2.1.1', '2.2.0beta3', '2.2.0', '3.0.1' ] self.assertEqual(expected1, list(map(str, sorted([FlexibleVersion(v) for v in test1])))) self.assertEqual(FlexibleVersion("1.0.0.0.0.0.0.0"), FlexibleVersion("1")) self.assertFalse(FlexibleVersion("1.0") > FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") < FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") < FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.9") < FlexibleVersion("1.10")) self.assertTrue(FlexibleVersion("1.9.9") < FlexibleVersion("1.10.0")) self.assertTrue(FlexibleVersion("1.0.0.0") < FlexibleVersion("1.2.0.0")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.10") > FlexibleVersion("1.9")) self.assertTrue(FlexibleVersion("1.10.0") > FlexibleVersion("1.9.9")) self.assertTrue(FlexibleVersion("1.2.0.0") > FlexibleVersion("1.0.0.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") == FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") != FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") != 
FlexibleVersion("1.0")) return if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_network_util.py000066400000000000000000000051761335416306700225700ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.networkutil as networkutil from tests.tools import * class TestNetworkOperations(AgentTestCase): def test_route_entry(self): interface = "eth0" mask = "C0FFFFFF" # 255.255.255.192 destination = "C0BB910A" # gateway = "C1BB910A" flags = "1" metric = "0" expected = 'Iface: eth0\tDestination: 10.145.187.192\tGateway: 10.145.187.193\tMask: 255.255.255.192\tFlags: 0x0001\tMetric: 0' expected_json = '{"Iface": "eth0", "Destination": "10.145.187.192", "Gateway": "10.145.187.193", "Mask": "255.255.255.192", "Flags": "0x0001", "Metric": "0"}' entry = networkutil.RouteEntry(interface, destination, gateway, mask, flags, metric) self.assertEqual(str(entry), expected) self.assertEqual(entry.to_json(), expected_json) def test_nic_link_only(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info" }') def test_nic_ipv4(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") nic.add_ipv4("ipv4-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1"] }') nic.add_ipv4("ipv4-2") self.assertEqual(str(nic), '{ 
"name": "test0", "link": "link info", "ipv4": ["ipv4-1","ipv4-2"] }') def test_nic_ipv6(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") nic.add_ipv6("ipv6-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1"] }') nic.add_ipv6("ipv6-2") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1","ipv6-2"] }') def test_nic_ordinary(self): nic = networkutil.NetworkInterfaceCard("test0", "link INFO") nic.add_ipv6("ipv6-1") nic.add_ipv4("ipv4-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link INFO", "ipv4": ["ipv4-1"], "ipv6": ["ipv6-1"] }') WALinuxAgent-2.2.32/tests/utils/test_passwords.txt000066400000000000000000000000401335416306700222370ustar00rootroot00000000000000김치 करी hamburger caféWALinuxAgent-2.2.32/tests/utils/test_process_util.py000066400000000000000000000237451335416306700225570ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import datetime import subprocess from azurelinuxagent.common.exception import ExtensionError from azurelinuxagent.common.utils.processutil \ import format_stdout_stderr, capture_from_process from tests.tools import * import sys process_target = "{0}/process_target.sh".format(os.path.abspath(os.path.join(__file__, os.pardir))) process_cmd_template = "{0} -o '{1}' -e '{2}'" EXTENSION_ERROR_CODE = 1000 class TestProcessUtils(AgentTestCase): def test_format_stdout_stderr00(self): """ If stdout and stderr are both smaller than the max length, the full representation should be displayed. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr) actual = format_stdout_stderr(stdout, stderr, 1000) self.assertEqual(expected, actual) def test_format_stdout_stderr01(self): """ If stdout and stderr both exceed the max length, then both stdout and stderr are trimmed equally. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." # noinspection SpellCheckingInspection expected = '[stdout]\ns over the lazy dog.\n\n[stderr]\nizards jump quickly.' actual = format_stdout_stderr(stdout, stderr, 60) self.assertEqual(expected, actual) self.assertEqual(60, len(actual)) def test_format_stdout_stderr02(self): """ If stderr is much larger than stdout, stderr is allowed to borrow space from stdout's quota. """ stdout = "empty" stderr = "The five boxing wizards jump quickly." expected = '[stdout]\nempty\n\n[stderr]\ns jump quickly.' actual = format_stdout_stderr(stdout, stderr, 40) self.assertEqual(expected, actual) self.assertEqual(40, len(actual)) def test_format_stdout_stderr03(self): """ If stdout is much larger than stderr, stdout is allowed to borrow space from stderr's quota. """ stdout = "The quick brown fox jumps over the lazy dog." 
stderr = "empty" expected = '[stdout]\nr the lazy dog.\n\n[stderr]\nempty' actual = format_stdout_stderr(stdout, stderr, 40) self.assertEqual(expected, actual) self.assertEqual(40, len(actual)) def test_format_stdout_stderr04(self): """ If the max length is not sufficient to even hold the stdout and stderr markers an empty string is returned. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." expected = '' actual = format_stdout_stderr(stdout, stderr, 4) self.assertEqual(expected, actual) self.assertEqual(0, len(actual)) def test_format_stdout_stderr05(self): """ If stdout and stderr are empty, an empty template is returned. """ expected = '[stdout]\n\n\n[stderr]\n' actual = format_stdout_stderr('', '', 1000) self.assertEqual(expected, actual) def test_process_stdout_stderr(self): """ If the command has no timeout, the process need not be the leader of its own process group. """ stdout = "The quick brown fox jumps over the lazy dog.\n" stderr = "The five boxing wizards jump quickly.\n" expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr) cmd = process_cmd_template.format(process_target, stdout, stderr) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ) actual = capture_from_process(process, cmd) self.assertEqual(expected, actual) def test_process_timeout_non_forked(self): """ non-forked process runs for 20 seconds, timeout is 10 seconds we expect: - test to run in just over 10 seconds - exception should be thrown - output should be collected """ cmd = "{0} -t 20".format(process_target) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, preexec_fn=os.setsid) try: capture_from_process(process, 'sleep 20', 10, EXTENSION_ERROR_CODE) self.fail('Timeout exception was expected') except ExtensionError as e: body = str(e) self.assertTrue('Timeout(10)' in body) self.assertTrue('Iteration 9' 
in body) self.assertFalse('Iteration 11' in body) self.assertEqual(EXTENSION_ERROR_CODE, e.code) except Exception as gen_ex: self.fail('Unexpected exception: {0}'.format(gen_ex)) def test_process_timeout_forked(self): """ forked process runs for 20 seconds, timeout is 10 seconds we expect: - test to run in less than 3 seconds - no exception should be thrown - no output is collected """ cmd = "{0} -t 20 &".format(process_target) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, preexec_fn=os.setsid) start = datetime.datetime.utcnow() try: cap = capture_from_process(process, 'sleep 20 &', 10) except Exception as e: self.fail('No exception should be thrown for a long running process which forks: {0}'.format(e)) duration = datetime.datetime.utcnow() - start self.assertTrue(duration < datetime.timedelta(seconds=3)) self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', cap) def test_process_behaved_non_forked(self): """ non-forked process runs for 10 seconds, timeout is 20 seconds we expect: - test to run in just over 10 seconds - no exception should be thrown - output should be collected """ cmd = "{0} -t 10".format(process_target) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, preexec_fn=os.setsid) try: body = capture_from_process(process, 'sleep 10', 20) except Exception as gen_ex: self.fail('Unexpected exception: {0}'.format(gen_ex)) self.assertFalse('Timeout' in body) self.assertTrue('Iteration 9' in body) self.assertTrue('Iteration 10' in body) def test_process_behaved_forked(self): """ forked process runs for 10 seconds, timeout is 20 seconds we expect: - test to run in under 3 seconds - no exception should be thrown - output is not collected """ cmd = "{0} -t 10 &".format(process_target) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, preexec_fn=os.setsid) start = 
datetime.datetime.utcnow() try: body = capture_from_process(process, 'sleep 10 &', 20) except Exception as e: self.fail('No exception should be thrown for a well behaved process which forks: {0}'.format(e)) duration = datetime.datetime.utcnow() - start self.assertTrue(duration < datetime.timedelta(seconds=3)) self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', body) def test_process_bad_pgid(self): """ If a timeout is requested but the process is not the root of the process group, raise an exception. """ stdout = "stdout\n" stderr = "stderr\n" cmd = process_cmd_template.format(process_target, stdout, stderr) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ) if sys.version_info < (2, 7): self.assertRaises(ExtensionError, capture_from_process, process, cmd, 10, EXTENSION_ERROR_CODE) else: with self.assertRaises(ExtensionError) as ee: capture_from_process(process, cmd, 10, EXTENSION_ERROR_CODE) body = str(ee.exception) if sys.version_info >= (3, 2): self.assertRegex(body, "process group") else: self.assertRegexpMatches(body, "process group") self.assertEqual(EXTENSION_ERROR_CODE, ee.exception.code) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_rest_util.py000066400000000000000000000645311335416306700220540ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import HttpError, \ ResourceGoneError import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.utils.restutil import HTTP_USER_AGENT from azurelinuxagent.common.future import httpclient, ustr from tests.tools import * class TestIOErrorCounter(AgentTestCase): def test_increment_hostplugin(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(1, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_protocol(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(1, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_other(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( '169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(1, counts["other"]) def test_get_and_reset(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() 
self.assertEqual(2, counts.get("hostplugin")) self.assertEqual(1, counts.get("protocol")) self.assertEqual(2, counts.get("other")) self.assertEqual( {"hostplugin":0, "protocol":0, "other":0}, restutil.IOErrorCounter._counts) class TestHttpOperations(AgentTestCase): def test_parse_url(self): test_uri = "http://abc.def/ghi#hash?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/ghi#hash?jkl=mn", rel_uri) test_uri = "http://abc.def/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/", rel_uri) self.assertEquals(False, secure) test_uri = "https://abc.def/ghi?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals(True, secure) test_uri = "http://abc.def:80/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) host, port, secure, rel_uri = restutil._parse_url("") self.assertEquals(None, host) self.assertEquals(rel_uri, "") host, port, secure, rel_uri = restutil._parse_url("None") self.assertEquals(None, host) self.assertEquals(rel_uri, "None") def test_cleanup_sas_tokens_from_urls_for_normal_cases(self): test_url = "http://abc.def/ghi#hash?jkl=mn" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "http://abc.def:80/" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "http://abc.def/" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "https://abc.def/ghi?jkl=mn" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) def test_cleanup_sas_tokens_from_urls_containing_sas_tokens(self): # Contains pair of URLs (RawURL, RedactedURL) urls_tuples = [("https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" 
"=sXBjML1Fpk9UnTBtajo05ZTFSk0LWFGvARZ6WlVcAog%3D&srt=o&ss=b&" "spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00%3A21%3A38Z&" "st=2017-07-01T23%3A16%3A38Z", "https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" "=" + restutil.REDACTED_TEXT + "&srt=o&ss=b&spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00" "%3A21%3A38Z&st=2017-07-01T23%3A16%3A38Z"), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=DavQgRtl99DsEPv9Xeb63GnLXCuaLYw5ay%2BE1cFckQY%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=ttSCKmyjiDEeIzT9q7HtYYgbCRIXuesFSOhNEab52NM%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=X0imGmcj5KcBPFcqlfYjIZakzGrzONGbRv5JMOnGrwc%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=9hfxYvaZzrMahtGO1OgMUiFGnDOtZXulZ3skkv1eVBg%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https" "&sig=cmluQEHnOGsVK9NDm83ruuPdPWNQcerfjOAbkspNZXU%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" 
"=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https&sig" "=" + restutil.REDACTED_TEXT) ] for x in urls_tuples: self.assertEquals(restutil.redact_sas_tokens_in_urls(x[0]), x[1]) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_none_is_default(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_overrides_env(self, mock_host, mock_port): mock_host.return_value = "host" mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual("host", h) self.assertEqual(None, p) self.assertEqual(1, mock_host.call_count) self.assertEqual(1, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_requires_host(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) self.assertEqual(1, mock_host.call_count) self.assertEqual(0, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_http_uses_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_https_uses_httpsproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = 
restutil._get_http_proxy(secure=True) self.assertEqual("bar.com", h) self.assertEqual(443, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_ignores_user_in_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://user:pw@foo.com:80' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar") HTTPConnection.assert_has_calls([ call("foo", 80, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo", 443, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) 
@patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333) HTTPConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333, secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_with_retry(self, _http_request, sleep): mock_http_resp = MagicMock() mock_http_resp.read = Mock(return_value="hehe") _http_request.return_value = mock_http_resp # Test http get resp = 
restutil.http_get("http://foo.bar") self.assertEquals("hehe", resp.read()) # Test https get resp = restutil.http_get("https://foo.bar") self.assertEquals("hehe", resp.read()) # Test http failure _http_request.side_effect = httpclient.HTTPException("Http failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") # Test http failure _http_request.side_effect = IOError("IO failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_status_codes(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_passed_status_codes(self, _http_request, _sleep): # Ensure the code is not part of the standard set self.assertFalse(httpclient.UNAUTHORIZED in restutil.RETRY_CODES) _http_request.side_effect = [ Mock(status=httpclient.UNAUTHORIZED), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar", retry_codes=[httpclient.UNAUTHORIZED]) self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_fibonacci_delay(self, _http_request, _sleep): # Ensure the code is not a throttle code self.assertFalse(httpclient.BAD_GATEWAY in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.BAD_GATEWAY) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) 
self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [ call(restutil._compute_delay(i+1, restutil.DELAY_IN_SECONDS)) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_constant_delay_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_for_safe_minimum_number_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.THROTTLE_RETRIES-1) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=1) self.assertEqual(restutil.THROTTLE_RETRIES, _http_request.call_count) self.assertEqual(restutil.THROTTLE_RETRIES-1, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.THROTTLE_RETRIES-1)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_resource_gone(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.GONE) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, 
_http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_invalid_container_configuration(self, _http_request, _sleep): def read(): return b'{ "errorCode": "InvalidContainerConfiguration", "message": "Invalid request." }' _http_request.side_effect = [ Mock(status=httpclient.BAD_REQUEST, reason='Bad Request', read=read) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_exceptions(self, _http_request, _sleep): # Testing each exception is difficult because they have varying # signatures; for now, test one and ensure the set is unchanged recognized_exceptions = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] self.assertEqual(recognized_exceptions, restutil.RETRY_EXCEPTIONS) _http_request.side_effect = [ httpclient.IncompleteRead(''), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_ioerrors(self, _http_request, _sleep): ioerror = IOError() ioerror.errno = 42 _http_request.side_effect = [ ioerror, Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) def test_request_failed(self): self.assertTrue(restutil.request_failed(None)) resp = Mock() for status in restutil.OK_CODES: resp.status = status self.assertFalse(restutil.request_failed(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertTrue(restutil.request_failed(resp)) self.assertFalse( restutil.request_failed( resp, 
ok_codes=[httpclient.BAD_REQUEST])) def test_request_succeeded(self): self.assertFalse(restutil.request_succeeded(None)) resp = Mock() for status in restutil.OK_CODES: resp.status = status self.assertTrue(restutil.request_succeeded(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertFalse(restutil.request_succeeded(resp)) self.assertTrue( restutil.request_succeeded( resp, ok_codes=[httpclient.BAD_REQUEST])) def test_read_response_error(self): """ Validate the read_response_error method handles encoding correctly """ responses = ['message', b'message', '\x80message\x80'] response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: for s in responses: patch_response.return_value = s result = restutil.read_response_error(response) print("RESPONSE: {0}".format(s)) print("RESULT: {0}".format(result)) print("PRESENT: {0}".format('[status: reason]' in result)) self.assertTrue('[status: reason]' in result) self.assertTrue('message' in result) def test_read_response_bytes(self): response_bytes = '7b:0a:20:20:20:20:22:65:72:72:6f:72:43:6f:64:65:22:' \ '3a:20:22:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:' \ '69:73:20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:' \ '69:73:20:6f:70:65:72:61:74:69:6f:6e:2e:22:2c:0a:20:' \ '20:20:20:22:6d:65:73:73:61:67:65:22:3a:20:22:c3:af:' \ 'c2:bb:c2:bf:3c:3f:78:6d:6c:20:76:65:72:73:69:6f:6e:' \ '3d:22:31:2e:30:22:20:65:6e:63:6f:64:69:6e:67:3d:22:' \ '75:74:66:2d:38:22:3f:3e:3c:45:72:72:6f:72:3e:3c:43:' \ '6f:64:65:3e:49:6e:76:61:6c:69:64:42:6c:6f:62:54:79:' \ '70:65:3c:2f:43:6f:64:65:3e:3c:4d:65:73:73:61:67:65:' \ '3e:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:69:73:' \ '20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:69:73:' \ '20:6f:70:65:72:61:74:69:6f:6e:2e:0a:52:65:71:75:65:' \ '73:74:49:64:3a:63:37:34:32:39:30:63:62:2d:30:30:30:' \ '31:2d:30:30:62:35:2d:30:36:64:61:2d:64:64:36:36:36:' \ 
'61:30:30:30:22:2c:0a:20:20:20:20:22:64:65:74:61:69:' \ '6c:73:22:3a:20:22:22:0a:7d'.split(':') expected_response = '[HTTP Failed] [status: reason] {\n "errorCode": "The blob ' \ 'type is invalid for this operation.",\n ' \ '"message": "' \ 'InvalidBlobTypeThe ' \ 'blob type is invalid for this operation.\n' \ 'RequestId:c74290cb-0001-00b5-06da-dd666a000",' \ '\n "details": ""\n}' response_string = ''.join(chr(int(b, 16)) for b in response_bytes) response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: patch_response.return_value = response_string result = restutil.read_response_error(response) self.assertEqual(result, expected_response) try: raise HttpError("{0}".format(result)) except HttpError as e: self.assertTrue(result in ustr(e)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_shell_util.py000066400000000000000000000027011335416306700221750ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * import uuid import unittest import os import azurelinuxagent.common.utils.shellutil as shellutil import test class TestrunCmd(AgentTestCase): def test_run_get_output(self): output = shellutil.run_get_output(u"ls /") self.assertNotEquals(None, output) self.assertEquals(0, output[0]) err = shellutil.run_get_output(u"ls /not-exists") self.assertNotEquals(0, err[0]) err = shellutil.run_get_output(u"ls 我") self.assertNotEquals(0, err[0]) def test_shellquote(self): self.assertEqual("\'foo\'", shellutil.quote("foo")) self.assertEqual("\'foo bar\'", shellutil.quote("foo bar")) self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar")) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.32/tests/utils/test_text_util.py000066400000000000000000000165771335416306700220720ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from distutils.version import LooseVersion as Version from tests.tools import * import hashlib import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.future import ustr class TestTextUtil(AgentTestCase): def test_get_password_hash(self): with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_passwords.txt'), 'rb') as in_file: for data in in_file: # Remove bom on bytes data before it is converted into string. 
data = textutil.remove_bom(data) data = ustr(data, encoding='utf-8') password_hash = textutil.gen_password_hash(data, 6, 10) self.assertNotEquals(None, password_hash) def test_replace_non_ascii(self): data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8') self.assertEqual('hehe', textutil.replace_non_ascii(data)) data = "abcd\xa0e\xf0fghijk\xbblm" self.assertEqual("abcdefghijklm", textutil.replace_non_ascii(data)) data = "abcd\xa0e\xf0fghijk\xbblm" self.assertEqual("abcdXeXfghijkXlm", textutil.replace_non_ascii(data, replace_char='X')) self.assertEqual('', textutil.replace_non_ascii(None)) def test_remove_bom(self): #Test bom could be removed data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8') data = textutil.remove_bom(data) self.assertNotEquals(0xbb, data[0]) #bom is comprised of a sequence of three bytes and ff length of the input is shorter # than three bytes, remove_bom should not do anything data = u"\xa7" data = textutil.remove_bom(data) self.assertEquals(data, data[0]) data = u"\xa7\xef" data = textutil.remove_bom(data) self.assertEquals(u"\xa7", data[0]) self.assertEquals(u"\xef", data[1]) #Test string without BOM is not affected data = u"hehe" data = textutil.remove_bom(data) self.assertEquals(u"h", data[0]) data = u"" data = textutil.remove_bom(data) self.assertEquals(u"", data) data = u" " data = textutil.remove_bom(data) self.assertEquals(u" ", data) def test_version_compare(self): self.assertTrue(Version("1.0") < Version("1.1")) self.assertTrue(Version("1.9") < Version("1.10")) self.assertTrue(Version("1.9.9") < Version("1.10.0")) self.assertTrue(Version("1.0.0.0") < Version("1.2.0.0")) self.assertTrue(Version("1.0") <= Version("1.1")) self.assertTrue(Version("1.1") > Version("1.0")) self.assertTrue(Version("1.1") >= Version("1.0")) self.assertTrue(Version("1.0") == Version("1.0")) self.assertTrue(Version("1.0") >= Version("1.0")) self.assertTrue(Version("1.0") <= Version("1.0")) self.assertTrue(Version("1.9") < "1.10") self.assertTrue("1.9" < 
Version("1.10")) def test_get_bytes_from_pem(self): content = ("-----BEGIN CERTIFICATE-----\n" "certificate\n" "-----END CERTIFICATE----\n") base64_bytes = textutil.get_bytes_from_pem(content) self.assertEquals("certificate", base64_bytes) content = ("-----BEGIN PRIVATE KEY-----\n" "private key\n" "-----END PRIVATE Key-----\n") base64_bytes = textutil.get_bytes_from_pem(content) self.assertEquals("private key", base64_bytes) def test_swap_hexstring(self): data = [ ['12', 1, '21'], ['12', 2, '12'], ['12', 3, '012'], ['12', 4, '0012'], ['123', 1, '321'], ['123', 2, '2301'], ['123', 3, '123'], ['123', 4, '0123'], ['1234', 1, '4321'], ['1234', 2, '3412'], ['1234', 3, '234001'], ['1234', 4, '1234'], ['abcdef12', 1, '21fedcba'], ['abcdef12', 2, '12efcdab'], ['abcdef12', 3, 'f12cde0ab'], ['abcdef12', 4, 'ef12abcd'], ['aBcdEf12', 1, '21fEdcBa'], ['aBcdEf12', 2, '12EfcdaB'], ['aBcdEf12', 3, 'f12cdE0aB'], ['aBcdEf12', 4, 'Ef12aBcd'] ] for t in data: self.assertEqual(t[2], textutil.swap_hexstring(t[0], width=t[1])) def test_compress(self): result = textutil.compress('[stdout]\nHello World\n\n[stderr]\n\n') self.assertEqual('eJyLLi5JyS8tieXySM3JyVcIzy/KSeHiigaKphYVxXJxAQDAYQr2', result) def test_hash_empty_list(self): result = textutil.hash_strings([]) self.assertEqual(b'\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t', result) def test_hash_list(self): test_list = ["abc", "123"] result_from_list = textutil.hash_strings(test_list) test_string = "".join(test_list) hash_from_string = hashlib.sha1() hash_from_string.update(test_string.encode()) self.assertEqual(result_from_list, hash_from_string.digest()) self.assertEqual(hash_from_string.hexdigest(), '6367c48dd193d56ea7b0baad25b19455e529f5ee') def test_empty_strings(self): self.assertTrue(textutil.is_str_none_or_whitespace(None)) self.assertTrue(textutil.is_str_none_or_whitespace(' ')) self.assertTrue(textutil.is_str_none_or_whitespace('\t')) self.assertTrue(textutil.is_str_none_or_whitespace('\n')) 
self.assertTrue(textutil.is_str_none_or_whitespace(' \t')) self.assertTrue(textutil.is_str_none_or_whitespace(' \r\n')) self.assertTrue(textutil.is_str_empty(None)) self.assertTrue(textutil.is_str_empty(' ')) self.assertTrue(textutil.is_str_empty('\t')) self.assertTrue(textutil.is_str_empty('\n')) self.assertTrue(textutil.is_str_empty(' \t')) self.assertTrue(textutil.is_str_empty(' \r\n')) self.assertFalse(textutil.is_str_none_or_whitespace(u' \x01 ')) self.assertFalse(textutil.is_str_none_or_whitespace(u'foo')) self.assertFalse(textutil.is_str_none_or_whitespace('bar')) self.assertFalse(textutil.is_str_empty(u' \x01 ')) self.assertFalse(textutil.is_str_empty(u'foo')) self.assertFalse(textutil.is_str_empty('bar')) hex_null_1 = u'\x00' hex_null_2 = u' \x00 ' self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_1)) self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_2)) self.assertTrue(textutil.is_str_empty(hex_null_1)) self.assertTrue(textutil.is_str_empty(hex_null_2)) self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_1), textutil.is_str_empty(hex_null_1)) self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_2), textutil.is_str_empty(hex_null_2)) if __name__ == '__main__': unittest.main()